#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
import os
import sys
import inspect
from collections import OrderedDict
from os.path import join as pjoin
this_dir = os.path.abspath(os.path.split(__file__)[0])
sys.path.insert(0, os.path.join(this_dir, '../'))
from libcloud.compute.base import NodeDriver
from libcloud.compute.providers import get_driver as get_compute_driver
from libcloud.compute.providers import DRIVERS as COMPUTE_DRIVERS
from libcloud.compute.types import Provider as ComputeProvider
from libcloud.loadbalancer.base import Driver as LBDriver
from libcloud.loadbalancer.providers import get_driver as get_lb_driver
from libcloud.loadbalancer.providers import DRIVERS as LB_DRIVERS
from libcloud.loadbalancer.types import Provider as LBProvider
from libcloud.storage.base import StorageDriver
from libcloud.storage.providers import get_driver as get_storage_driver
from libcloud.storage.providers import DRIVERS as STORAGE_DRIVERS
from libcloud.storage.types import Provider as StorageProvider
from libcloud.dns.base import DNSDriver
from libcloud.dns.providers import get_driver as get_dns_driver
from libcloud.dns.providers import DRIVERS as DNS_DRIVERS
from libcloud.dns.types import Provider as DNSProvider
REQUIRED_DEPENDENCIES = [
'pysphere'
]
for dependency in REQUIRED_DEPENDENCIES:
try:
__import__(dependency)
except ImportError:
msg = 'Missing required dependency: %s' % (dependency)
raise ImportError(msg)
BASE_API_METHODS = {
'compute_main': ['list_nodes', 'create_node', 'reboot_node',
'destroy_node', 'list_images', 'list_sizes',
'deploy_node'],
'compute_image_management': ['list_images', 'get_image',
'create_image', 'delete_image', 'copy_image'],
'compute_block_storage': ['list_volumes', 'create_volume',
'destroy_volume',
'attach_volume', 'detach_volume',
'list_volume_snapshots',
'create_volume_snapshot'],
'compute_key_pair_management': ['list_key_pairs', 'get_key_pair',
'create_key_pair',
'import_key_pair_from_string',
'import_key_pair_from_file',
'delete_key_pair'],
'loadbalancer': ['create_balancer', 'list_balancers',
'balancer_list_members', 'balancer_attach_member',
'balancer_detach_member', 'balancer_attach_compute_node'],
'storage_main': ['list_containers', 'list_container_objects',
'iterate_containers', 'iterate_container_objects',
'create_container', 'delete_container', 'upload_object',
'upload_object_via_stream', 'download_object',
'download_object_as_stream', 'delete_object'],
'storage_cdn': ['enable_container_cdn', 'enable_object_cdn',
'get_container_cdn_url', 'get_object_cdn_url'],
'dns': ['list_zones', 'list_records', 'iterate_zones', 'iterate_records',
'create_zone', 'update_zone', 'create_record', 'update_record',
'delete_zone', 'delete_record']
}
FRIENDLY_METHODS_NAMES = {
'compute_main': {
'list_nodes': 'list nodes',
'create_node': 'create node',
'reboot_node': 'reboot node',
'destroy_node': 'destroy node',
'list_images': 'list images',
'list_sizes': 'list sizes',
'deploy_node': 'deploy node'
},
'compute_image_management': {
'list_images': 'list images',
'get_image': 'get image',
'create_image': 'create image',
'copy_image': 'copy image',
'delete_image': 'delete image'
},
'compute_block_storage': {
'list_volumes': 'list volumes',
'create_volume': 'create volume',
'destroy_volume': 'destroy volume',
'attach_volume': 'attach volume',
'detach_volume': 'detach volume',
'list_volume_snapshots': 'list snapshots',
'create_volume_snapshot': 'create snapshot'
},
'compute_key_pair_management': {
'list_key_pairs': 'list key pairs',
'get_key_pair': 'get key pair',
'create_key_pair': 'create key pair',
'import_key_pair_from_string': 'import public key from string',
'import_key_pair_from_file': 'import public key from file',
'delete_key_pair': 'delete key pair'
},
'loadbalancer': {
'create_balancer': 'create balancer',
'list_balancers': 'list balancers',
'balancer_list_members': 'list members',
'balancer_attach_member': 'attach member',
'balancer_detach_member': 'detach member',
'balancer_attach_compute_node': 'attach compute node'
},
'storage_main': {
'list_containers': 'list containers',
'list_container_objects': 'list objects',
'create_container': 'create container',
'delete_container': 'delete container',
'upload_object': 'upload object',
'upload_object_via_stream': 'streaming object upload',
'download_object': 'download object',
'download_object_as_stream': 'streaming object download',
'delete_object': 'delete object'
},
'storage_cdn': {
'enable_container_cdn': 'enable container cdn',
'enable_object_cdn': 'enable object cdn',
'get_container_cdn_url': 'get container cdn URL',
'get_object_cdn_url': 'get object cdn URL',
},
'dns': {
'list_zones': 'list zones',
'list_records': 'list records',
'create_zone': 'create zone',
'update_zone': 'update zone',
'create_record': 'create record',
'update_record': 'update record',
'delete_zone': 'delete zone',
'delete_record': 'delete record'
},
}
IGNORED_PROVIDERS = [
'dummy',
'local',
# Deprecated constants
'cloudsigma_us',
'cloudfiles_swift'
]
def get_provider_api_names(Provider):
names = [key for key, value in Provider.__dict__.items() if
not key.startswith('__')]
return names
def generate_providers_table(api):
result = {}
if api in ['compute_main', 'compute_image_management',
'compute_block_storage', 'compute_key_pair_management']:
driver = NodeDriver
drivers = COMPUTE_DRIVERS
provider = ComputeProvider
get_driver_method = get_compute_driver
elif api == 'loadbalancer':
driver = LBDriver
drivers = LB_DRIVERS
provider = LBProvider
get_driver_method = get_lb_driver
elif api in ['storage_main', 'storage_cdn']:
driver = StorageDriver
drivers = STORAGE_DRIVERS
provider = StorageProvider
get_driver_method = get_storage_driver
elif api == 'dns':
driver = DNSDriver
drivers = DNS_DRIVERS
provider = DNSProvider
get_driver_method = get_dns_driver
else:
raise Exception('Invalid api: %s' % (api))
names = get_provider_api_names(provider)
result = OrderedDict()
for name in names:
enum = getattr(provider, name)
try:
cls = get_driver_method(enum)
except:
# Deprecated providers throw an exception
continue
# Hack for providers which expose multiple classes and support multiple
# API versions
# TODO: Make entry per version
if name.lower() == 'cloudsigma':
from libcloud.compute.drivers.cloudsigma import \
CloudSigma_2_0_NodeDriver
cls = CloudSigma_2_0_NodeDriver
elif name.lower() == 'opennebula':
from libcloud.compute.drivers.opennebula import \
OpenNebula_3_8_NodeDriver
cls = OpenNebula_3_8_NodeDriver
if name.lower() in IGNORED_PROVIDERS:
continue
driver_methods = dict(inspect.getmembers(cls,
predicate=inspect.ismethod))
base_methods = dict(inspect.getmembers(driver,
predicate=inspect.ismethod))
base_api_methods = BASE_API_METHODS[api]
result[name] = {'name': cls.name, 'website': cls.website,
'constant': name, 'module': drivers[enum][0],
'class': drivers[enum][1],
'methods': {}}
for method_name in base_api_methods:
base_method = base_methods[method_name]
driver_method = driver_methods[method_name]
if method_name == 'deploy_node':
features = getattr(cls, 'features', {}).get('create_node', [])
is_implemented = len(features) >= 1
else:
is_implemented = (id(driver_method.im_func) !=
id(base_method.im_func))
result[name]['methods'][method_name] = is_implemented
return result
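# A minimal sketch (not from the original script) of the override check used
# above: under Python 2, a subclass method and a base-class method share the
# same im_func exactly when the subclass inherits the base implementation
# unchanged, so differing function objects mean the provider implements the
# method itself.
def _example_override_check():
    import inspect

    class Base(object):
        def list_nodes(self):
            raise NotImplementedError()

    class Impl(Base):
        def list_nodes(self):
            return []

    base_methods = dict(inspect.getmembers(Base, predicate=inspect.ismethod))
    impl_methods = dict(inspect.getmembers(Impl, predicate=inspect.ismethod))
    # Impl overrides list_nodes, so the underlying function objects differ
    return (id(impl_methods['list_nodes'].im_func) !=
            id(base_methods['list_nodes'].im_func))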
def generate_rst_table(data):
cols = len(data[0])
col_len = [max(len(r[i]) for r in data) for i in range(cols)]
formatter = ' '.join('{:<%d}' % c for c in col_len)
header = formatter.format(*['=' * c for c in col_len])
rows = [formatter.format(*row) for row in data]
result = header + '\n' + rows[0] + '\n' + header + '\n' +\
'\n'.join(rows[1:]) + '\n' + header
return result
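# Illustrative usage (assumed input, not from the original script):
# generate_rst_table pads each column to its widest cell and frames the rows
# with '=' rules, yielding a simple reStructuredText table:
#
#     ======== ===
#     Provider DNS
#     ======== ===
#     Foo      yes
#     ======== ===
def _example_generate_rst_table():
    return generate_rst_table([['Provider', 'DNS'], ['Foo', 'yes']])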
def generate_supported_methods_table(api, provider_matrix):
base_api_methods = BASE_API_METHODS[api]
data = []
header = [FRIENDLY_METHODS_NAMES[api][method_name] for method_name in
base_api_methods if not method_name.startswith('iterate_')]
data.append(['Provider'] + header)
for provider, values in sorted(provider_matrix.items()):
provider_name = '`%s`_' % (values['name'])
row = [provider_name]
# TODO: Make it nicer
# list_* methods don't need to be implemented if iterate_* methods are
# implemented
if api == 'storage_main':
if values['methods']['iterate_containers']:
values['methods']['list_containers'] = True
if values['methods']['iterate_container_objects']:
values['methods']['list_container_objects'] = True
elif api == 'dns':
            # list_zones and list_records don't need to be implemented if
            # the corresponding iterate_* methods are implemented
if values['methods']['iterate_zones']:
values['methods']['list_zones'] = True
if values['methods']['iterate_records']:
values['methods']['list_records'] = True
for method in base_api_methods:
# TODO: ghetto
if method.startswith('iterate_'):
continue
supported = values['methods'][method]
if supported:
row.append('yes')
else:
row.append('no')
data.append(row)
result = generate_rst_table(data)
result += '\n\n'
for provider, values in sorted(provider_matrix.items()):
result += '.. _`%s`: %s\n' % (values['name'], values['website'])
return result
def generate_supported_providers_table(api, provider_matrix):
data = []
header = ['Provider', 'Documentation', 'Provider constant', 'Module',
'Class Name']
data.append(header)
for provider, values in sorted(provider_matrix.items()):
name_str = '`%s`_' % (values['name'])
module_str = ':mod:`%s`' % (values['module'])
class_str = ':class:`%s`' % (values['class'])
params = {'api': api, 'provider': provider.lower()}
driver_docs_path = pjoin(this_dir,
'../docs/%(api)s/drivers/%(provider)s.rst'
% params)
if os.path.exists(driver_docs_path):
docs_link = ':doc:`Click </%(api)s/drivers/%(provider)s>`' % params
else:
docs_link = ''
row = [name_str, docs_link, values['constant'], module_str, class_str]
data.append(row)
result = generate_rst_table(data)
result += '\n\n'
for provider, values in sorted(provider_matrix.items()):
result += '.. _`%s`: %s\n' % (values['name'], values['website'])
return result
def generate_tables():
apis = BASE_API_METHODS.keys()
for api in apis:
result = generate_providers_table(api)
docs_dir = api
if api.startswith('compute'):
docs_dir = 'compute'
elif api.startswith('storage'):
docs_dir = 'storage'
supported_providers = generate_supported_providers_table(docs_dir,
result)
supported_methods = generate_supported_methods_table(api, result)
current_path = os.path.dirname(__file__)
target_dir = os.path.abspath(pjoin(current_path,
'../docs/%s/' % (docs_dir)))
file_name_1 = '_supported_providers.rst'
file_name_2 = '_supported_methods.rst'
if api == 'compute_main':
file_name_2 = '_supported_methods_main.rst'
elif api == 'compute_image_management':
file_name_2 = '_supported_methods_image_management.rst'
elif api == 'compute_block_storage':
file_name_2 = '_supported_methods_block_storage.rst'
elif api == 'compute_key_pair_management':
file_name_2 = '_supported_methods_key_pair_management.rst'
elif api == 'storage_main':
file_name_2 = '_supported_methods_main.rst'
elif api == 'storage_cdn':
file_name_2 = '_supported_methods_cdn.rst'
supported_providers_path = pjoin(target_dir, file_name_1)
supported_methods_path = pjoin(target_dir, file_name_2)
with open(supported_providers_path, 'w') as fp:
fp.write(supported_providers)
with open(supported_methods_path, 'w') as fp:
fp.write(supported_methods)
generate_tables()
"""
Get API information encoded in C files.
See ``find_function`` for how functions should be formatted, and
``read_order`` for how the order of the functions should be
specified.
"""
import sys, os, re
try:
import hashlib
md5new = hashlib.md5
except ImportError:
import md5
md5new = md5.new
if sys.version_info[:2] < (2, 6):
from sets import Set as set
import textwrap
from os.path import join
__docformat__ = 'restructuredtext'
# The files under src/ that are scanned for API functions
API_FILES = [join('multiarray', 'array_assign_array.c'),
join('multiarray', 'array_assign_scalar.c'),
join('multiarray', 'arrayobject.c'),
join('multiarray', 'arraytypes.c.src'),
join('multiarray', 'buffer.c'),
join('multiarray', 'calculation.c'),
join('multiarray', 'conversion_utils.c'),
join('multiarray', 'convert.c'),
join('multiarray', 'convert_datatype.c'),
join('multiarray', 'ctors.c'),
join('multiarray', 'datetime.c'),
join('multiarray', 'datetime_busday.c'),
join('multiarray', 'datetime_busdaycal.c'),
join('multiarray', 'datetime_strings.c'),
join('multiarray', 'descriptor.c'),
join('multiarray', 'einsum.c.src'),
join('multiarray', 'flagsobject.c'),
join('multiarray', 'getset.c'),
join('multiarray', 'item_selection.c'),
join('multiarray', 'iterators.c'),
join('multiarray', 'methods.c'),
join('multiarray', 'multiarraymodule.c'),
join('multiarray', 'nditer_api.c'),
join('multiarray', 'nditer_constr.c'),
join('multiarray', 'nditer_pywrap.c'),
join('multiarray', 'nditer_templ.c.src'),
join('multiarray', 'number.c'),
join('multiarray', 'refcount.c'),
join('multiarray', 'scalartypes.c.src'),
join('multiarray', 'scalarapi.c'),
join('multiarray', 'sequence.c'),
join('multiarray', 'shape.c'),
join('multiarray', 'usertypes.c'),
join('umath', 'loops.c.src'),
join('umath', 'ufunc_object.c'),
join('umath', 'ufunc_type_resolution.c'),
join('umath', 'reduction.c'),
]
THIS_DIR = os.path.dirname(__file__)
API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES]
def file_in_this_dir(filename):
return os.path.join(THIS_DIR, filename)
def remove_whitespace(s):
return ''.join(s.split())
def _repl(str):
return str.replace('Bool','npy_bool')
class Function(object):
def __init__(self, name, return_type, args, doc=''):
self.name = name
self.return_type = _repl(return_type)
self.args = args
self.doc = doc
def _format_arg(self, typename, name):
if typename.endswith('*'):
return typename + name
else:
return typename + ' ' + name
def __str__(self):
argstr = ', '.join([self._format_arg(*a) for a in self.args])
if self.doc:
doccomment = '/* %s */\n' % self.doc
else:
doccomment = ''
return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr)
def to_ReST(self):
lines = ['::', '', ' ' + self.return_type]
argstr = ',\000'.join([self._format_arg(*a) for a in self.args])
name = ' %s' % (self.name,)
s = textwrap.wrap('(%s)' % (argstr,), width=72,
initial_indent=name,
subsequent_indent=' ' * (len(name)+1),
break_long_words=False)
for l in s:
lines.append(l.replace('\000', ' ').rstrip())
lines.append('')
if self.doc:
lines.append(textwrap.dedent(self.doc))
return '\n'.join(lines)
def api_hash(self):
m = md5new()
m.update(remove_whitespace(self.return_type))
m.update('\000')
m.update(self.name)
m.update('\000')
for typename, name in self.args:
m.update(remove_whitespace(typename))
m.update('\000')
return m.hexdigest()[:8]
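# Quick sketch (hypothetical values, not part of the original module): a
# parsed Function renders back as a C declaration, doc comment first when
# one was captured.
def _example_function_str():
    f = Function('PyArray_Foo', 'int', [('PyObject *', 'obj')], doc='Does foo.')
    return str(f)  # -> '/* Does foo. */\nint PyArray_Foo(PyObject *obj)'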
class ParseError(Exception):
def __init__(self, filename, lineno, msg):
self.filename = filename
self.lineno = lineno
self.msg = msg
def __str__(self):
return '%s:%s:%s' % (self.filename, self.lineno, self.msg)
def skip_brackets(s, lbrac, rbrac):
count = 0
for i, c in enumerate(s):
if c == lbrac:
count += 1
elif c == rbrac:
count -= 1
if count == 0:
return i
raise ValueError("no match '%s' for '%s' (%r)" % (lbrac, rbrac, s))
def split_arguments(argstr):
arguments = []
bracket_counts = {'(': 0, '[': 0}
current_argument = []
state = 0
i = 0
def finish_arg():
if current_argument:
argstr = ''.join(current_argument).strip()
m = re.match(r'(.*(\s+|[*]))(\w+)$', argstr)
if m:
typename = m.group(1).strip()
name = m.group(3)
else:
typename = argstr
name = ''
arguments.append((typename, name))
del current_argument[:]
while i < len(argstr):
c = argstr[i]
if c == ',':
finish_arg()
elif c == '(':
p = skip_brackets(argstr[i:], '(', ')')
current_argument += argstr[i:i+p]
i += p-1
else:
current_argument += c
i += 1
finish_arg()
return arguments
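# Behaviour sketch (assumed input): split_arguments turns a C argument list
# into (typename, name) pairs, keeping parenthesised sub-expressions intact.
def _example_split_arguments():
    return split_arguments('PyObject *obj, int n')
    # -> [('PyObject *', 'obj'), ('int', 'n')]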
def find_functions(filename, tag='API'):
"""
Scan the file, looking for tagged functions.
Assuming ``tag=='API'``, a tagged function looks like::
/*API*/
static returntype*
function_name(argtype1 arg1, argtype2 arg2)
{
}
where the return type must be on a separate line, the function
name must start the line, and the opening ``{`` must start the line.
An optional documentation comment in ReST format may follow the tag,
as in::
/*API
This function does foo...
*/
"""
fo = open(filename, 'r')
functions = []
return_type = None
function_name = None
function_args = []
doclist = []
SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = range(5)
state = SCANNING
tagcomment = '/*' + tag
for lineno, line in enumerate(fo):
try:
line = line.strip()
if state == SCANNING:
if line.startswith(tagcomment):
if line.endswith('*/'):
state = STATE_RETTYPE
else:
state = STATE_DOC
elif state == STATE_DOC:
if line.startswith('*/'):
state = STATE_RETTYPE
else:
line = line.lstrip(' *')
doclist.append(line)
elif state == STATE_RETTYPE:
# first line of declaration with return type
m = re.match(r'NPY_NO_EXPORT\s+(.*)$', line)
if m:
line = m.group(1)
return_type = line
state = STATE_NAME
elif state == STATE_NAME:
# second line, with function name
m = re.match(r'(\w+)\s*\(', line)
if m:
function_name = m.group(1)
else:
raise ParseError(filename, lineno+1,
'could not find function name')
function_args.append(line[m.end():])
state = STATE_ARGS
elif state == STATE_ARGS:
if line.startswith('{'):
# finished
fargs_str = ' '.join(function_args).rstrip(' )')
fargs = split_arguments(fargs_str)
f = Function(function_name, return_type, fargs,
'\n'.join(doclist))
functions.append(f)
return_type = None
function_name = None
function_args = []
doclist = []
state = SCANNING
else:
function_args.append(line)
except:
print(filename, lineno+1)
raise
fo.close()
return functions
def should_rebuild(targets, source_files):
from distutils.dep_util import newer_group
for t in targets:
if not os.path.exists(t):
return True
sources = API_FILES + list(source_files) + [__file__]
if newer_group(sources, targets[0], missing='newer'):
return True
return False
# These *Api class instances know how to output strings for the generated code
class TypeApi(object):
def __init__(self, name, index, ptr_cast, api_name):
self.index = index
self.name = name
self.ptr_cast = ptr_cast
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s (*(%s *)%s[%d])" % (self.name,
self.ptr_cast,
self.api_name,
self.index)
def array_api_define(self):
return " (void *) &%s" % self.name
def internal_define(self):
astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyTypeObject %(type)s;
#else
NPY_NO_EXPORT PyTypeObject %(type)s;
#endif
""" % {'type': self.name}
return astr
class GlobalVarApi(object):
def __init__(self, name, index, type, api_name):
self.name = name
self.index = index
self.type = type
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s (*(%s *)%s[%d])" % (self.name,
self.type,
self.api_name,
self.index)
def array_api_define(self):
return " (%s *) &%s" % (self.type, self.name)
def internal_define(self):
astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT %(type)s %(name)s;
#else
NPY_NO_EXPORT %(type)s %(name)s;
#endif
""" % {'type': self.type, 'name': self.name}
return astr
# Dummy to be able to consistently use *Api instances for all items in the
# array api
class BoolValuesApi(object):
def __init__(self, name, index, api_name):
self.name = name
self.index = index
self.type = 'PyBoolScalarObject'
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s ((%s *)%s[%d])" % (self.name,
self.type,
self.api_name,
self.index)
def array_api_define(self):
return " (void *) &%s" % self.name
def internal_define(self):
astr = """\
#ifdef NPY_ENABLE_SEPARATE_COMPILATION
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#else
NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#endif
"""
return astr
class FunctionApi(object):
def __init__(self, name, index, return_type, args, api_name):
self.name = name
self.index = index
self.return_type = return_type
self.args = args
self.api_name = api_name
def _argtypes_string(self):
if not self.args:
return 'void'
argstr = ', '.join([_repl(a[0]) for a in self.args])
return argstr
def define_from_array_api_string(self):
define = """\
#define %s \\\n (*(%s (*)(%s)) \\
%s[%d])""" % (self.name,
self.return_type,
self._argtypes_string(),
self.api_name,
self.index)
return define
def array_api_define(self):
return " (void *) %s" % self.name
def internal_define(self):
astr = """\
NPY_NO_EXPORT %s %s \\\n (%s);""" % (self.return_type,
self.name,
self._argtypes_string())
return astr
def order_dict(d):
"""Order dict by its values."""
o = d.items()
def _key(x):
return (x[1], x[0])
return sorted(o, key=_key)
def merge_api_dicts(dicts):
ret = {}
for d in dicts:
for k, v in d.items():
ret[k] = v
return ret
def check_api_dict(d):
"""Check that an api dict is valid (does not use the same index twice)."""
    # We check whether the same index is used twice: we 'revert' the dict so
    # that indexes become keys. If the lengths differ, some index was used at
    # least twice.
revert_dict = dict([(v, k) for k, v in d.items()])
if not len(revert_dict) == len(d):
# We compute a dict index -> list of associated items
doubled = {}
for name, index in d.items():
try:
doubled[index].append(name)
except KeyError:
doubled[index] = [name]
msg = """\
Same index has been used twice in api definition: %s
""" % ['index %d -> %s' % (index, names) for index, names in doubled.items() \
if len(names) != 1]
raise ValueError(msg)
    # No 'holes' in the indexes are allowed, and indexing must start at 0
indexes = set(d.values())
expected = set(range(len(indexes)))
if not indexes == expected:
diff = expected.symmetric_difference(indexes)
msg = "There are some holes in the API indexing: " \
"(symmetric diff is %s)" % diff
raise ValueError(msg)
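# Sketch of the validation above (hypothetical names): indexes must be unique
# and form a dense range starting at 0, otherwise a ValueError is raised.
def _example_check_api_dict():
    check_api_dict({'PyArray_A': 0, 'PyArray_B': 1})      # valid: dense, unique
    try:
        check_api_dict({'PyArray_A': 0, 'PyArray_B': 2})  # hole at index 1
    except ValueError:
        return True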
def get_api_functions(tagname, api_dict):
"""Parse source files to get functions tagged by the given tag."""
functions = []
for f in API_FILES:
functions.extend(find_functions(f, tagname))
dfunctions = []
for func in functions:
o = api_dict[func.name]
dfunctions.append( (o, func) )
dfunctions.sort()
return [a[1] for a in dfunctions]
def fullapi_hash(api_dicts):
"""Given a list of api dicts defining the numpy C API, compute a checksum
of the list of items in the API (as a string)."""
a = []
for d in api_dicts:
def sorted_by_values(d):
"""Sort a dictionary by its values. Assume the dictionary items is of
the form func_name -> order"""
return sorted(d.items(), key=lambda x_y: (x_y[1], x_y[0]))
for name, index in sorted_by_values(d):
a.extend(name)
a.extend(str(index))
return md5new(''.join(a).encode('ascii')).hexdigest()
# To parse strings like 'hex = checksum' where hex is e.g. 0x1234567F and
# the checksum is a 128-bit md5 checksum (hex format as well)
VERRE = re.compile(r'(^0x[\da-f]{8})\s*=\s*([\da-f]{32})')
def get_versions_hash():
d = []
file = os.path.join(os.path.dirname(__file__), 'cversions.txt')
fid = open(file, 'r')
try:
for line in fid.readlines():
m = VERRE.match(line)
if m:
d.append((int(m.group(1), 16), m.group(2)))
finally:
fid.close()
return dict(d)
def main():
tagname = sys.argv[1]
order_file = sys.argv[2]
functions = get_api_functions(tagname, order_file)
m = md5new(tagname)
for func in functions:
print(func)
ah = func.api_hash()
m.update(ah)
print(hex(int(ah,16)))
print(hex(int(m.hexdigest()[:8],16)))
if __name__ == '__main__':
main()
import lexical_analyzer
class Parser():
def __init__(self):
self.lexemes = []
self.lookahead = None
def nextLexeme(self):
if self.lexemes:
self.lookahead = self.lexemes.pop(0)
#print(self.lookahead[1])
def assert_next(self, expected_value, error_message):
if self.lookahead[0] == expected_value:
self.nextLexeme()
else:
print(error_message + ' before ' + self.lookahead[1])
def assert_delimiter(self):
self.assert_next('SEMICOLON_KEYWORD', 'expected semicolon')
def check_next(self, expected_values):
if len(expected_values) == 1:
return self.lookahead[0] == expected_values[0]
for value in expected_values:
if self.lookahead[0] == value:
return True
return False
def parse(self, lexemes):
self.lexemes = lexemes
self.nextLexeme()
while self.lexemes:
self.function()
def codeblock(self):
self.assert_next('OPEN_CURLY_BRACE_KEYWORD', 'expected {')
while not self.check_next(['CLOSE_CURLY_BRACE_KEYWORD']):
self.statement()
self.assert_next('CLOSE_CURLY_BRACE_KEYWORD', 'expected }')
def statement(self):
# STATEMENT := EXPRESSION | INPUT | OUTPUT | COMMENT | IFSTMT | SWITCHSTMT | LOOPSTMT | FUNCTIONDEC | RETURN | INCREMENT | DECREMENT | break | continue
if self.check_next(['INPUT_KEYWORD']):
self.input()
elif self.check_next(['OUTPUT_KEYWORD']):
self.output()
elif self.check_next(['VAR_KEYWORD']):
self.vardec()
elif self.check_next(['SINGLE_LINE_COMMENT']):
self.nextLexeme()
elif self.check_next(['IF_KEYWORD']):
self.ifstmt()
elif self.check_next(['SWITCH_KEYWORD']):
self.switch()
elif self.check_next(['WHILE_KEYWORD']):
self.while_loop()
elif self.check_next(['DO_KEYWORD']):
self.do_while_loop()
elif self.check_next(['FOR_KEYWORD']):
self.for_loop()
elif self.check_next(['FOREACH_KEYWORD']):
self.foreach_loop()
elif self.check_next(['FUNCTION_KEYWORD']):
self.function()
elif self.check_next(['RETURN_KEYWORD']):
self.returnstmt()
elif self.check_next(['BREAK_KEYWORD']):
self.breakstmt()
elif self.check_next(['CONTINUE_KEYWORD']):
self.continuestmt()
def input(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
self.assert_next('IDENTIFIER', 'expected identifier')
if self.check_next(['COMMA_KEYWORD']):
self.nextLexeme()
self.assert_next('STRING_LITERAL', 'expected string literal')
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
def output(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
def vardec(self):
self.nextLexeme()
self.assert_next('IDENTIFIER', 'expected identifier')
if self.check_next(['EQUAL_SIGN_KEYWORD']):
self.nextLexeme()
if self.check_next(['OPEN_BRACKET_KEYWORD']):
self.nextLexeme()
while not self.check_next(['CLOSE_BRACKET_KEYWORD']):
self.expression()
if not self.check_next(['CLOSE_BRACKET_KEYWORD']):
self.assert_next('COMMA_KEYWORD', 'expected comma')
self.assert_next('CLOSE_BRACKET_KEYWORD', 'expected ]')
else:
self.expression()
self.assert_delimiter()
def ifstmt(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.codeblock()
        while self.check_next(['ELSIF_KEYWORD']):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.codeblock()
if self.check_next(['ELSE_KEYWORD']):
self.nextLexeme()
self.codeblock()
def switch(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_next('OPEN_CURLY_BRACE_KEYWORD', 'expected {')
while not self.check_next(['CLOSE_CURLY_BRACE_KEYWORD']):
self.caseblock()
if self.check_next(['DEFAULT_KEYWORD']):
break
if self.check_next(['DEFAULT_KEYWORD']):
self.nextLexeme()
self.codeblock()
self.assert_next('CLOSE_CURLY_BRACE_KEYWORD', 'expected }')
def caseblock(self):
self.assert_next('CASE_KEYWORD', 'expected case')
        if self.check_next(['STRING_LITERAL', 'INTEGER_LITERAL', 'FLOAT_LITERAL']):
            self.nextLexeme()  # consume the case literal (literal() only peeks)
else:
print('expected literal at ' + self.lookahead[1])
self.assert_next('INTEGER_LITERAL', 'expected literal')
self.codeblock()
def while_loop(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.codeblock()
def do_while_loop(self):
self.nextLexeme()
self.codeblock()
self.assert_next('WHILE_KEYWORD', 'expected while')
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.assert_delimiter()
def for_loop(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
self.expression()
self.assert_delimiter()
self.expression()
self.assert_delimiter()
self.expression()
        self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.codeblock()
def foreach_loop(self):
self.nextLexeme()
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
self.assert_next('IDENTIFIER', 'expected identifier')
self.assert_next('IN_KEYWORD', 'expected in')
self.assert_next('IDENTIFIER', 'expected identifier')
        self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.codeblock()
def function(self):
self.nextLexeme()
self.assert_next('IDENTIFIER', 'expected function identifier')
self.assert_next('OPEN_PARENTHESIS_KEYWORD', 'expected (')
while not self.check_next(['CLOSE_PARENTHESIS_KEYWORD']):
self.assert_next('IDENTIFIER', 'expected identifier')
if not self.check_next(['CLOSE_PARENTHESIS_KEYWORD']):
self.assert_next('COMMA_KEYWORD', 'expected comma')
        self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
self.codeblock()
def returnstmt(self):
self.nextLexeme()
self.expression()
self.assert_delimiter()
def breakstmt(self):
self.nextLexeme()
self.assert_delimiter()
def continuestmt(self):
self.nextLexeme()
self.assert_delimiter()
def expression(self):
self.operation()
def operation(self):
if self.term():
if self.operator():
self.nextLexeme()
if self.operation():
return True
else:
return False
else:
return True
else:
print('error at ' + self.lookahead[1])
return False
def term(self):
if self.operand():
if self.operator():
self.nextLexeme()
if self.term():
return True
else:
return False
else:
return True
else:
print('error at ' + self.lookahead[1])
return False
def operand(self):
if self.check_next(['OPEN_PARENTHESIS_KEYWORD']):
self.nextLexeme()
self.expression()
self.assert_next('CLOSE_PARENTHESIS_KEYWORD', 'expected )')
return True
else:
if self.literal() or self.variable():
self.nextLexeme()
return True
else:
print('error at ' + self.lookahead[1])
return False
def operator(self):
return self.check_next(['PLUS_KEYWORD', 'MINUS_KEYWORD', 'MULTIPLY_KEYWORD', 'DIVIDE_KEYWORD', 'MODULO_KEYWORD'])
def literal(self):
return self.check_next(['INTEGER_LITERAL', 'FLOAT_LITERAL', 'STRING_LITERAL', 'TRUE_KEYWORD', 'FALSE_KEYWORD'])
def variable(self):
return self.check_next(['IDENTIFIER'])
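# Hand-built sanity check (token names assumed to match the lexical
# analyzer's): parse `function f() { break; }` without reading sample.ric.
def _example_parse_tokens():
    tokens = [
        ('FUNCTION_KEYWORD', 'function'),
        ('IDENTIFIER', 'f'),
        ('OPEN_PARENTHESIS_KEYWORD', '('),
        ('CLOSE_PARENTHESIS_KEYWORD', ')'),
        ('OPEN_CURLY_BRACE_KEYWORD', '{'),
        ('BREAK_KEYWORD', 'break'),
        ('SEMICOLON_KEYWORD', ';'),
        ('CLOSE_CURLY_BRACE_KEYWORD', '}'),
    ]
    Parser().parse(tokens)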
dfa = lexical_analyzer.create_DFA()
code = open('sample.ric', 'r').read().strip()
lexemes = dfa.tokenize(code)
print(lexemes)
parser = Parser()
parser.parse(lexemes)
#!/usr/bin/env python
# make the other metrics work
# generate the txt files, then work on the pdf output
__version__ = "0.1.0"
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import sys
import os
import re
import networkx as nx
import tdec.PHRG as phrg
import tdec.tree_decomposition as td
import tdec.probabilistic_cfg as pcfg
import tdec.net_metrics as metrics
import tdec.load_edgelist_from_dataframe as tdf
import pprint as pp
import argparse, traceback
import tdec.graph_sampler as gs
DBG = False
#~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~#~##~#~#~#~100
def get_parser ():
parser = argparse.ArgumentParser(description='Infer a model given a graph (derive a model)')
parser.add_argument('--orig', required=True, nargs=1, help='Filename of edgelist graph')
parser.add_argument('--chunglu', help='Generate chunglu graphs',action='store_true')
parser.add_argument('--kron', help='Generate Kronecker product graphs',action='store_true')
parser.add_argument('--samp', help='Sample sg>dur>gg2targetN', action='store_true')
parser.add_argument('-tw', action='store_true', default=False, required=False, help="print xphrg mcs tw")
parser.add_argument('-prs', action='store_true', default=False, required=False, help="stop at prs")
parser.add_argument('--version', action='version', version=__version__)
return parser
def nslog(arb_str):
print "~^."*20
print "\t", arb_str.split("_")
print
def Hstar_Graphs_Control (G, graph_name, axs=None):
    # Derive the prod rules in a naive way, ignoring temporal information
prod_rules = phrg.probabilistic_hrg_learning(G)
pp.pprint(prod_rules)
exit()
g = pcfg.Grammar('S')
for (id, lhs, rhs, prob) in prod_rules:
g.add_rule(pcfg.Rule(id, lhs, rhs, prob))
num_nodes = G.number_of_nodes()
print "Starting max size", 'n=', num_nodes
g.set_max_size(num_nodes)
print "Done with max size"
Hstars = []
num_samples = 20
print '*' * 40
for i in range(0, num_samples):
rule_list = g.sample(num_nodes)
hstar = phrg.grow(rule_list, g)[0]
Hstars.append(hstar)
# if 0:
# g = nx.from_pandas_dataframe(df, 'src', 'trg', edge_attr=['ts'])
# draw_degree_whole_graph(g,axs)
# draw_degree(Hstars, axs=axs, col='r')
# #axs.set_title('Rules derived by ignoring time')
# axs.set_ylabel('Frequency')
# axs.set_xlabel('degree')
if 0:
# metricx = [ 'degree','hops', 'clust', 'assort', 'kcore','eigen','gcd']
metricx = ['gcd']
# g = nx.from_pandas_dataframe(df, 'src', 'trg',edge_attr=['ts'])
# graph_name = os.path.basename(f_path).rstrip('.tel')
if DBG: print ">", graph_name
metrics.network_properties([G], metricx, Hstars, name=graph_name, out_tsv=True)
def pandas_dataframes_from_edgelists (el_files):
if (el_files is None): return
list_of_dataframes = []
for f in el_files:
print '~' * 80
print f
temporal_graph = False
with open(f, 'r') as ifile:
line = ifile.readline()
            while not temporal_graph:
                if "%" in line:
                    # skip comment/header lines
                    line = ifile.readline()
                elif len(line.split()) > 3:
                    temporal_graph = True
                else:
                    # first data line has <= 3 columns: not a temporal graph
                    break
if (temporal_graph):
dat = np.genfromtxt(f, dtype=np.int64, comments='%', delimiter="\t", usecols=[0, 1, 3], autostrip=True)
df = pd.DataFrame(dat, columns=['src', 'trg', 'ts'])
else:
dat = np.genfromtxt(f, dtype=np.int64, comments='%', delimiter="\t", usecols=[0, 1], autostrip=True)
df = pd.DataFrame(dat, columns=['src', 'trg'])
df = df.drop_duplicates()
list_of_dataframes.append(df)
return list_of_dataframes
def grow_exact_size_hrg_graphs_from_prod_rules(prod_rules, gname, n, runs=1):
"""
Args:
    prod_rules: production rules (model)
gname: graph name
n: target graph order (number of nodes)
runs: how many graphs to generate
Returns: list of synthetic graphs
"""
nslog("grow_exact_size_hrg_graphs_from_prod_rules")
DBG = True
if n <=0: sys.exit(1)
g = pcfg.Grammar('S')
for (id, lhs, rhs, prob) in prod_rules:
g.add_rule(pcfg.Rule(id, lhs, rhs, prob))
print
print "Added rules HRG (pr", len(prod_rules),", n,", n,")"
num_nodes = n
if DBG: print "Starting max size"
g.set_max_size(num_nodes)
if DBG: print "Done with max size"
hstars_lst = []
print " ",
for i in range(0, runs):
print '>',
rule_list = g.sample(num_nodes)
hstar = phrg.grow(rule_list, g)[0]
hstars_lst.append(hstar)
return hstars_lst
def pwrlaw_plot (xdata, ydata, yerr):
from scipy import linspace, randn, log10, optimize, sqrt
powerlaw = lambda x, amp, index: amp * (x**index)
logx = log10(xdata)
logy = log10(ydata)
logyerr = yerr / ydata
# define our (line) fitting function
fitfunc = lambda p, x: p[0] + p[1] * x
errfunc = lambda p, x, y, err: (y - fitfunc(p, x)) / err
pinit = [1.0, -1.0]
out = optimize.leastsq(errfunc, pinit,
args=(logx, logy, logyerr), full_output=1)
pfinal = out[0]
covar = out[1]
print pfinal
print covar
index = pfinal[1]
amp = 10.0**pfinal[0]
    indexErr = sqrt( covar[1][1] )        # index is pfinal[1]
    ampErr = sqrt( covar[0][0] ) * amp    # amp comes from pfinal[0]
print index
# ########
# plotting
# ########
# ax.plot(ydata)
# ax.plot(pl_sequence)
fig, axs = plt.subplots(2,1)
axs[0].plot(xdata, powerlaw(xdata, amp, index)) # Fit
axs[0].errorbar(xdata, ydata, yerr=yerr, fmt='k.') # Data
(yh1,yh2) = (axs[0].get_ylim()[1]*.9, axs[0].get_ylim()[1]*.8)
xh = axs[0].get_xlim()[0]*1.1
print axs[0].get_ylim()
print (yh1,yh2)
axs[0].text(xh, yh1, 'Ampli = %5.2f +/- %5.2f' % (amp, ampErr))
axs[0].text(xh, yh2, 'Index = %5.2f +/- %5.2f' % (index, indexErr))
axs[0].set_title('Best Fit Power Law')
axs[0].set_xlabel('X')
axs[0].set_ylabel('Y')
# xlim(1, 11)
#
# subplot(2, 1, 2)
axs[1].loglog(xdata, powerlaw(xdata, amp, index))
axs[1].errorbar(xdata, ydata, yerr=yerr, fmt='k.') # Data
axs[1].set_xlabel('X (log scale)')
axs[1].set_ylabel('Y (log scale)')
import datetime
figfname = datetime.datetime.now().strftime("%d%b%y")+"_pl"
plt.savefig(figfname, bbox_inches='tight')
return figfname
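# Why the fit above works in log space (self-contained check with assumed
# values): y = amp * x**index implies log10(y) = log10(amp) + index*log10(x),
# a straight line whose slope is the index and whose intercept encodes amp.
def _example_powerlaw_logspace():
    import numpy as np
    amp, index = 2.5, -1.3
    x = np.array([1.0, 10.0, 100.0])
    y = amp * x ** index
    slope, intercept = np.polyfit(np.log10(x), np.log10(y), 1)
    return np.allclose([slope, 10.0 ** intercept], [index, amp])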
def deg_vcnt_to_disk(orig_graph, synthetic_graphs):
df = pd.DataFrame(orig_graph.degree().items())
gb = df.groupby([1]).count()
# gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
gb.index.rename('k',inplace=True)
gb.columns=['vcnt']
gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
# ## - group of synth graphs -
deg_df = pd.DataFrame()
for g in synthetic_graphs:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
# Degree vs cnt
deg_df = pd.concat([deg_df, gb], axis=1) # Appends to bottom new DFs
# print gb
deg_df['mean'] = deg_df.mean(axis=1)
deg_df.index.rename('k',inplace=True)
deg_df['mean'].to_csv("Results/deg_xphrg_"+orig_graph.name+".tsv", sep='\t', header=True)
def plot_g_hstars(orig_graph, synthetic_graphs):
df = pd.DataFrame(orig_graph.degree().items())
gb = df.groupby([1]).count()
# gb.to_csv("Results/deg_orig_"+orig_graph.name+".tsv", sep='\t', header=True)
gb.index.rename('k',inplace=True)
gb.columns=['vcnt']
# k_cnt = [(x.tolist(),y.values[0]) for x,y in gb.iterrows()]
xdata = np.array([x.tolist() for x,y in gb.iterrows()])
ydata = np.array([y.values[0] for x,y in gb.iterrows()])
yerr = ydata *0.000001
fig, ax = plt.subplots()
ax.plot(gb.index.values, gb['vcnt'].values,'-o', markersize=8, markerfacecolor='w', markeredgecolor=[0,0,1], alpha=0.5, label="orig")
ofname = pwrlaw_plot(xdata, ydata,yerr)
    if os.path.exists(ofname): print '... Plot saved to:', ofname
deg_df = pd.DataFrame()
for g in synthetic_graphs:
d = g.degree()
df = pd.DataFrame.from_dict(d.items())
gb = df.groupby(by=[1]).count()
# Degree vs cnt
deg_df = pd.concat([deg_df, gb], axis=1) # Appends to bottom new DFs
# print gb
deg_df['mean'] = deg_df.mean(axis=1)
deg_df.index.rename('k',inplace=True)
# ax.plot(y=deg_df.mean(axis=1))
# ax.plot(y=deg_df.median(axis=1))
# ax.plot()
# orig
deg_df.mean(axis=1).plot(ax=ax,label='mean',color='r')
deg_df.median(axis=1).plot(ax=ax,label='median',color='g')
ax.fill_between(deg_df.index, deg_df.mean(axis=1) - deg_df.sem(axis=1),
deg_df.mean(axis=1) + deg_df.sem(axis=1), alpha=0.2, label="se")
# ax.plot(k_cnt)
# deg_df.plot(ax=ax)
# for x,y in k_cnt:
# if DBG: print "{}\t{}".format(x,y)
#
#
# for g in synths:
# df = pd.DataFrame(g.degree().items())
# gb = df.groupby([1]).count()
# # gb.plot(ax=ax)
# for x,y in k_cnt:
# if DBG: print "{}\t{}".format(x,y)
#
# # Curve-fit
#
plt.savefig('tmpfig', bbox_inches='tight')
def treewidth(parent, children,twlst ):
twlst.append(parent)
for x in children:
if isinstance(x, (tuple,list)):
treewidth(x[0],x[1],twlst)
else:
print type(x), len(x)
def print_treewidth(tree):
root, children = tree
print " computing tree width"
twdth=[]
treewidth(root, children,twdth)
print ' Treewidth:', np.max([len(x)-1 for x in twdth])
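# Minimal sketch (assumed tree shape): a decomposition is a (bag, children)
# pair; treewidth() collects every bag, and the treewidth is max bag size - 1.
def _example_treewidth():
    tree = (frozenset([1, 2]), [(frozenset([2, 3]), []),
                                (frozenset([2, 4, 5]), [])])
    bags = []
    treewidth(tree[0], tree[1], bags)
    return max(len(b) - 1 for b in bags)  # -> 2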
def get_hrg_production_rules(edgelist_data_frame, graph_name, tw=False, n_subg=2,n_nodes=300):
from tdec.growing import derive_prules_from
nslog("get_hrg_production_rules")
df = edgelist_data_frame
if df.shape[1] == 4:
G = nx.from_pandas_dataframe(df, 'src', 'trg', edge_attr=True) # whole graph
elif df.shape[1] ==3:
G = nx.from_pandas_dataframe(df, 'src', 'trg', ['ts']) # whole graph
else:
G = nx.from_pandas_dataframe(df, 'src', 'trg')
G.name = graph_name
G.remove_edges_from(G.selfloop_edges())
giant_nodes = max(nx.connected_component_subgraphs(G), key=len)
G = nx.subgraph(G, giant_nodes)
num_nodes = G.number_of_nodes()
phrg.graph_checks(G)
if DBG: print
if DBG: print "--------------------"
if not DBG: print "-Tree Decomposition-"
if DBG: print "--------------------"
prod_rules = {}
K = n_subg
n = n_nodes
if num_nodes >= 500:
print 'Grande'
for Gprime in gs.rwr_sample(G, K, n):
T = td.quickbb(Gprime)
root = list(T)[0]
T = td.make_rooted(T, root)
T = phrg.binarize(T)
root = list(T)[0]
root, children = T
#td.new_visit(T, G, prod_rules, TD)
td.new_visit(T, G, prod_rules)
else:
T = td.quickbb(G)
root = list(T)[0]
T = td.make_rooted(T, root)
T = phrg.binarize(T)
root = list(T)[0]
root, children = T
# td.new_visit(T, G, prod_rules, TD)
td.new_visit(T, G, prod_rules)
if tw:
print_treewidth(T)
exit()
## --
print ("prod_rules:",len(prod_rules), type(prod_rules))
if DBG: print
if DBG: print "--------------------"
if DBG: print "- Production Rules -"
if DBG: print "--------------------"
for k in prod_rules.iterkeys():
if DBG: print k
s = 0
for d in prod_rules[k]:
s += prod_rules[k][d]
for d in prod_rules[k]:
            prod_rules[k][d] = float(prod_rules[k][d]) / float(s)  # normalization step to create probs, not counts
if DBG: print '\t -> ', d, prod_rules[k][d]
rules = []
id = 0
for k, v in prod_rules.iteritems():
sid = 0
for x in prod_rules[k]:
rhs = re.findall("[^()]+", x)
rules.append(("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x]))
if DBG: print ("r%d.%d" % (id, sid), "%s" % re.findall("[^()]+", k)[0], rhs, prod_rules[k][x])
sid += 1
id += 1
df = pd.DataFrame(rules)
print "++++++++++"
df.to_csv('ProdRules/{}_prs.tsv'.format(G.name), header=False, index=False, sep="\t")
if os.path.exists('ProdRules/{}_prs.tsv'.format(G.name)):
print 'Saved', 'ProdRules/{}_prs.tsv'.format(G.name)
else:
print "Trouble saving"
print "-----------"
print [type(x) for x in rules[0]]
'''
Graph Generation of Synthetic Graphs
    Grow graphs using the union of rules from sampled subgraphs to predict the
    target order of the original graph
'''
hStars = grow_exact_size_hrg_graphs_from_prod_rules(rules, graph_name, G.number_of_nodes(),10)
    print '... hStars graphs:', len(hStars)
if 0:
metricx = ['degree','hops', 'clust', 'assort', 'kcore','eigen','gcd']
metricx = ['gcd']
metrics.network_properties([G], metricx, hStars, name=graph_name, out_tsv=False)
if __name__ == '__main__':
parser = get_parser()
args = vars(parser.parse_args())
# load orig file into DF and get the dataset name into g_name
datframes = tdf.Pandas_DataFrame_From_Edgelist(args['orig'])
df = datframes[0]
g_name = [x for x in os.path.basename(args['orig'][0]).split('.') if len(x)>3][0]
if args['chunglu']:
print 'Generate chunglu graphs given an edgelist'
sys.exit(0)
elif args['kron']:
        print 'Generate Kronecker graphs given an edgelist'
sys.exit(0)
elif args['samp']:
print 'Sample K subgraphs of n nodes'
K = 500
n = 25
get_hrg_production_rules(df,g_name,n_subg=K, n_nodes=n)
else:
try:
get_hrg_production_rules(df,g_name, args['tw'])
except Exception, e:
print 'ERROR, UNEXPECTED SAVE PLOT EXCEPTION'
print str(e)
traceback.print_exc()
os._exit(1)
sys.exit(0)
import fnmatch
from steam.core.msg.unified import get_um
from steam.core.msg.structs import get_struct
from steam.core.msg.headers import MsgHdr, ExtendedMsgHdr, MsgHdrProtoBuf, GCMsgHdr, GCMsgHdrProto
from steam.enums import EResult
from steam.enums.emsg import EMsg
from steam.exceptions import SteamError
from steam.core.msg.structs import StructMessage as _StructMessage
from google.protobuf.message import Message as _ProtoMessageType
from steam.protobufs import steammessages_base_pb2
from steam.protobufs import steammessages_clientserver_pb2
from steam.protobufs import steammessages_clientserver_2_pb2
from steam.protobufs import steammessages_clientserver_friends_pb2
from steam.protobufs import steammessages_clientserver_login_pb2
from steam.protobufs import steammessages_clientserver_appinfo_pb2
from steam.protobufs import steammessages_clientserver_gameservers_pb2
from steam.protobufs import steammessages_clientserver_lbs_pb2
from steam.protobufs import steammessages_clientserver_mms_pb2
from steam.protobufs import steammessages_clientserver_ucm_pb2
from steam.protobufs import steammessages_clientserver_uds_pb2
from steam.protobufs import steammessages_clientserver_ufs_pb2
from steam.protobufs import steammessages_clientserver_userstats_pb2
cmsg_lookup_predefined = {
EMsg.Multi: steammessages_base_pb2.CMsgMulti,
EMsg.ClientToGC: steammessages_clientserver_2_pb2.CMsgGCClient,
EMsg.ClientFromGC: steammessages_clientserver_2_pb2.CMsgGCClient,
EMsg.ClientServiceMethodLegacy: steammessages_clientserver_2_pb2.CMsgClientServiceMethodLegacy,
EMsg.ClientServiceMethodLegacyResponse: steammessages_clientserver_2_pb2.CMsgClientServiceMethodLegacyResponse,
EMsg.ClientGetNumberOfCurrentPlayersDP: steammessages_clientserver_2_pb2.CMsgDPGetNumberOfCurrentPlayers,
EMsg.ClientGetNumberOfCurrentPlayersDPResponse: steammessages_clientserver_2_pb2.CMsgDPGetNumberOfCurrentPlayersResponse,
# EMsg.ClientEmailChange4: steammessages_clientserver_2_pb2.CMsgClientEmailChange,
# EMsg.ClientEmailChangeResponse4: steammessages_clientserver_2_pb2.CMsgClientEmailChangeResponse,
EMsg.ClientLogonGameServer: steammessages_clientserver_login_pb2.CMsgClientLogon,
EMsg.ClientCurrentUIMode: steammessages_clientserver_2_pb2.CMsgClientUIMode,
EMsg.ClientChatOfflineMessageNotification: steammessages_clientserver_2_pb2.CMsgClientOfflineMessageNotification,
}
cmsg_lookup = dict()
for proto_module in [
steammessages_clientserver_pb2,
steammessages_clientserver_2_pb2,
steammessages_clientserver_friends_pb2,
steammessages_clientserver_login_pb2,
steammessages_clientserver_appinfo_pb2,
steammessages_clientserver_gameservers_pb2,
steammessages_clientserver_lbs_pb2,
steammessages_clientserver_mms_pb2,
steammessages_clientserver_ucm_pb2,
steammessages_clientserver_uds_pb2,
steammessages_clientserver_ufs_pb2,
steammessages_clientserver_userstats_pb2,
]:
cmsg_list = proto_module.__dict__
cmsg_list = fnmatch.filter(cmsg_list, 'CMsg*')
cmsg_lookup.update(dict(zip(map(lambda cmsg_name: cmsg_name.lower(), cmsg_list),
map(lambda cmsg_name: getattr(proto_module, cmsg_name), cmsg_list)
)))
def get_cmsg(emsg):
"""Get protobuf for a given EMsg
:param emsg: EMsg
:type emsg: :class:`steam.enums.emsg.EMsg`, :class:`int`
:return: protobuf message
"""
if not isinstance(emsg, EMsg):
emsg = EMsg(emsg)
if emsg in cmsg_lookup_predefined:
return cmsg_lookup_predefined[emsg]
else:
enum_name = emsg.name.lower()
if enum_name.startswith("econ"): # special case for 'EconTrading_'
enum_name = enum_name[4:]
cmsg_name = "cmsg" + enum_name
return cmsg_lookup.get(cmsg_name, None)
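# Example of the lookup above: predefined EMsgs resolve through the explicit
# table, everything else through the lower-cased "cmsg<emsg name>" convention.
def _example_get_cmsg():
    return get_cmsg(EMsg.Multi)  # -> steammessages_base_pb2.CMsgMulti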
class Msg(object):
proto = False
body = None #: message instance
payload = None #: Will contain body payload, if we fail to find correct message class
def __init__(self, msg, data=None, extended=False, parse=True):
self.extended = extended
self.header = ExtendedMsgHdr(data) if extended else MsgHdr(data)
self.msg = msg
if data:
self.payload = data[self.header._size:]
if parse:
self.parse()
def parse(self):
"""Parses :attr:`payload` into :attr:`body` instance"""
if self.body is None:
deserializer = get_struct(self.msg)
if deserializer:
self.body = deserializer(self.payload)
self.payload = None
else:
self.body = '!!! Failed to resolve message !!!'
@property
def msg(self):
return self.header.msg
@msg.setter
def msg(self, value):
self.header.msg = EMsg(value)
def serialize(self):
return self.header.serialize() + self.body.serialize()
@property
def steamID(self):
return (self.header.steamID
if isinstance(self.header, ExtendedMsgHdr)
else None
)
@steamID.setter
def steamID(self, value):
if isinstance(self.header, ExtendedMsgHdr):
self.header.steamID = value
@property
def sessionID(self):
return (self.header.sessionID
if isinstance(self.header, ExtendedMsgHdr)
else None
)
@sessionID.setter
def sessionID(self, value):
if isinstance(self.header, ExtendedMsgHdr):
self.header.sessionID = value
def __repr__(self):
if isinstance(self.body, _StructMessage):
suffix = self.body.__class__.__name__
elif self.payload:
suffix = 'not parsed'
else:
suffix = 'n/a'
return "<Msg(%r | %s)>" % (self.msg, suffix)
def __str__(self):
rows = [repr(self)]
header = str(self.header)
rows.append("-------------- header --")
rows.append(header if header else "(empty)")
body = str(self.body)
rows.append("---------------- body --")
rows.append(body if body else "(empty)")
if self.payload:
rows.append("------------- payload --")
rows.append(repr(self.payload))
return '\n'.join(rows)
class MsgProto(object):
proto = True
body = None #: protobuf message instance
payload = None #: Will contain body payload, if we fail to find correct proto message
def __init__(self, msg, data=None, parse=True):
self._header = MsgHdrProtoBuf(data)
self.header = self._header.proto
self.msg = msg
if data:
self.payload = data[self._header._fullsize:]
if parse:
self.parse()
def parse(self):
"""Parses :attr:`payload` into :attr:`body` instance"""
if self.body is None:
if self.msg in (EMsg.ServiceMethod, EMsg.ServiceMethodResponse, EMsg.ServiceMethodSendToClient):
                is_resp = self.msg != EMsg.ServiceMethod
proto = get_um(self.header.target_job_name, response=is_resp)
else:
proto = get_cmsg(self.msg)
if proto:
self.body = proto()
if self.payload:
self.body.ParseFromString(self.payload)
self.payload = None
else:
self.body = '!!! Failed to resolve message !!!'
@property
def msg(self):
return self._header.msg
@msg.setter
def msg(self, value):
self._header.msg = EMsg(value)
def serialize(self):
return self._header.serialize() + self.body.SerializeToString()
@property
def steamID(self):
return self.header.steamid
@steamID.setter
def steamID(self, value):
self.header.steamid = value
@property
def sessionID(self):
return self.header.client_sessionid
@sessionID.setter
def sessionID(self, value):
self.header.client_sessionid = value
def __repr__(self):
if isinstance(self.body, _ProtoMessageType):
suffix = self.body.__class__.__name__
elif self.payload:
suffix = 'not parsed'
else:
suffix = 'n/a'
return "<MsgProto(%r | %s)>" % (self.msg, suffix)
def __str__(self):
rows = [repr(self)]
header = str(self.header).rstrip()
rows.append("-------------- header --")
rows.append(header if header else "(empty)")
body = str(self.body).rstrip()
rows.append("---------------- body --")
rows.append(body if body else "(empty)")
if self.payload:
rows.append("------------- payload --")
rows.append(repr(self.payload))
return '\n'.join(rows)
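# Round-trip sketch (assumptions: MsgHdrProtoBuf tolerates data=None, and
# EMsg.ClientHeartBeat resolves to CMsgClientHeartBeat via get_cmsg above):
# serialize an empty message and parse it back from the raw bytes.
def _example_msgproto_roundtrip():
    msg = MsgProto(EMsg.ClientHeartBeat)
    data = msg.serialize()
    return MsgProto(EMsg.ClientHeartBeat, data).msg == EMsg.ClientHeartBeat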
import os
import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from django.conf import settings
from django.core.files.base import File
from django.core.files.storage import Storage
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.utils.encoding import force_unicode, smart_str
try:
from boto.s3.connection import S3Connection, SubdomainCallingFormat
from boto.exception import S3ResponseError
from boto.s3.key import Key
except ImportError:
raise ImproperlyConfigured("Could not load Boto's S3 bindings.\n"
"See http://code.google.com/p/boto/")
ACCESS_KEY_NAME = getattr(settings, 'AWS_ACCESS_KEY_ID', None)
SECRET_KEY_NAME = getattr(settings, 'AWS_SECRET_ACCESS_KEY', None)
HEADERS = getattr(settings, 'AWS_HEADERS', {})
STORAGE_BUCKET_NAME = getattr(settings, 'AWS_STORAGE_BUCKET_NAME', None)
AUTO_CREATE_BUCKET = getattr(settings, 'AWS_AUTO_CREATE_BUCKET', False)
DEFAULT_ACL = getattr(settings, 'AWS_DEFAULT_ACL', 'public-read')
BUCKET_ACL = getattr(settings, 'AWS_BUCKET_ACL', DEFAULT_ACL)
QUERYSTRING_AUTH = getattr(settings, 'AWS_QUERYSTRING_AUTH', True)
QUERYSTRING_EXPIRE = getattr(settings, 'AWS_QUERYSTRING_EXPIRE', 3600)
REDUCED_REDUNDANCY = getattr(settings, 'AWS_REDUCED_REDUNDANCY', False)
LOCATION = getattr(settings, 'AWS_LOCATION', '')
CUSTOM_DOMAIN = getattr(settings, 'AWS_S3_CUSTOM_DOMAIN', None)
CALLING_FORMAT = getattr(settings, 'AWS_S3_CALLING_FORMAT', SubdomainCallingFormat())
SECURE_URLS = getattr(settings, 'AWS_S3_SECURE_URLS', True)
FILE_NAME_CHARSET = getattr(settings, 'AWS_S3_FILE_NAME_CHARSET', 'utf-8')
FILE_OVERWRITE = getattr(settings, 'AWS_S3_FILE_OVERWRITE', True)
IS_GZIPPED = getattr(settings, 'AWS_IS_GZIPPED', False)
PRELOAD_METADATA = getattr(settings, 'AWS_PRELOAD_METADATA', False)
GZIP_CONTENT_TYPES = getattr(settings, 'GZIP_CONTENT_TYPES', (
'text/css',
'application/javascript',
'application/x-javascript'
))
if IS_GZIPPED:
from gzip import GzipFile
def safe_join(base, *paths):
"""
A version of django.utils._os.safe_join for S3 paths.
Joins one or more path components to the base path component intelligently.
Returns a normalized version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
Paths outside the base path indicate a possible security sensitive operation.
"""
from urlparse import urljoin
base_path = force_unicode(base)
paths = map(lambda p: force_unicode(p), paths)
final_path = urljoin(base_path + ("/" if not base_path.endswith("/") else ""), *paths)
# Ensure final_path starts with base_path and that the next character after
# the final path is '/' (or nothing, in which case final_path must be
# equal to base_path).
base_path_len = len(base_path)
if not final_path.startswith(base_path) \
or final_path[base_path_len:base_path_len+1] not in ('', '/'):
raise ValueError('the joined path is located outside of the base path'
' component')
return final_path
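# Behaviour sketch for safe_join (assumed inputs): joined paths must stay
# inside the base component; anything escaping it raises ValueError.
def _example_safe_join():
    assert safe_join('media', 'photos/cat.jpg') == 'media/photos/cat.jpg'
    try:
        safe_join('media', '../secret')   # escapes the base path
    except ValueError:
        return True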
class S3BotoStorage(Storage):
"""Amazon Simple Storage Service using Boto"""
def __init__(self, bucket=STORAGE_BUCKET_NAME, access_key=None,
secret_key=None, bucket_acl=BUCKET_ACL, acl=DEFAULT_ACL, headers=HEADERS,
gzip=IS_GZIPPED, gzip_content_types=GZIP_CONTENT_TYPES,
querystring_auth=QUERYSTRING_AUTH, querystring_expire=QUERYSTRING_EXPIRE,
reduced_redundancy=REDUCED_REDUNDANCY,
custom_domain=CUSTOM_DOMAIN, secure_urls=SECURE_URLS,
location=LOCATION, file_name_charset=FILE_NAME_CHARSET,
preload_metadata=PRELOAD_METADATA, calling_format=CALLING_FORMAT):
self.bucket_acl = bucket_acl
self.bucket_name = bucket
self.acl = acl
self.headers = headers
self.preload_metadata = preload_metadata
self.gzip = gzip
self.gzip_content_types = gzip_content_types
self.querystring_auth = querystring_auth
self.querystring_expire = querystring_expire
self.reduced_redundancy = reduced_redundancy
self.custom_domain = custom_domain
self.secure_urls = secure_urls
self.location = location or ''
self.location = self.location.lstrip('/')
self.file_name_charset = file_name_charset
if not access_key and not secret_key:
access_key, secret_key = self._get_access_keys()
self.connection = S3Connection(access_key, secret_key, calling_format=calling_format)
self._entries = {}
@property
def bucket(self):
if not hasattr(self, '_bucket'):
self._bucket = self._get_or_create_bucket(self.bucket_name)
return self._bucket
@property
def entries(self):
if self.preload_metadata and not self._entries:
self._entries = dict((self._decode_name(entry.key), entry)
for entry in self.bucket.list())
return self._entries
def _get_key(self, name):
""" Get this key from the bucket, if not already in the entries """
key_name = self._encode_name(name)
if key_name in self.entries:
key = self.entries[key_name]
else:
key = self.bucket.get_key(key_name)
if key and self.preload_metadata:
self._entries[key_name] = key
return key
def _delete_key(self, name):
""" Delete this key from the bucket and from the entries """
key_name = self._encode_name(name)
self._entries.pop(key_name, None) # Remove from preloaded cache, if we're using that
self.bucket.delete_key(key_name)
def _get_access_keys(self):
access_key = ACCESS_KEY_NAME
secret_key = SECRET_KEY_NAME
if (access_key or secret_key) and (not access_key or not secret_key):
access_key = os.environ.get(ACCESS_KEY_NAME)
secret_key = os.environ.get(SECRET_KEY_NAME)
if access_key and secret_key:
# Both were provided, so use them
return access_key, secret_key
return None, None
def _get_or_create_bucket(self, name):
"""Retrieves a bucket if it exists, otherwise creates it."""
try:
return self.connection.get_bucket(name, validate=AUTO_CREATE_BUCKET)
except S3ResponseError, e:
if AUTO_CREATE_BUCKET:
bucket = self.connection.create_bucket(name)
bucket.set_acl(self.bucket_acl)
return bucket
raise ImproperlyConfigured, ("Bucket specified by "
"AWS_STORAGE_BUCKET_NAME does not exist. Buckets can be "
"automatically created by setting AWS_AUTO_CREATE_BUCKET=True")
def _clean_name(self, name):
# Useful for windows' paths
return os.path.normpath(name).replace('\\', '/')
def _normalize_name(self, name):
try:
return safe_join(self.location, name).lstrip('/')
except ValueError:
raise SuspiciousOperation("Attempted access to '%s' denied." % name)
def _encode_name(self, name):
return smart_str(name, encoding=self.file_name_charset)
def _decode_name(self, name):
return force_unicode(name, encoding=self.file_name_charset)
def _compress_content(self, content):
"""Gzip a given string."""
zbuf = StringIO()
zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
zfile.write(content.read())
zfile.close()
content.file = zbuf
return content
def _open(self, name, mode='rb'):
name = self._normalize_name(self._clean_name(name))
f = S3BotoStorageFile(name, mode, self)
if not f.key:
raise IOError('File does not exist: %s' % name)
return f
def _save(self, name, content):
cleaned_name = self._clean_name(name)
name = self._normalize_name(cleaned_name)
headers = self.headers.copy()
        content_type = getattr(content, 'content_type', mimetypes.guess_type(name)[0] or Key.DefaultContentType)
if self.gzip and content_type in self.gzip_content_types:
content = self._compress_content(content)
headers.update({'Content-Encoding': 'gzip'})
content.name = cleaned_name
k = self._get_key(name)
if not k:
k = self.bucket.new_key(self._encode_name(name))
        k.set_metadata('Content-Type', content_type)
k.set_contents_from_file(content, headers=headers, policy=self.acl,
reduced_redundancy=self.reduced_redundancy)
# Add to entries cache before leaving
if self.preload_metadata:
self._entries[self._encode_name(name)] = k
return cleaned_name
def delete(self, name):
name = self._normalize_name(self._clean_name(name))
self._delete_key(name)
def exists(self, name):
name = self._normalize_name(self._clean_name(name))
if self.entries and name in self.entries:
return True
k = self.bucket.new_key(self._encode_name(name))
if k.exists():
if self.entries:
# Update this key in the cache; it has been created in another process
self._entries[self._encode_name(name)] = k
return True
return False
def listdir(self, name):
name = self._normalize_name(self._clean_name(name))
dirlist = self.bucket.list(self._encode_name(name))
files = []
dirs = set()
base_parts = name.split("/") if name else []
for item in dirlist:
parts = item.name.split("/")
parts = parts[len(base_parts):]
if len(parts) == 1:
# File
files.append(parts[0])
elif len(parts) > 1:
# Directory
dirs.add(parts[0])
        return list(dirs), files
def size(self, name):
name = self._normalize_name(self._clean_name(name))
if self.entries:
entry = self.entries.get(name)
if entry:
return entry.size
return 0
return self._get_key(name).size
def modified_time(self, name):
try:
from dateutil import parser, tz
except ImportError:
raise NotImplementedError()
name = self._normalize_name(self._clean_name(name))
entry = self._get_key(name)
        # convert the last_modified string to a datetime
        last_modified_date = parser.parse(entry.last_modified)
        # if the date has no timezone, assume UTC
        if last_modified_date.tzinfo is None:
last_modified_date = last_modified_date.replace(tzinfo=tz.tzutc())
# convert date to local time w/o timezone
return last_modified_date.astimezone(tz.tzlocal()).replace(tzinfo=None)
def url(self, name):
name = self._normalize_name(self._clean_name(name))
if self.custom_domain:
return "%s://%s/%s" % ('https' if self.secure_urls else 'http', self.custom_domain, name)
else:
return self.connection.generate_url(self.querystring_expire, method='GET', \
bucket=self.bucket.name, key=self._encode_name(name), query_auth=self.querystring_auth, \
force_http=not self.secure_urls)
def get_available_name(self, name):
""" Overwrite existing file with the same name. """
if FILE_OVERWRITE:
name = self._clean_name(name)
return name
return super(S3BotoStorage, self).get_available_name(name)
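# A minimal usage sketch (illustrative; the dotted storage path is an
# assumption, while AWS_STORAGE_BUCKET_NAME / AWS_AUTO_CREATE_BUCKET match the
# settings referenced in _get_or_create_bucket above):
#
#   # settings.py
#   DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
#   AWS_STORAGE_BUCKET_NAME = 'my-bucket'
#   AWS_AUTO_CREATE_BUCKET = True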
class S3BotoStorageFile(File):
def __init__(self, name, mode, storage):
self._storage = storage
self.name = name[len(self._storage.location):].lstrip('/')
self._mode = mode
self.key = storage._get_key(name)
self._is_dirty = False
self._file = None
@property
def size(self):
return self.key.size
@property
def file(self):
if self._file is None:
self._file = StringIO()
if 'r' in self._mode:
self._is_dirty = False
self.key.get_contents_to_file(self._file)
self._file.seek(0)
return self._file
def read(self, *args, **kwargs):
if 'r' not in self._mode:
raise AttributeError("File was not opened in read mode.")
return super(S3BotoStorageFile, self).read(*args, **kwargs)
def write(self, *args, **kwargs):
if 'w' not in self._mode:
raise AttributeError("File was opened for read-only access.")
self._is_dirty = True
return super(S3BotoStorageFile, self).write(*args, **kwargs)
def close(self):
if self._is_dirty:
self.key.set_contents_from_file(self._file, headers=self._storage.headers, policy=self._storage.acl)
self.key.close()
| |
import unittest
import pytest
from pytest import approx
import numpy as np
import os
import pycqed as pq
from pycqed.instrument_drivers.meta_instrument import device_object_CCL as do
from pycqed.instrument_drivers.meta_instrument.qubit_objects.CCL_Transmon import CCLight_Transmon
from pycqed.instrument_drivers.meta_instrument.LutMans.ro_lutman import UHFQC_RO_LutMan
from pycqed.instrument_drivers.meta_instrument.LutMans import mw_lutman as mwl
from pycqed.instrument_drivers.meta_instrument.LutMans.base_lutman import Base_LutMan
import pycqed.analysis.analysis_toolbox as a_tools
from pycqed.measurement import measurement_control
from pycqed.measurement.detector_functions import (
Multi_Detector_UHF,
UHFQC_input_average_detector,
UHFQC_integrated_average_detector,
UHFQC_integration_logging_det,
)
from pycqed.instrument_drivers.virtual_instruments.virtual_SignalHound import virtual_SignalHound_USB_SA124B
from pycqed.instrument_drivers.virtual_instruments.virtual_MW_source import VirtualMWsource
from pycqed.instrument_drivers.library.Transport import DummyTransport
from pycqed.instrument_drivers.physical_instruments.QuTech.CC import CC
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.UHFQuantumController import UHFQC
from pycqed.instrument_drivers.physical_instruments.ZurichInstruments.ZI_HDAWG8 import ZI_HDAWG8
from pycqed.instrument_drivers.physical_instruments.QuTech_VSM_Module import Dummy_QuTechVSMModule
from qcodes import station, Instrument
class Test_Device_obj(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
This sets up a mock setup using a CC to control multiple qubits
"""
cls.station = station.Station()
cls.CC = CC('CC', DummyTransport())
cls.UHFQC_0 = UHFQC(name="UHFQC_0", server="emulator", device="dev2109", interface="1GbE")
cls.UHFQC_1 = UHFQC(name="UHFQC_1", server="emulator", device="dev2110", interface="1GbE")
cls.UHFQC_2 = UHFQC(name="UHFQC_2", server="emulator", device="dev2111", interface="1GbE")
cls.AWG_mw_0 = ZI_HDAWG8(
name="AWG_mw_0",
server="emulator",
num_codewords=128,
device="dev8026",
interface="1GbE",
)
cls.AWG_mw_1 = ZI_HDAWG8(
name="AWG_mw_1",
server="emulator",
num_codewords=128,
device="dev8027",
interface="1GbE",
)
cls.AWG_flux_0 = ZI_HDAWG8(
name="AWG_flux_0",
server="emulator",
num_codewords=128,
device="dev8028",
interface="1GbE",
)
cls.VSM = Dummy_QuTechVSMModule('VSM')
cls.MW1 = VirtualMWsource("MW1")
cls.MW2 = VirtualMWsource("MW2")
cls.MW3 = VirtualMWsource("MW3")
cls.SH = virtual_SignalHound_USB_SA124B("SH")
cls.MC = measurement_control.MeasurementControl("MC", live_plot_enabled=False, verbose=False)
cls.MC.station = cls.station
cls.station.add_component(cls.MC)
# Required to set it to the testing datadir
test_datadir = os.path.join(pq.__path__[0], "tests", "test_output")
cls.MC.datadir(test_datadir)
a_tools.datadir = cls.MC.datadir()
if 0: # FIXME: PR #658: test broken by commit bd19f56
cls.mw_lutman = mwl.AWG8_VSM_MW_LutMan("MW_LutMan_VSM")
cls.mw_lutman.AWG(cls.AWG_mw_0.name)
cls.mw_lutman.channel_GI(1)
cls.mw_lutman.channel_GQ(2)
cls.mw_lutman.channel_DI(3)
cls.mw_lutman.channel_DQ(4)
else: # FIXME: workaround
cls.mw_lutman = mwl.AWG8_MW_LutMan("MW_LutMan")
cls.mw_lutman.AWG(cls.AWG_mw_0.name)
cls.mw_lutman.channel_I(1)
cls.mw_lutman.channel_Q(2)
cls.mw_lutman.mw_modulation(100e6)
cls.mw_lutman.sampling_rate(2.4e9)
cls.ro_lutman_0 = UHFQC_RO_LutMan("ro_lutman_0", feedline_number=0, feedline_map="S17", num_res=9)
cls.ro_lutman_0.AWG(cls.UHFQC_0.name)
cls.ro_lutman_1 = UHFQC_RO_LutMan("ro_lutman_1", feedline_number=1, feedline_map="S17", num_res=9)
cls.ro_lutman_1.AWG(cls.UHFQC_1.name)
cls.ro_lutman_2 = UHFQC_RO_LutMan("ro_lutman_2", feedline_number=2, feedline_map="S17", num_res=9)
cls.ro_lutman_2.AWG(cls.UHFQC_2.name)
# Assign instruments
qubits = []
for q_idx in range(17):
q = CCLight_Transmon("q{}".format(q_idx))
qubits.append(q)
q.instr_LutMan_MW(cls.mw_lutman.name)
q.instr_LO_ro(cls.MW1.name)
q.instr_LO_mw(cls.MW2.name)
q.instr_spec_source(cls.MW3.name)
# map qubits to UHFQC, *must* match mapping inside Base_RO_LutMan (Yuk)
if 0:
if q_idx in [13, 16]:
q.instr_acquisition(cls.UHFQC_0.name)
q.instr_LutMan_RO(cls.ro_lutman_0.name)
elif q_idx in [1, 4, 5, 7, 8, 10, 11, 14, 15]:
q.instr_acquisition(cls.UHFQC_1.name)
q.instr_LutMan_RO(cls.ro_lutman_1.name)
elif q_idx in [0, 2, 3, 6, 9, 12]:
q.instr_acquisition(cls.UHFQC_2.name)
q.instr_LutMan_RO(cls.ro_lutman_2.name)
else:
if q_idx in [6, 11]:
q.instr_acquisition(cls.UHFQC_0.name)
q.instr_LutMan_RO(cls.ro_lutman_0.name)
elif q_idx in [0, 1, 2, 3, 7, 8, 12, 13, 15]:
q.instr_acquisition(cls.UHFQC_1.name)
q.instr_LutMan_RO(cls.ro_lutman_1.name)
elif q_idx in [4, 5, 9, 10, 14, 16]:
q.instr_acquisition(cls.UHFQC_2.name)
q.instr_LutMan_RO(cls.ro_lutman_2.name)
q.instr_VSM(cls.VSM.name)
q.instr_CC(cls.CC.name)
q.instr_MC(cls.MC.name)
q.instr_SH(cls.SH.name)
config_fn = os.path.join(pq.__path__[0], "tests", "test_cfg_cc.json")
q.cfg_openql_platform_fn(config_fn)
# Setting some "random" initial parameters
q.ro_freq(5.43e9 + q_idx * 50e6)
q.ro_freq_mod(200e6)
q.freq_qubit(4.56e9 + q_idx * 50e6)
q.freq_max(4.62e9 + q_idx * 50e6)
q.mw_freq_mod(-100e6)
q.mw_awg_ch(1)
q.cfg_qubit_nr(q_idx)
# q.mw_vsm_delay(15)
q.mw_mixer_offs_GI(0.1)
q.mw_mixer_offs_GQ(0.2)
q.mw_mixer_offs_DI(0.3)
q.mw_mixer_offs_DQ(0.4)
# Set up the device object and set required params
cls.device = do.DeviceCCL("device")
cls.device.qubits([q.name for q in qubits])
cls.device.instr_CC(cls.CC.name)
cls.device.instr_AWG_mw_0(cls.AWG_mw_0.name)
cls.device.instr_AWG_mw_1(cls.AWG_mw_1.name)
cls.device.instr_AWG_flux_0(cls.AWG_flux_0.name)
if 0:
cls.device.ro_lo_freq(6e9)
else: # FIXME: frequency now per LutMan
cls.ro_lutman_0.LO_freq(6e9)
cls.ro_lutman_1.LO_freq(6e9)
cls.ro_lutman_2.LO_freq(6e9)
if 0: # FIXME: CCL/QCC deprecated
# Fixed by design
cls.dio_map_CCL = {"ro_0": 1, "ro_1": 2, "flux_0": 3, "mw_0": 4, "mw_1": 5}
# Fixed by design
cls.dio_map_QCC = {
"ro_0": 1,
"ro_1": 2,
"ro_2": 3,
"mw_0": 4,
"mw_1": 5,
"flux_0": 6,
"flux_1": 7,
"flux_2": 8,
"mw_2": 9,
"mw_3": 10,
"mw_4": 11,
}
# Modular, arbitrary example here
cls.dio_map_CC = {
"ro_0": 0,
"ro_1": 1,
"ro_2": 2,
"mw_0": 3,
"mw_1": 4,
"flux_0": 6,
"flux_1": 7,
"flux_2": 8,
}
cls.device.dio_map(cls.dio_map_CC)
@classmethod
def tearDownClass(cls):
Instrument.close_all()
@unittest.skip("CCL/QCC is removed")
def test_get_dio_map(self):
self.device.instr_CC(self.CCL.name)
# 2020-03-20
        # dio_map needs to be specified manually by the user for each setup
# this is necessary due to the new modularity of CC
expected_dio_map = self.dio_map_CCL
self.device.dio_map(expected_dio_map)
dio_map = self.device.dio_map()
assert dio_map == expected_dio_map
self.device.instr_CC(self.QCC.name)
expected_dio_map = self.dio_map_QCC
self.device.dio_map(expected_dio_map)
dio_map = self.device.dio_map()
assert dio_map == expected_dio_map
def test_get_dio_map_CC(self):
self.device.instr_CC(self.CC.name)
# 2020-03-20
        # dio_map needs to be specified manually by the user for each setup
# this is necessary due to the new modularity of CC
expected_dio_map = self.dio_map_CC
self.device.dio_map(expected_dio_map)
dio_map = self.device.dio_map()
assert dio_map == expected_dio_map
@unittest.skip("CCL is removed")
def test_prepare_timing_CCL(self):
self.device.instr_CC(self.CCL.name)
self.device.dio_map(self.dio_map_CCL)
self.device.tim_ro_latency_0(200e-9)
self.device.tim_ro_latency_1(180e-9)
self.device.tim_flux_latency_0(-40e-9)
self.device.tim_mw_latency_0(20e-9)
self.device.tim_mw_latency_1(0e-9)
self.device.prepare_timing()
# DIO timing map for CCL:
# dio1: ro_latency_0
# dio2: ro_latency_1
# dio3: flux_latency_0
# dio4: mw_latency_0
# dio5: mw_latency_1
assert self.CCL.dio1_out_delay() == 12
assert self.CCL.dio2_out_delay() == 11
assert self.CCL.dio3_out_delay() == 0
assert self.CCL.dio4_out_delay() == 3
assert self.CCL.dio5_out_delay() == 2
@unittest.skip("QCC is removed")
def test_prepare_timing_QCC(self):
self.device.instr_CC(self.QCC.name)
self.device.dio_map(self.dio_map_QCC)
self.device.tim_ro_latency_0(200e-9)
self.device.tim_ro_latency_1(180e-9)
self.device.tim_flux_latency_0(-40e-9)
self.device.tim_flux_latency_1(100e-9)
self.device.tim_mw_latency_0(20e-9)
self.device.tim_mw_latency_1(0e-9)
self.device.prepare_timing()
assert self.QCC.dio1_out_delay() == 12
assert self.QCC.dio2_out_delay() == 11
assert self.QCC.dio4_out_delay() == 3
assert self.QCC.dio5_out_delay() == 2
assert self.QCC.dio6_out_delay() == 0
assert self.QCC.dio7_out_delay() == 7
@unittest.skip("QCC is removed")
def test_prepare_timing_QCC_fine(self):
self.device.instr_CC(self.QCC.name)
self.device.dio_map(self.dio_map_QCC)
self.device.tim_ro_latency_0(200e-9)
self.device.tim_ro_latency_1(180e-9)
self.device.tim_flux_latency_0(-36e-9)
self.device.tim_flux_latency_1(100e-9)
self.device.tim_mw_latency_0(23e-9)
self.device.tim_mw_latency_1(0e-9)
self.device.prepare_timing()
assert self.QCC.dio1_out_delay() == 12
assert self.QCC.dio2_out_delay() == 11
assert self.QCC.dio4_out_delay() == 3
assert self.QCC.dio5_out_delay() == 2
assert self.QCC.dio6_out_delay() == 0
assert self.QCC.dio7_out_delay() == 7
        if 0:  # FIXME: PR #658: test broken by commit bd19f56
assert self.AWG_flux_0.sigouts_0_delay() == approx(4e-9)
assert self.AWG_flux_0.sigouts_7_delay() == approx(4e-9)
assert self.AWG_mw_0.sigouts_7_delay() == approx(3e-9)
assert self.AWG_mw_0.sigouts_7_delay() == approx(3e-9)
assert self.AWG_mw_1.sigouts_7_delay() == approx(0)
assert self.AWG_mw_1.sigouts_7_delay() == approx(0)
def test_prepare_timing_CC(self):
self.device.instr_CC(self.CC.name)
self.device.dio_map(self.dio_map_CC)
self.device.tim_ro_latency_0(200e-9)
self.device.tim_ro_latency_1(180e-9)
self.device.tim_flux_latency_0(-40e-9)
self.device.tim_flux_latency_1(100e-9)
self.device.tim_mw_latency_0(20e-9)
self.device.tim_mw_latency_1(0e-9)
self.device.prepare_timing()
assert self.CC.dio0_out_delay() == 12
assert self.CC.dio1_out_delay() == 11
assert self.CC.dio3_out_delay() == 3
assert self.CC.dio4_out_delay() == 2
assert self.CC.dio6_out_delay() == 0
assert self.CC.dio7_out_delay() == 7
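    # Note: the expected delay values above are consistent with each latency
    # being normalized to the smallest configured latency (-40 ns) and then
    # expressed in 20 ns DIO ticks, e.g. (200e-9 - (-40e-9)) / 20e-9 = 12 for
    # ro_latency_0 and (100e-9 - (-40e-9)) / 20e-9 = 7 for flux_latency_1.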
def test_prepare_readout_lo_freqs_config(self):
# Test that the modulation frequencies of all qubits are set correctly.
self.device.ro_acq_weight_type("optimal")
qubits = self.device.qubits()
self.ro_lutman_0.LO_freq(6e9)
self.ro_lutman_1.LO_freq(6e9)
self.ro_lutman_2.LO_freq(6e9)
self.device.prepare_readout(qubits=qubits)
# MW1 is specified as the readout LO source
assert self.MW1.frequency() == 6e9
for qname in qubits:
q = self.device.find_instrument(qname)
assert 6e9 + q.ro_freq_mod() == q.ro_freq()
self.ro_lutman_0.LO_freq(5.8e9)
self.ro_lutman_1.LO_freq(5.8e9)
self.ro_lutman_2.LO_freq(5.8e9)
self.device.prepare_readout(qubits=qubits)
# MW1 is specified as the readout LO source
assert self.MW1.frequency() == 5.8e9
for qname in qubits:
q = self.device.find_instrument(qname)
assert 5.8e9 + q.ro_freq_mod() == q.ro_freq()
# FIXME: no longer raises exception
# q = self.device.find_instrument("q5")
# q.instr_LO_ro(self.MW3.name)
# with pytest.raises(ValueError):
# self.device.prepare_readout(qubits=qubits)
# q.instr_LO_ro(self.MW1.name)
def test_prepare_readout_assign_weights(self):
self.ro_lutman_0.LO_freq(6e9)
self.ro_lutman_1.LO_freq(6e9)
self.ro_lutman_2.LO_freq(6e9)
self.device.ro_acq_weight_type("optimal")
qubits = self.device.qubits()
q13 = self.device.find_instrument("q13")
q13.ro_acq_weight_func_I(np.ones(128))
q13.ro_acq_weight_func_Q(np.ones(128) * 0.5)
self.device.prepare_readout(qubits=qubits)
exp_ch_map = {
'UHFQC_1': {'q0': 0, 'q1': 1, 'q2': 2, 'q3': 3, 'q7': 4, 'q8': 5, 'q12': 6, 'q13': 7, 'q15': 8},
'UHFQC_2': {'q4': 0, 'q5': 1, 'q9': 2, 'q10': 3, 'q14': 4, 'q16': 5},
'UHFQC_0': {'q6': 0, 'q11': 1}
}
assert exp_ch_map == self.device._acq_ch_map
qb = self.device.find_instrument("q12")
assert qb.ro_acq_weight_chI() == 6
assert qb.ro_acq_weight_chQ() == 7
def test_prepare_readout_assign_weights_order_matters(self):
# Test that the order of the channels is as in the order iterated over
qubits = ["q2", "q3", "q0"]
self.device.ro_acq_weight_type("optimal")
self.device.prepare_readout(qubits=qubits)
exp_ch_map = {"UHFQC_1": {"q0": 2, "q2": 0, "q3": 1}}
assert exp_ch_map == self.device._acq_ch_map
qb = self.device.find_instrument("q3")
assert qb.ro_acq_weight_chI() == 1
assert qb.ro_acq_weight_chQ() == 2
def test_prepare_readout_assign_weights_IQ_counts_double(self):
qubits = ["q2", "q3", "q0", "q13", "q16"]
self.device.ro_acq_weight_type("SSB")
self.device.prepare_readout(qubits=qubits)
exp_ch_map = {
'UHFQC_1': {'q0': 4, 'q13': 6, 'q2': 0, 'q3': 2},
'UHFQC_2': {'q16': 0}
}
assert exp_ch_map == self.device._acq_ch_map
qb = self.device.find_instrument("q16")
assert qb.ro_acq_weight_chI() == 0
assert qb.ro_acq_weight_chQ() == 1
def test_prepare_readout_assign_weights_too_many_raises(self):
qubits = self.device.qubits()
self.device.ro_acq_weight_type("SSB")
with pytest.raises(ValueError):
self.device.prepare_readout(qubits=qubits)
def test_prepare_readout_resets_UHF(self):
uhf = self.device.find_instrument("UHFQC_1")
uhf.qas_0_correlations_5_enable(1)
uhf.qas_0_correlations_5_source(3)
uhf.qas_0_thresholds_5_correlation_enable(1)
uhf.qas_0_thresholds_5_correlation_source(3)
assert uhf.qas_0_correlations_5_enable() == 1
assert uhf.qas_0_correlations_5_source() == 3
assert uhf.qas_0_thresholds_5_correlation_enable() == 1
assert uhf.qas_0_thresholds_5_correlation_source() == 3
self.device.prepare_readout(qubits=["q0"])
assert uhf.qas_0_correlations_5_enable() == 0
assert uhf.qas_0_correlations_5_source() == 0
assert uhf.qas_0_thresholds_5_correlation_enable() == 0
assert uhf.qas_0_thresholds_5_correlation_source() == 0
def test_prepare_ro_pulses_resonator_combinations(self):
        # Because not all resonator combinations are supported, the default is
        # to support one combination per feedline: all resonators of the
        # qubits being read out on that feedline (see the assertions below).
qubits = ["q2", "q3", "q0", "q13", "q16"]
self.device.prepare_readout(qubits=qubits)
# Combinations are based on qubit number
res_combs0 = self.ro_lutman_0.resonator_combinations()
exp_res_combs0 = [[11]]
assert res_combs0 == exp_res_combs0
res_combs1 = self.ro_lutman_1.resonator_combinations()
exp_res_combs1 = [[2, 3, 0, 13]]
assert res_combs1 == exp_res_combs1
res_combs2 = self.ro_lutman_2.resonator_combinations()
exp_res_combs2 = [[16]]
assert res_combs2 == exp_res_combs2
def test_prepare_ro_pulses_lutman_pars_updated(self):
q = self.device.find_instrument("q5")
q.ro_pulse_amp(0.4)
self.device.prepare_readout(["q5"])
ro_amp = self.ro_lutman_2.M_amp_R5()
assert ro_amp == 0.4
q.ro_pulse_amp(0.2)
self.device.prepare_readout(["q5"])
ro_amp = self.ro_lutman_2.M_amp_R5()
assert ro_amp == 0.2
def test_prep_ro_input_avg_det(self):
qubits = self.device.qubits()
self.device.ro_acq_weight_type("optimal")
self.device.prepare_readout(qubits=qubits)
inp_avg_det = self.device.get_input_avg_det()
assert isinstance(inp_avg_det, Multi_Detector_UHF)
assert len(inp_avg_det.detectors) == 3
for ch_det in inp_avg_det.detectors:
assert isinstance(ch_det, UHFQC_input_average_detector)
# Note that UHFQC_1 is first because q0 is the first in device.qubits
assert inp_avg_det.value_names == [
"UHFQC_1 ch0",
"UHFQC_1 ch1",
"UHFQC_2 ch0",
"UHFQC_2 ch1",
"UHFQC_0 ch0",
"UHFQC_0 ch1",
]
def test_prepare_ro_instantiate_detectors_int_avg_optimal(self):
qubits = ["q11", "q16", "q1", "q5", "q0"]
self.device.ro_acq_weight_type("optimal")
self.device.prepare_readout(qubits=qubits)
int_avg_det = self.device.get_int_avg_det()
assert isinstance(int_avg_det, Multi_Detector_UHF)
assert len(int_avg_det.detectors) == 3
for ch_det in int_avg_det.detectors:
assert isinstance(ch_det, UHFQC_integrated_average_detector)
assert int_avg_det.value_names == [
"UHFQC_0 w0 q11",
"UHFQC_2 w0 q16",
"UHFQC_2 w1 q5",
"UHFQC_1 w0 q1",
"UHFQC_1 w1 q0",
]
def test_prepare_ro_instantiate_detectors_int_avg_ssb(self):
qubits = ["q11", "q16", "q1", "q5", "q0"]
self.device.ro_acq_weight_type("SSB")
self.device.prepare_readout(qubits=qubits)
int_avg_det = self.device.get_int_avg_det()
assert isinstance(int_avg_det, Multi_Detector_UHF)
assert len(int_avg_det.detectors) == 3
for ch_det in int_avg_det.detectors:
assert isinstance(ch_det, UHFQC_integrated_average_detector)
assert int_avg_det.value_names == [
"UHFQC_0 w0 q11 I",
"UHFQC_0 w1 q11 Q",
"UHFQC_2 w0 q16 I",
"UHFQC_2 w1 q16 Q",
"UHFQC_2 w2 q5 I",
"UHFQC_2 w3 q5 Q",
"UHFQC_1 w0 q1 I",
"UHFQC_1 w1 q1 Q",
"UHFQC_1 w2 q0 I",
"UHFQC_1 w3 q0 Q",
]
        # Note that the channels end up ordered per feedline because of the
        # way the multi detector works.
def test_prepare_ro_instantiate_detectors_int_logging_optimal(self):
qubits = ["q11", "q16", "q1", "q5", "q0"]
self.device.ro_acq_weight_type("optimal")
self.device.prepare_readout(qubits=qubits)
int_log_det = self.device.get_int_logging_detector()
assert isinstance(int_log_det, Multi_Detector_UHF)
assert len(int_log_det.detectors) == 3
for ch_det in int_log_det.detectors:
assert isinstance(ch_det, UHFQC_integration_logging_det)
assert int_log_det.value_names == [
"UHFQC_0 w0 q11",
"UHFQC_2 w0 q16",
"UHFQC_2 w1 q5",
"UHFQC_1 w0 q1",
"UHFQC_1 w1 q0",
]
qubits = self.device.qubits()
def test_prepare_ro_instantiate_detectors_int_logging_ssb(self):
qubits = ["q11", "q16", "q1", "q5", "q0"]
self.device.ro_acq_weight_type("SSB")
self.device.prepare_readout(qubits=qubits)
int_log_det = self.device.get_int_logging_detector()
assert isinstance(int_log_det, Multi_Detector_UHF)
assert len(int_log_det.detectors) == 3
for ch_det in int_log_det.detectors:
assert isinstance(ch_det, UHFQC_integration_logging_det)
assert int_log_det.value_names == [
"UHFQC_0 w0 q11 I",
"UHFQC_0 w1 q11 Q",
"UHFQC_2 w0 q16 I",
"UHFQC_2 w1 q16 Q",
"UHFQC_2 w2 q5 I",
"UHFQC_2 w3 q5 Q",
"UHFQC_1 w0 q1 I",
"UHFQC_1 w1 q1 Q",
"UHFQC_1 w2 q0 I",
"UHFQC_1 w3 q0 Q",
]
def test_prepare_readout_mixer_settings(self):
pass
def test_acq_ch_map_to_IQ_ch_map(self):
ch_map = {
"UHFQC_0": {"q13": 0, "q16": 2},
"UHFQC_1": {"q1": 0, "q4": 4},
"UHFQC_2": {"q0": 0, "q3": 2, "q6": 4},
}
IQ_ch_map = do._acq_ch_map_to_IQ_ch_map(ch_map)
exp_IQ_ch_map = {
"UHFQC_0": {"q13 I": 0, "q13 Q": 1, "q16 I": 2, "q16 Q": 3},
"UHFQC_1": {"q1 I": 0, "q1 Q": 1, "q4 I": 4, "q4 Q": 5},
"UHFQC_2": {"q0 I": 0, "q0 Q": 1, "q3 I": 2, "q3 Q": 3, "q6 I": 4, "q6 Q": 5},
}
assert IQ_ch_map == exp_IQ_ch_map
def test_base_lutman_make(self):
# make first time
n1 = Base_LutMan.make()
assert n1 == 4
# make again, should now return 0
n2 = Base_LutMan.make()
assert n2 == 0
# change some LutMan parameter, should rebuild
old_val = self.mw_lutman.mw_modulation()
self.mw_lutman.mw_modulation(old_val - 1e6) # change modulation.
n3 = Base_LutMan.make()
self.mw_lutman.mw_modulation(old_val) # restore modulation.
assert n3 == 1
# manually change LutMan
        # note that load_ef_rabi_pulses_to_AWG_lookuptable already updates everything, but sidesteps make,
        # which will then perform the update again. Eventually, everything needs to go through make.
self.mw_lutman.load_ef_rabi_pulses_to_AWG_lookuptable()
n4 = Base_LutMan.make()
assert n4 == 1
| |
"""Slack platform for notify component."""
import asyncio
import logging
import os
from urllib.parse import urlparse
from aiohttp import BasicAuth, FormData
from aiohttp.client_exceptions import ClientError
from slack import WebClient
from slack.errors import SlackApiError
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_API_KEY, CONF_ICON, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client, config_validation as cv
import homeassistant.helpers.template as template
_LOGGER = logging.getLogger(__name__)
ATTR_BLOCKS = "blocks"
ATTR_BLOCKS_TEMPLATE = "blocks_template"
ATTR_FILE = "file"
ATTR_ICON = "icon"
ATTR_PASSWORD = "password"
ATTR_PATH = "path"
ATTR_URL = "url"
ATTR_USERNAME = "username"
CONF_DEFAULT_CHANNEL = "default_channel"
DEFAULT_TIMEOUT_SECONDS = 15
FILE_PATH_SCHEMA = vol.Schema({vol.Required(ATTR_PATH): cv.isfile})
FILE_URL_SCHEMA = vol.Schema(
{
vol.Required(ATTR_URL): cv.url,
vol.Inclusive(ATTR_USERNAME, "credentials"): cv.string,
vol.Inclusive(ATTR_PASSWORD, "credentials"): cv.string,
}
)
DATA_FILE_SCHEMA = vol.Schema(
{vol.Required(ATTR_FILE): vol.Any(FILE_PATH_SCHEMA, FILE_URL_SCHEMA)}
)
DATA_TEXT_ONLY_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_USERNAME): cv.string,
vol.Optional(ATTR_ICON): cv.string,
vol.Optional(ATTR_BLOCKS): list,
vol.Optional(ATTR_BLOCKS_TEMPLATE): list,
}
)
DATA_SCHEMA = vol.All(
cv.ensure_list, [vol.Any(DATA_FILE_SCHEMA, DATA_TEXT_ONLY_SCHEMA)]
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DEFAULT_CHANNEL): cv.string,
vol.Optional(CONF_ICON): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
}
)
async def async_get_service(hass, config, discovery_info=None):
"""Set up the Slack notification service."""
session = aiohttp_client.async_get_clientsession(hass)
client = WebClient(token=config[CONF_API_KEY], run_async=True, session=session)
try:
await client.auth_test()
except SlackApiError as err:
_LOGGER.error("Error while setting up integration: %s", err)
return
return SlackNotificationService(
hass,
client,
config[CONF_DEFAULT_CHANNEL],
username=config.get(CONF_USERNAME),
icon=config.get(CONF_ICON),
)
@callback
def _async_get_filename_from_url(url):
"""Return the filename of a passed URL."""
parsed_url = urlparse(url)
return os.path.basename(parsed_url.path)
@callback
def _async_sanitize_channel_names(channel_list):
"""Remove any # symbols from a channel list."""
return [channel.lstrip("#") for channel in channel_list]
@callback
def _async_templatize_blocks(hass, value):
"""Recursive template creator helper function."""
if isinstance(value, list):
return [_async_templatize_blocks(hass, item) for item in value]
if isinstance(value, dict):
return {
key: _async_templatize_blocks(hass, item) for key, item in value.items()
}
tmpl = template.Template(value, hass=hass)
return tmpl.async_render(parse_result=False)
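# Illustrative (hypothetical) input for _async_templatize_blocks: a
# blocks_template such as
#   [{"type": "section", "text": {"type": "mrkdwn", "text": "Sun: {{ states('sun.sun') }}"}}]
# is walked recursively, and every leaf string is rendered through the Home
# Assistant template engine before the blocks are sent to Slack.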
class SlackNotificationService(BaseNotificationService):
"""Define the Slack notification logic."""
def __init__(self, hass, client, default_channel, username, icon):
"""Initialize."""
self._client = client
self._default_channel = default_channel
self._hass = hass
self._icon = icon
self._username = username
async def _async_send_local_file_message(self, path, targets, message, title):
"""Upload a local file (with message) to Slack."""
if not self._hass.config.is_allowed_path(path):
_LOGGER.error("Path does not exist or is not allowed: %s", path)
return
parsed_url = urlparse(path)
filename = os.path.basename(parsed_url.path)
try:
await self._client.files_upload(
channels=",".join(targets),
file=path,
filename=filename,
initial_comment=message,
title=title or filename,
)
except SlackApiError as err:
_LOGGER.error("Error while uploading file-based message: %s", err)
async def _async_send_remote_file_message(
self, url, targets, message, title, *, username=None, password=None
):
"""Upload a remote file (with message) to Slack.
Note that we bypass the python-slackclient WebClient and use aiohttp directly,
as the former would require us to download the entire remote file into memory
first before uploading it to Slack.
"""
if not self._hass.config.is_allowed_external_url(url):
_LOGGER.error("URL is not allowed: %s", url)
return
filename = _async_get_filename_from_url(url)
session = aiohttp_client.async_get_clientsession(self.hass)
kwargs = {}
        if username is not None and password is not None:
            kwargs = {"auth": BasicAuth(username, password=password)}
resp = await session.request("get", url, **kwargs)
try:
resp.raise_for_status()
except ClientError as err:
_LOGGER.error("Error while retrieving %s: %s", url, err)
return
data = FormData(
{
"channels": ",".join(targets),
"filename": filename,
"initial_comment": message,
"title": title or filename,
"token": self._client.token,
},
charset="utf-8",
)
data.add_field("file", resp.content, filename=filename)
try:
await session.post("https://slack.com/api/files.upload", data=data)
except ClientError as err:
_LOGGER.error("Error while uploading file message: %s", err)
async def _async_send_text_only_message(
self, targets, message, title, blocks, username, icon
):
"""Send a text-only message."""
message_dict = {
"blocks": blocks,
"link_names": True,
"text": message,
"username": username,
}
icon = icon or self._icon
if icon:
if icon.lower().startswith(("http://", "https://")):
icon_type = "url"
else:
icon_type = "emoji"
message_dict[f"icon_{icon_type}"] = icon
tasks = {
target: self._client.chat_postMessage(**message_dict, channel=target)
for target in targets
}
results = await asyncio.gather(*tasks.values(), return_exceptions=True)
for target, result in zip(tasks, results):
if isinstance(result, SlackApiError):
_LOGGER.error(
"There was a Slack API error while sending to %s: %s",
target,
result,
)
async def async_send_message(self, message, **kwargs):
"""Send a message to Slack."""
data = kwargs.get(ATTR_DATA)
if data is None:
data = {}
try:
DATA_SCHEMA(data)
except vol.Invalid as err:
_LOGGER.error("Invalid message data: %s", err)
data = {}
title = kwargs.get(ATTR_TITLE)
targets = _async_sanitize_channel_names(
kwargs.get(ATTR_TARGET, [self._default_channel])
)
# Message Type 1: A text-only message
if ATTR_FILE not in data:
if ATTR_BLOCKS_TEMPLATE in data:
blocks = _async_templatize_blocks(self.hass, data[ATTR_BLOCKS_TEMPLATE])
elif ATTR_BLOCKS in data:
blocks = data[ATTR_BLOCKS]
else:
blocks = {}
return await self._async_send_text_only_message(
targets,
message,
title,
blocks,
username=data.get(ATTR_USERNAME, self._username),
icon=data.get(ATTR_ICON, self._icon),
)
# Message Type 2: A message that uploads a remote file
if ATTR_URL in data[ATTR_FILE]:
return await self._async_send_remote_file_message(
data[ATTR_FILE][ATTR_URL],
targets,
message,
title,
username=data[ATTR_FILE].get(ATTR_USERNAME),
password=data[ATTR_FILE].get(ATTR_PASSWORD),
)
# Message Type 3: A message that uploads a local file
return await self._async_send_local_file_message(
data[ATTR_FILE][ATTR_PATH], targets, message, title
)
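# Example configuration.yaml entry for this platform (illustrative; the keys
# mirror PLATFORM_SCHEMA above, and the secret name is hypothetical):
#
#   notify:
#     - platform: slack
#       api_key: !secret slack_api_key
#       default_channel: "#general"
#       username: "Home Assistant"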
| |
"""
Check which packages should be dropped for the Mass Python 2 Package Removal:
https://fedoraproject.org/wiki/Changes/Mass_Python_2_Package_Removal
To run this:
- Install portingdb in a virtualenv:
    python -m pip install -e .
- Load portingdb data:
python3 -m portingdb --datadir data/ load
- Run this script:
python -m portingdb check-drops
The script creates the directory `_check_drops`, containing cached stuff and
some result files.
"""
from pathlib import Path
import xml.sax
import gzip
import sys
import time
import json
import subprocess
import configparser
import shutil
import collections
import click
from portingdb import tables
cache_dir = Path('./_check_drops')
xml_default = {}
xml_option_args = {}
for x in 'filelists', 'primary':
xml_default[x] = list(Path('/var/cache/dnf').glob(
f'rawhide-????????????????/repodata/*-{x}.xml.gz'))
if len(xml_default[x]) == 1:
xml_option_args[x] = {'default': xml_default[x][0]}
else:
xml_option_args[x] = {'required': True}
def log(*args, **kwargs):
"""Print to stderr"""
kwargs.setdefault('file', sys.stderr)
print(*args, **kwargs)
def handle_filename(result, filename):
"""Look at a filename a RPM installs, and update "result" accordingly"""
if filename.startswith((
'/usr/lib/python2.7/',
'/usr/lib64/python2.7/',
)):
# Importable module; consider this for dropping
result['notes'].add('Python 2 module')
result['ignore'] = False
if filename.endswith('info/entry_points.txt'):
result['notes'].add('Entrypoint')
result.setdefault('entrypoints', []).append(filename)
elif filename.startswith((
'/usr/lib/python2.7/site-packages/libtaskotron/ext/',
)):
# Taskotron extension
result['notes'].add('Taskotron extension')
result['keep'] = True
elif filename.startswith((
'/usr/lib/python3.7/',
'/usr/lib64/python3.7/',
)):
# Python 3 module; ignore here, but freak out
result['notes'].add('Python 3 module')
elif filename.startswith((
'/usr/share/doc/',
'/usr/share/gtk-doc/',
'/usr/share/man/',
'/usr/share/licenses/',
)):
# Doc/licence; doesn't block dropping
result['notes'].add('Docs/Licences')
elif filename.startswith((
'/usr/share/locale/',
)) or '/LC_MESSAGES/' in filename:
# Locales; doesn't block dropping
result['notes'].add('Locales')
elif filename.startswith((
'/usr/share/icons/',
'/usr/share/pixmaps/',
)):
# Icons; doesn't block dropping
result['notes'].add('Icons')
elif dir_or_exact(filename, (
'/usr/share/pygtk/2.0/defs',
'/usr/share/gst-python/0.10/defs',
'/usr/share/pygtk/2.0/argtypes',
)) or filename.endswith((
'.glade',
'.ui',
)):
# UIs; doesn't block dropping
result['notes'].add('UIs')
elif filename.endswith((
'.html',
'.jinja2',
)) or 'templates' in filename:
# Templates; doesn't block dropping
result['notes'].add('Templates')
elif filename.startswith((
'/usr/lib/tmpfiles.d/',
'/usr/lib/udev/rules.d/',
'/usr/lib/pkgconfig/',
'/usr/lib64/pkgconfig/',
'/usr/share/bash-completion/',
'/usr/src/',
'/var/cache/',
'/var/lib/',
'/var/log/',
'/var/run/',
'/var/spool/',
'/var/tmp/',
'/etc/',
)):
# Logs/Cache/Config; doesn't block dropping
result['notes'].add('Logs/Cache/Config')
elif dir_or_exact(filename, (
'/usr/lib/.build-id',
)):
# Build ID; doesn't block dropping
result['notes'].add('Build ID')
elif dir_or_exact(filename, (
'/usr/lib/qt4/plugins/designer',
'/usr/lib64/qt4/plugins/designer',
'/usr/share/autocloud',
'/usr/share/conda',
'/usr/share/fmn.web',
'/usr/share/genmsg',
'/usr/share/gst-python',
'/usr/share/libavogadro',
'/usr/share/myhdl',
'/usr/share/ocio',
'/usr/share/os-brick',
'/usr/share/pgu',
'/usr/share/pygtk',
'/usr/share/pygtkchart',
'/usr/share/python-dmidecode',
'/usr/share/python-ldaptor',
'/usr/share/tomoe',
'/usr/share/viewvc',
'/usr/share/pygtk/2.0',
)):
# Various self contained files
result['notes'].add('Self Contained Files')
elif filename in (
'/usr/bin/tg-admin', # self contained for the module (TurboGears)
):
# Those are hardcoded commands we don't care about
result['notes'].add('Ignored command')
result['filename_command_ignored'] = filename
elif filename.startswith((
'/usr/bin/',
'/usr/sbin/',
'/usr/libexec/',
'/usr/lib/systemd/system/',
)):
# Command; might be needed
result['notes'].add('Command')
result['keep'] = True
result['filename_command'] = filename
elif filename.startswith((
'/usr/share/appdata/',
'/usr/share/applications/',
'/usr/share/metainfo/',
)):
# Application; might be needed
result['notes'].add('Application')
result['keep'] = True
result['filename_application'] = filename
elif not filename.startswith((
'/usr/lib/python2.7/',
'/usr/lib64/python2.7/',
)):
# Something else; might be needed
result['notes'].add('Unknown file')
result['filename_unknown'] = filename
def handle_entrypoints(result, config):
"""Look at a parsed entrypoints config, update "result" accordingly"""
for section in config.sections():
if section in ('console_scripts', 'gui_scripts'):
# Checked as commands in /usr/bin
pass
elif section in ('distutils.setup_keywords',
'distutils.commands',
'cliff.formatter.show',
'openstack.congressclient.v1',
'fiona.fio_commands',
'python.templating.engines',
'turbomail.transports',
'twine.registered_commands',
'paste.app_factory',
'buildutils.optional_commands',
'zaqarclient.transport',
'apscheduler.triggers',
'zest.releaser.releaser.middle',
'babel.extractors', # babel has this +1 unused pkg
'babel.checkers',
'moksha.consumer',
'cliff.formatter.list',
'openstack.cli.extension', # the CLI should use py3
'beaker.backends', # only beaker has this
'sphinx_themes', # we only keep non leafs
'sphinx.html_themes',
'tw2.widgets', # plugins for a framework, not app
) or section.startswith((
'turbogears.', # plugins for a framework, not app
)):
# Decided to be useless and/or self contained
pass
elif section.startswith('paste.'):
pass
elif (section == 'envisage.plugins'
and result['name'] == 'python2-envisage'):
pass
elif section in ('pytest11', ):
result['keep'] = True
result['notes'].append('Pytest plugin')
result['plugin_pytest'] = section
elif section in ('trac.plugins', ):
result['keep'] = True
result['notes'].append('Trac plugin')
result['plugin_trac'] = section
elif section.startswith('avocado.plugins'):
result['keep'] = True
result['notes'].append('Avocado plugin')
result['plugin_avocado'] = section
elif section.startswith(('pylama.linter', 'flake8')):
result['keep'] = True
result['notes'].append('Flake 8 / PyLama plugin')
result['plugin_pylama'] = section
elif section.startswith('pulp.extensions'):
result['keep'] = True
result['notes'].append('Pulp plugin')
result['plugin_pulp'] = section
elif section == 'certbot.plugins':
result['keep'] = True
            result['notes'].append('Certbot plugin')
result['plugin_certbot'] = section
else:
# Possibly a plugin
result['needs_investigation'] = True
result['plugin_unknown'] = section
def dir_or_exact(filename, patterns):
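    """Return True if `filename` is exactly one of `patterns` or lies
    anywhere under one of them when treated as a directory."""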
patterns = tuple(p[:-1] if p.endswith('/') else p for p in patterns)
dirs = tuple(p + '/' for p in patterns)
return filename.startswith(dirs) or filename in patterns
class SaxFilesHandler(xml.sax.ContentHandler):
def __init__(self):
super().__init__()
self.results = {}
self.filename_parts = None
def startElement(self, name, attrs):
if name == 'package':
name = attrs['name']
self.current_result = {
'name': name,
'arch': attrs['arch'],
'notes': set(),
'ignore': True,
}
elif name == 'version':
_cp = self.current_result
_cp['nevra'] = [
_cp['name'], attrs['epoch'], attrs['ver'], attrs['rel'],
_cp.pop('arch')]
elif name == 'file':
self.filename_parts = []
def endElement(self, name):
if name == 'package':
if not self.current_result.pop('ignore'):
self.current_result['notes'] = sorted(self.current_result['notes'])
log(self.current_result)
self.results[self.current_result['name']] = self.current_result
del self.current_result
elif name == 'file':
filename = ''.join(self.filename_parts)
handle_filename(self.current_result, filename)
self.filename_parts = None
def characters(self, content):
if self.filename_parts is not None:
self.filename_parts.append(content)
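# SAX streaming is used (rather than building a DOM tree) because the repodata
# XML files can be very large; the handlers above accumulate one package at a
# time and drop it again as soon as it has been classified.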
class SaxPrimaryHandler(xml.sax.ContentHandler):
def __init__(self):
super().__init__()
self._sources = collections.defaultdict(set)
self.name_parts = None
self.source_parts = None
@property
def sources(self):
return {k: list(v) for k, v in self._sources.items()}
def startElement(self, name, attrs):
if name == 'package' and attrs['type'] == 'rpm':
self.current_result = {}
elif name == 'name':
self.name_parts = []
elif name == 'rpm:sourcerpm':
self.source_parts = []
def endElement(self, name):
if name == 'package':
log(self.current_result)
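            # e.g. 'python-foo-1.0-3.fc29.src.rpm'.rsplit('-', 2)[0] == 'python-foo'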
source = self.current_result['source'].rsplit('-', 2)[0]
self._sources[source].add(self.current_result['name'])
del self.current_result
elif name == 'name':
self.current_result['name'] = ''.join(self.name_parts)
self.name_parts = None
elif name == 'rpm:sourcerpm':
self.current_result['source'] = ''.join(self.source_parts)
self.source_parts = None
def characters(self, content):
if self.name_parts is not None:
self.name_parts.append(content)
elif self.source_parts is not None:
self.source_parts.append(content)
@click.command(name='check-drops')
@click.option('-f', '--filelist', type=click.File('rb'),
**xml_option_args['filelists'],
help='Location of the filelist xml.gz file '
'(required if not found automatically)')
@click.option('-p', '--primary', type=click.File('rb'),
**xml_option_args['primary'],
help='Location of the primary xml.gz file '
'(required if not found automatically)')
@click.option('--cache-sax/--no-cache-sax',
help='Use cached results of filelist parsing, if available '
'(crude; use when hacking on other parts of the code)')
@click.option('--cache-rpms/--no-cache-rpms',
help='Use previously downloaded RPMs '
'(crude; use when hacking on other parts of the code)')
@click.pass_context
def check_drops(ctx, filelist, primary, cache_sax, cache_rpms):
"""Check packages that should be dropped from the distribution."""
db = ctx.obj['db']
cache_dir.mkdir(exist_ok=True)
# Analyze filelists.xml.gz and primary.xml.gz
cache_path = cache_dir / 'sax_results.json'
if (cache_sax and cache_path.exists()):
with cache_path.open('r') as f:
results, sources = json.load(f)
else:
filelist = gzip.GzipFile(fileobj=filelist, mode='r')
handler = SaxFilesHandler()
xml.sax.parse(filelist, handler)
results = handler.results
primary = gzip.GzipFile(fileobj=primary, mode='r')
handler = SaxPrimaryHandler()
xml.sax.parse(primary, handler)
sources = handler.sources
with cache_path.open('w') as f:
json.dump([results, sources], f)
log('Packages considered: ', len(results))
# For packages with entrypoints, download the corresponding RPM
entrypoint_packages = []
for name, result in results.items():
entrypoints = result.get('entrypoints')
if entrypoints and not result.get('keep'):
entrypoint_packages.append(name)
log('Packages with interesting entrypoints: ', len(entrypoint_packages))
rpm_dl_path = cache_dir / 'rpm_cache'
if rpm_dl_path.exists() and not cache_rpms:
shutil.rmtree(rpm_dl_path)
rpm_dl_path.mkdir(exist_ok=True)
subprocess.run(
['dnf', 'download', '--repo=rawhide', '--',
*entrypoint_packages],
cwd=rpm_dl_path,
stdout=sys.stderr,
check=True)
# Analyze entrypoints from downloaded RPMs
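    # Each entrypoint file is pulled out of its RPM without unpacking the
    # whole archive; the subprocess pipeline below is roughly equivalent to
    # the shell command (illustrative):
    #   rpm2cpio pkg.rpm | cpio -i --to-stdout ./path/to/entry_points.txt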
for rpm_path in rpm_dl_path.iterdir():
proc = subprocess.run(
['rpm', '-q', '--qf', '%{name}', '-p', rpm_path],
stdout=subprocess.PIPE,
check=True)
name = proc.stdout.decode('utf-8')
result = results.get(name)
if result:
for entrypoint in result.get('entrypoints'):
                rpm2cpio_proc = subprocess.Popen(
                    ['rpm2cpio', rpm_path],
                    stdout=subprocess.PIPE)
                cpio_proc = subprocess.run(
                    ['cpio', '-i', '--to-stdout', '.' + entrypoint],
                    stdout=subprocess.PIPE,
                    stdin=rpm2cpio_proc.stdout,
                    check=True)
                if rpm2cpio_proc.wait() != 0:
                    raise Exception('rpm2cpio failed for %s' % (rpm_path,))
config = configparser.ConfigParser()
if not cpio_proc.stdout:
result.setdefault('empty_entrypoints', []).append(entrypoint)
result['needs_investigation'] = True
result['keep'] = True
continue
try:
config.read_string(cpio_proc.stdout.decode('utf-8'))
except configparser.Error as e:
result.setdefault('bad_entrypoints', {})[entrypoint] = str(e)
result['needs_investigation'] = True
result['keep'] = True
continue
handle_entrypoints(result, config)
result['entrypoints_handled'] = True
# Adjust "needs_investigation" for unknown files and unhandled entrypoints
for name, result in results.items():
if not result.get('keep'):
            entrypoints = result.get('entrypoints')
            if entrypoints:
                if not result.pop('entrypoints_handled', False):
                    result['notes'].append('Entrypoints not handled')
                    result['needs_investigation'] = True
if result.get('filename_unknown'):
result['needs_investigation'] = True
# Set legacy_leaf flags
query = db.query(tables.RPM)
for rpm in query:
# TODO: better way to match portingdb entry to package name
name = rpm.rpm_name.rsplit('-', 2)[0]
result = results.get(name)
if result:
result['legacy_leaf'] = rpm.legacy_leaf
# hardcoded packages
# catfish is seriously mispackaged,
# see https://src.fedoraproject.org/rpms/catfish/pull-request/1
if 'catfish' in results:
results['catfish']['needs_investigation'] = True
# rpkg needs to stay for 3rd party consumers
results['python2-rpkg']['keep'] = True
for result in results.values():
if result.get('needs_investigation'):
result['verdict'] = 'investigate'
elif result.get('keep'):
result['verdict'] = 'keep'
elif result.get('legacy_leaf'):
result['verdict'] = 'drop_now'
else:
result['verdict'] = 'drop_later'
# Set sources and determine retirement action
for name, result in results.items():
result['source'], *_ = (s for s, p in sources.items() if name in p)
for source, pkgs in sources.items():
local_results = [r for r in results.values() if r['name'] in pkgs]
if len(local_results) < len(pkgs):
# subpackages we know nothing about
source_verdict = 'keep'
elif all(r['verdict'] == 'drop_now' for r in local_results):
source_verdict = 'retire_now'
elif all(r['verdict'].startswith('drop_') for r in local_results):
source_verdict = 'retire_later'
else:
source_verdict = 'keep'
for result in local_results:
result['source_verdict'] = source_verdict
# Output it all
print(json.dumps(results, indent=2))
with open(cache_dir / ('results.json'), 'w') as f:
json.dump(results, f, indent=2)
with open(cache_dir / ('results-sources.json'), 'w') as f:
json.dump(sources, f, indent=2)
log('\nBinary packages:')
stats_counter = collections.Counter(r['verdict'] for r in results.values())
for package, number in stats_counter.most_common():
log('{}: {}'.format(number, package))
for verdict in stats_counter:
filtered = {n: r for n, r in results.items() if r['verdict'] == verdict}
with open(cache_dir / ('results-' + verdict + '.json'), 'w') as f:
json.dump(filtered, f, indent=2)
with open(cache_dir / ('results-' + verdict + '.txt'), 'w') as f:
for name in filtered:
print(name, file=f)
log('\nSource packages:')
    # we will lose some information here, but that is OK for stats
source_results = {result['source']: result for result in results.values()}
stats_counter = collections.Counter(r['source_verdict'] for r in source_results.values())
for package, number in stats_counter.most_common():
log('{}: {}'.format(number, package))
for verdict in stats_counter:
if verdict == 'keep':
continue
filtered = {n: r for n, r in results.items() if r['source_verdict'] == verdict}
with open(cache_dir / ('results-' + verdict + '-srpms.json'), 'w') as f:
json.dump(filtered, f, indent=2)
with open(cache_dir / ('results-' + verdict + '-srpms.txt'), 'w') as f:
for name in set(r['source'] for r in filtered.values()):
print(name, file=f)
| |
# Copyright (c) 2006-2013 Regents of the University of Minnesota.
# For licensing terms, see the file LICENSE.
import gc
import os
import sys
import time
import conf
import g
from grax.access_level import Access_Level
from grax.item_manager import Item_Manager
from gwis.query_overlord import Query_Overlord
from item import link_value
from item.feat import branch
from item.feat import byway
from item.util import ratings
from item.util import revision
from item.util.item_query_builder import Item_Query_Builder
from item.util.item_type import Item_Type
from util_.log_progger import Debug_Progress_Logger
from util_ import db_glue
from util_ import mem_usage
from util_ import misc
log = g.log.getLogger('tgraph_base')
__all__ = ('Trans_Graph_Base',)
class Trans_Graph_Base(object):
'Transportation network as a graph.'
__slots__ = (
'route_daemon',
#
# The user on behalf of whom we're loading the map. Usually, this is the
# anonymous public user.
'username',
#
# The branch hierarchy of the road network being loaded.
'branch_hier',
#
# Either revision.Current to follow the head, or revision.Historic to
# just load a static graph at the specified revision.
'revision',
#
# Maximum revision ID represented in the current graph. Used to
# incrementally update the street network when a new revision is saved.
'rid_max',
#
# In CcpV1, the ratings model is managed by the callers that make the
# transit graphs. But we only ever use the same ratings.Predictor()
# class, and we always have one Predictor per Trans_Graph. It makes
# things a little easier to just have the graph also manage the model.
'ratings',
#
      # Rather than keeping a collection of heavyweight byways, we use a
      # somewhat lighter-weight collection of route_steps.
'step_lookup',
#
# We keep a handle to the db connection while updating so that we can be
# canceled.
'update_db',
)
# *** Constructor
def __init__(self, route_daemon,
username=None, branch_hier=None, revision_=None):
self.route_daemon = route_daemon
self.username = username or route_daemon.cli_opts.username
self.branch_hier = branch_hier or route_daemon.cli_args.branch_hier
self.revision = revision_ or route_daemon.cli_args.revision
g.assurt(self.username and self.branch_hier and self.revision)
g.assurt(isinstance(self.revision, revision.Current)
or isinstance(self.revision, revision.Historic))
self.update_db = None
self.destroy()
# *** Memory management
   # Clean up any memory that the garbage collector won't. This is a no-op for
# the p1 route finder, since it uses only Pure Python Objects. But for the
# p2 route finder, we need to actively destroy C-objects.
def destroy(self):
self.rid_max = 0
self.ratings = None
self.step_lookup = dict()
if self.update_db is not None:
self.update_db.close()
self.update_db = None
# *** Public interface: load (or update) the transportation network.
#
def load(self, keep_running=None):
# Get a handle to the db. We'll get a fresh handle if updating again.
# Bug 2688: ERROR: could not serialize access due to concurrent update
# This used to happen because we used a serializable trans-
# action, which we no longer do. Serializable transactions
# are an annoyance; it's better to do assertive locking.
# NO: self.update_db = db_glue.new(trans_serializable=True)
# Just get a new db handle, which sets up the transaction as read
      # committed, so we only see data from the time the db handle is created. So
# once we call new(), our view of the data won't change. And since we're
# updating a specific revision to Current(), we'll be a-okay, since the
# data view doesn't change. But once it's done, we'll want to re-check
# the latest revision ID and see if we have to update again. Which we do,
# in load_wrap, by recycling the cursor...
self.update_db = db_glue.new()
try:
         self.load_wrap(keep_running=keep_running)
finally:
self.update_db.close()
self.update_db = None
#
def load_wrap(self, keep_running=None):
# If there's a save while we're updating, we'll want to update again.
update_again = True
while update_again:
update_again = False
# Check the last revision, or just load for the first time.
load_graph = False
if isinstance(self.revision, revision.Current):
rid_latest = revision.Revision.revision_max(self.update_db)
if self.rid_max != rid_latest:
g.assurt(rid_latest > self.rid_max)
load_graph = True
else:
g.assurt(isinstance(self.revision, revision.Historic))
rid_latest = self.revision.rid
if self.rid_max == 0:
               # First time loading.
load_graph = True
else:
# We've already loaded, and since it's Historic, nothing to do.
g.assurt(rid_latest == self.rid_max)
if load_graph:
try:
qb_curr = self.load_make_qb_new(rid_latest)
if qb_curr is not None:
self.load_really(qb_curr, keep_running)
else:
# qb_curr is only None if there's nothing
# to do, which we've already checked for.
g.assurt(False)
except g.Ccp_Shutdown, e:
raise
except Exception, e:
log.error('load: Unexpected error: %s' % (str(e),))
raise
# See if the map was saved while we were updating.
if isinstance(self.revision, revision.Current):
# Recycle the cursor, so we can see if the revision changed.
# (This is a trick; rollback the cursor and get a fresh one,
# since the old cursor only shows data from when our
# transaction began.)
self.update_db.transaction_rollback()
# Check the latest revision ID.
rid_latest = revision.Revision.revision_max(self.update_db)
# Update again if different.
if self.rid_max != rid_latest:
g.assurt(rid_latest > self.rid_max)
update_again = True
else:
log.debug('state_update: skipping update.')
g.assurt(not update_again)
#
def load_really(self, qb_curr, keep_running=None):
'''Load the transport network from the database.'''
g.check_keep_running(keep_running)
t0_all = time.time()
usage_0 = None
if conf.debug_mem_usage:
usage_0 = mem_usage.get_usage_mb()
log.info('load: mem_usage: beg: %.2f Mb' % (usage_0,))
# Load ratings.
# NOTE: To find its usage, search graph.ratings.
if self.ratings is None:
g.assurt(isinstance(qb_curr.revision, revision.Historic))
self.ratings = ratings.Predictor(self)
# Load all ratings or just update what's changed since we last checked.
self.ratings.load(qb_curr.db, keep_running=keep_running)
# Load byways, and attrs and tags.
try:
if self.route_daemon.cli_opts.regions:
qb_curr.filters.filter_by_regions = (
self.route_daemon.cli_opts.regions)
except AttributeError:
pass
log.debug('load: calling load_feats_and_attcs...')
prog_log = Debug_Progress_Logger(log_freq=25000)
if isinstance(qb_curr.revision, revision.Historic):
qb_curr.item_mgr.load_feats_and_attcs(qb_curr, byway,
'search_by_network', self.add_byway_loaded, prog_log,
heavyweight=False, fetch_size=0, keep_running=keep_running)
else:
g.assurt(isinstance(qb_curr.revision, revision.Updated))
qb_curr.item_mgr.update_feats_and_attcs(qb_curr, byway,
'search_by_network', self.add_byway_updated, prog_log,
heavyweight=False, fetch_size=0, keep_running=keep_running)
# Add transit.
self.load_make_graph_add_transit(qb_curr)
# All done loading.
conf.debug_log_mem_usage(log, usage_0, 'tgraph_base.load_really')
log.info(
'/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\')
log.info('load: complete: for %s in %s'
% (qb_curr.revision.short_name(),
misc.time_format_elapsed(t0_all),))
log.info(
'/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\/*\\')
qb_curr.definalize()
qb_curr = None
#
def add_byway_loaded(self, qb, bway, prog_log):
# For the initial load, we won't fetch deleted or restricted items,
# but we will while updating.
if ( (not bway.deleted)
and (not bway.is_disconnected)
and (bway.access_level_id <= Access_Level.client)
and (not bway.tagged.intersection(
byway.Geofeature_Layer.controlled_access_tags))
and (bway.geofeature_layer_id
not in byway.Geofeature_Layer.controlled_access_gfids)
# FIXME/BUG nnnn: See also geofeature.control_of_access.
):
# MEMORY MANAGEMENT: Maybe only fetch geometry once the route is
# computed? For now the route finder bloats itself by loading all the
# geometry for everything into memory.
# BUG nnnn: Reduce memory usage, leave geometry in the database?
# ?? bway.geometry_svg = None
self.load_make_graph_insert_new(bway)
#
def add_byway_updated(self, qb, bway, prog_log):
# If we've loaded the map at least once, remove the old version of this
# byway before inserting the new one. For help debugging, we emit a
# warning on removal if the byway doesn't exist.
log.verbose('add_byway_updated: bid: %d / eid: %d / %s'
% (bway.beg_node_id, bway.fin_node_id, str(bway)))
g.assurt((bway.version > 1) or (not bway.deleted))
self.load_make_graph_remove_old(bway)
self.add_byway_loaded(qb, bway, prog_log)
# *** Helper functions: make the Item_Query_Builder object.
#
def load_make_qb_new(self, rid_latest):
g.assurt(rid_latest > 0)
rid_min = self.rid_max
self.rid_max = rid_latest
if isinstance(self.revision, revision.Current):
if rid_min > 0:
update_only = True
else:
g.assurt(isinstance(self.revision, revision.Historic))
self.rid_max = self.revision.rid
rev = None
branch_hier = None
if rid_min == self.rid_max:
# The caller should already have checked that we have work to do.
log.error('load_make_qb_new: rid_min == self.rid_max')
rev_hist = None
else:
# We always need a historic revision, since we always update the attr
# and tag cache.
rev_hist = revision.Historic(self.rid_max)
# If rid_min is already set, do an Update.
if rid_min > 0:
log.debug('load_make_qb_new: fr. %d to %d'
% (rid_min, self.rid_max,))
g.assurt(isinstance(self.revision, revision.Current))
# If we've already loaded byways, we're updating the map,
# and we want to fetch changed byways, including deleted or
# restricted-access byways, so we can remove those edges from the
# transportation graph.
rev_fetch = revision.Updated(rid_min, self.rid_max)
else:
# We're loading the map for the first time.
rev_fetch = rev_hist
qb_fetch = None
if rev_hist is not None:
branch_hier = branch.Many.branch_hier_build(self.update_db,
self.branch_hier[0][0], rev_hist)
qb_fetch = Item_Query_Builder(self.update_db, self.username,
branch_hier, rev_fetch)
# The Item_Manager class will make a table of all changed items by
# stack_id, and it'll join that against a normal Historic query, so
# we need to keep the username for the Historic query.
# NO:
# if isinstance(rev_fetch, revision.Updated):
# qb_fetch.username = None
# qb_fetch.filters.gia_userless = True
# Because we're using revision.Updated, we need to tell search_get_sql
# not to worry.
qb_fetch.request_is_local = True
qb_fetch.request_is_script = False # True if user running it.
# This populates the user gids and sets up geometry queries. Neither
# of which should be necessary.
Query_Overlord.finalize_query(qb_fetch)
if rev_fetch != rev_hist:
qb_hist = Item_Query_Builder(self.update_db, self.username,
branch_hier, rev_hist)
Query_Overlord.finalize_query(qb_hist)
else:
qb_hist = qb_fetch
# Load the link_value caches for the byways, since we need tags and
# attributes for the cost function.
qb_fetch.item_mgr = Item_Manager()
# NOTE: Whether rev is Historic or Updated, we'll load attrs and tags
# for a specific revision ID. For Historic, we'll load them for the
# historic rev ID, and for Updated, we'll load 'em for rid_max.
# BUG nnnn: With too many tags... we'll want to have a service
# running to handle web requests (so they can always be resident)?
# bahh...
qb_fetch.item_mgr.load_cache_attachments(qb_hist)
return qb_fetch
# *** Helper functions: add and remove byways from the transportation graph.
#
def load_make_graph_insert_new(self, new_byway):
g.assurt(False) # Abstract
#
def load_make_graph_remove_old(self, old_byway):
g.assurt(False) # Abstract
#
def load_make_graph_add_transit(self, qb):
log.debug('load: transit not enabled for this finder.')
n_steps_already_in_lookup = 0
#
def step_lookup_append(self, byway_stack_id, rt_step):
#if byway_stack_id not in self.step_lookup:
# self.step_lookup[byway_stack_id] = list()
self.step_lookup.setdefault(byway_stack_id, list())
if rt_step not in self.step_lookup[byway_stack_id]:
self.step_lookup[byway_stack_id].append(rt_step)
else:
log.warning('step_lookup_append: already in lookup: stk: %s / %s'
% (byway_stack_id, rt_step,))
Trans_Graph_Base.n_steps_already_in_lookup += 1
if Trans_Graph_Base.n_steps_already_in_lookup < 3:
pass
#
def byway_has_tags(self, b, tags):
try:
byway_stack_id = b.stack_id
except AttributeError:
byway_stack_id = int(b)
try:
byway_tags = self.step_lookup_get(byway_stack_id).tagged
has_tags = (len(set(tags).intersection(byway_tags)) > 0)
except KeyError:
has_tags = False
return has_tags
#
def step_lookup_get(self, byway_stack_id):
return self.step_lookup[byway_stack_id][0]
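# Illustrative sketch (assumed shape): step_lookup maps a byway stack ID to
# the list of route steps that traverse that byway, e.g.,
#   self.step_lookup = {12345: [rt_step_a, rt_step_b,],}
# so step_lookup_get() returns the first step recorded for a given byway.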
# ***
# ***
"""Test zha switch."""
from unittest.mock import call, patch
import pytest
import zigpy.profiles.zha as zha
import zigpy.zcl.clusters.general as general
import zigpy.zcl.foundation as zcl_f
from homeassistant.components.switch import DOMAIN
from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE
from .common import (
async_enable_traffic,
async_find_group_entity_id,
async_test_rejoin,
find_entity_id,
get_zha_gateway,
send_attributes_report,
)
from tests.common import mock_coro
ON = 1
OFF = 0
IEEE_GROUPABLE_DEVICE = "01:2d:6f:00:0a:90:69:e8"
IEEE_GROUPABLE_DEVICE2 = "02:2d:6f:00:0a:90:69:e8"
@pytest.fixture
def zigpy_device(zigpy_device_mock):
"""Device tracker zigpy device."""
endpoints = {
1: {
"in_clusters": [general.Basic.cluster_id, general.OnOff.cluster_id],
"out_clusters": [],
"device_type": 0,
}
}
return zigpy_device_mock(endpoints)
@pytest.fixture
async def coordinator(hass, zigpy_device_mock, zha_device_joined):
"""Test zha light platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [],
"out_clusters": [],
"device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
}
},
ieee="00:15:8d:00:02:32:4f:32",
nwk=0x0000,
node_descriptor=b"\xf8\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
@pytest.fixture
async def device_switch_1(hass, zigpy_device_mock, zha_device_joined):
"""Test zha switch platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [general.OnOff.cluster_id],
"out_clusters": [],
"device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
}
},
ieee=IEEE_GROUPABLE_DEVICE,
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
@pytest.fixture
async def device_switch_2(hass, zigpy_device_mock, zha_device_joined):
"""Test zha switch platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [general.OnOff.cluster_id],
"out_clusters": [],
"device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
}
},
ieee=IEEE_GROUPABLE_DEVICE2,
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
async def test_switch(hass, zha_device_joined_restored, zigpy_device):
"""Test zha switch platform."""
zha_device = await zha_device_joined_restored(zigpy_device)
cluster = zigpy_device.endpoints.get(1).on_off
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
assert entity_id is not None
assert hass.states.get(entity_id).state == STATE_OFF
await async_enable_traffic(hass, [zha_device], enabled=False)
# test that the switch was created and that its state is unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, [zha_device])
# test that the state has changed from unavailable to off
assert hass.states.get(entity_id).state == STATE_OFF
# turn on at switch
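# (the report dict maps ZCL attribute IDs to values; attribute 0 is the
# OnOff cluster's on_off attribute, so 0: 1 reports the switch as on)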
await send_attributes_report(hass, cluster, {1: 0, 0: 1, 2: 2})
assert hass.states.get(entity_id).state == STATE_ON
# turn off at switch
await send_attributes_report(hass, cluster, {1: 1, 0: 0, 2: 2})
assert hass.states.get(entity_id).state == STATE_OFF
# turn on from HA
with patch(
"zigpy.zcl.Cluster.request",
return_value=mock_coro([0x00, zcl_f.Status.SUCCESS]),
):
# turn on via UI
await hass.services.async_call(
DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(cluster.request.mock_calls) == 1
assert cluster.request.call_args == call(
False, ON, (), expect_reply=True, manufacturer=None, tsn=None
)
# turn off from HA
with patch(
"zigpy.zcl.Cluster.request",
return_value=mock_coro([0x01, zcl_f.Status.SUCCESS]),
):
# turn off via UI
await hass.services.async_call(
DOMAIN, "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(cluster.request.mock_calls) == 1
assert cluster.request.call_args == call(
False, OFF, (), expect_reply=True, manufacturer=None, tsn=None
)
# test joining a new switch to the network and HA
await async_test_rejoin(hass, zigpy_device, [cluster], (1,))
async def async_test_zha_group_switch_entity(
hass, device_switch_1, device_switch_2, coordinator
):
"""Test the switch entity for a ZHA group."""
zha_gateway = get_zha_gateway(hass)
assert zha_gateway is not None
zha_gateway.coordinator_zha_device = coordinator
coordinator._zha_gateway = zha_gateway
device_switch_1._zha_gateway = zha_gateway
device_switch_2._zha_gateway = zha_gateway
member_ieee_addresses = [device_switch_1.ieee, device_switch_2.ieee]
# test creating a group with 2 members
zha_group = await zha_gateway.async_create_zigpy_group(
"Test Group", member_ieee_addresses
)
await hass.async_block_till_done()
assert zha_group is not None
assert len(zha_group.members) == 2
for member in zha_group.members:
assert member.ieee in member_ieee_addresses
entity_id = async_find_group_entity_id(hass, DOMAIN, zha_group)
assert hass.states.get(entity_id) is not None
group_cluster_on_off = zha_group.endpoint[general.OnOff.cluster_id]
dev1_cluster_on_off = device_switch_1.endpoints[1].on_off
dev2_cluster_on_off = device_switch_2.endpoints[1].on_off
# test that the switches were created and that they are unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, zha_group.members)
# test that the switches were created and are off
assert hass.states.get(entity_id).state == STATE_OFF
# turn on from HA
with patch(
"zigpy.zcl.Cluster.request",
return_value=mock_coro([0x00, zcl_f.Status.SUCCESS]),
):
# turn on via UI
await hass.services.async_call(
DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True
)
assert len(group_cluster_on_off.request.mock_calls) == 1
assert group_cluster_on_off.request.call_args == call(
False, ON, (), expect_reply=True, manufacturer=None, tsn=None
)
assert hass.states.get(entity_id).state == STATE_ON
# turn off from HA
with patch(
"zigpy.zcl.Cluster.request",
return_value=mock_coro([0x01, zcl_f.Status.SUCCESS]),
):
# turn off via UI
await hass.services.async_call(
DOMAIN, "turn_off", {"entity_id": entity_id}, blocking=True
)
assert len(group_cluster_on_off.request.mock_calls) == 1
assert group_cluster_on_off.request.call_args == call(
False, OFF, (), expect_reply=True, manufacturer=None, tsn=None
)
assert hass.states.get(entity_id).state == STATE_OFF
# test some of the group logic to make sure we key off states correctly
await dev1_cluster_on_off.on()
await dev2_cluster_on_off.on()
# test that group switch is on
assert hass.states.get(entity_id).state == STATE_ON
await dev1_cluster_on_off.off()
# test that group switch is still on
assert hass.states.get(entity_id).state == STATE_ON
await dev2_cluster_on_off.off()
# test that group switch is now off
assert hass.states.get(entity_id).state == STATE_OFF
await dev1_cluster_on_off.on()
# test that group switch is now back on
assert hass.states.get(entity_id).state == STATE_ON
#!/usr/bin/env python
"""
Usage: make_lite.py <wrapped_routines_file> <lapack_dir>
Typical invocation:
make_lite.py wrapped_routines /tmp/lapack-3.x.x
Requires the following to be on the path:
* f2c
* patch
"""
from __future__ import division, absolute_import, print_function
import sys
import os
import subprocess
import shutil
import fortran
import clapack_scrub
PY2 = sys.version_info < (3, 0)
if PY2:
from distutils.spawn import find_executable as which
else:
from shutil import which
# Arguments to pass to f2c. You'll always want -A for ANSI C prototypes
# Others of interest: -a to not make variables static by default
# -C to check array subscripts
F2C_ARGS = ['-A', '-Nx800']
# The header to add to the top of the f2c_*.c file. Note that dlamch_() calls
# will be replaced by the macros below by clapack_scrub.scrub_source()
HEADER = '''\
/*
NOTE: This is generated code. Look in Misc/lapack_lite for information on
remaking this file.
*/
#include "f2c.h"
#ifdef HAVE_CONFIG
#include "config.h"
#else
extern doublereal dlamch_(char *);
#define EPSILON dlamch_("Epsilon")
#define SAFEMINIMUM dlamch_("Safe minimum")
#define PRECISION dlamch_("Precision")
#define BASE dlamch_("Base")
#endif
extern doublereal dlapy2_(doublereal *x, doublereal *y);
/*
f2c knows the exact rules for precedence, and so omits parentheses where not
strictly necessary. Since this is generated code, we don't really care if
it's readable, and we know what is written is correct. So don't warn about
them.
*/
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wparentheses"
#endif
'''
class FortranRoutine(object):
"""Wrapper for a Fortran routine in a file.
"""
type = 'generic'
def __init__(self, name=None, filename=None):
self.filename = filename
if name is None:
root, ext = os.path.splitext(filename)
name = root
self.name = name
self._dependencies = None
def dependencies(self):
if self._dependencies is None:
deps = fortran.getDependencies(self.filename)
self._dependencies = [d.lower() for d in deps]
return self._dependencies
def __repr__(self):
return "FortranRoutine({!r}, filename={!r})".format(self.name, self.filename)
class UnknownFortranRoutine(FortranRoutine):
"""Wrapper for a Fortran routine for which the corresponding file
is not known.
"""
type = 'unknown'
def __init__(self, name):
FortranRoutine.__init__(self, name=name, filename='<unknown>')
def dependencies(self):
return []
class FortranLibrary(object):
"""Container for a bunch of Fortran routines.
"""
def __init__(self, src_dirs):
self._src_dirs = src_dirs
self.names_to_routines = {}
def _findRoutine(self, rname):
rname = rname.lower()
for s in self._src_dirs:
ffilename = os.path.join(s, rname + '.f')
if os.path.exists(ffilename):
return self._newFortranRoutine(rname, ffilename)
return UnknownFortranRoutine(rname)
def _newFortranRoutine(self, rname, filename):
return FortranRoutine(rname, filename)
def addIgnorableRoutine(self, rname):
"""Add a routine that we don't want to consider when looking at
dependencies.
"""
rname = rname.lower()
routine = UnknownFortranRoutine(rname)
self.names_to_routines[rname] = routine
def addRoutine(self, rname):
"""Add a routine to the library.
"""
self.getRoutine(rname)
def getRoutine(self, rname):
"""Get a routine from the library. Will add if it's not found.
"""
unique = []
rname = rname.lower()
routine = self.names_to_routines.get(rname, unique)
if routine is unique:
routine = self._findRoutine(rname)
self.names_to_routines[rname] = routine
return routine
def allRoutineNames(self):
"""Return the names of all the routines.
"""
return list(self.names_to_routines.keys())
def allRoutines(self):
"""Return all the routines.
"""
return list(self.names_to_routines.values())
def resolveAllDependencies(self):
"""Try to add routines to the library to satisfy all the dependencies
for each routine in the library.
Returns a set of routine names that have the dependencies unresolved.
"""
done_this = set()
last_todo = set()
while True:
todo = set(self.allRoutineNames()) - done_this
if todo == last_todo:
break
for rn in todo:
r = self.getRoutine(rn)
deps = r.dependencies()
for d in deps:
self.addRoutine(d)
done_this.add(rn)
last_todo = todo
return todo
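# Usage sketch (hypothetical routine names): resolution iterates to a fixed
# point, adding dependencies of dependencies until a pass finds nothing new:
#   lib = FortranLibrary(['/tmp/lapack/SRC'])
#   lib.addRoutine('dgesv')        # pulls in its dependencies, e.g. dgetrf
#   unresolved = lib.resolveAllDependencies()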
class LapackLibrary(FortranLibrary):
def _newFortranRoutine(self, rname, filename):
routine = FortranLibrary._newFortranRoutine(self, rname, filename)
if 'blas' in filename.lower():
routine.type = 'blas'
elif 'install' in filename.lower():
routine.type = 'config'
elif rname.startswith('z'):
routine.type = 'z_lapack'
elif rname.startswith('c'):
routine.type = 'c_lapack'
elif rname.startswith('s'):
routine.type = 's_lapack'
elif rname.startswith('d'):
routine.type = 'd_lapack'
else:
routine.type = 'lapack'
return routine
def allRoutinesByType(self, typename):
routines = sorted((r.name, r) for r in self.allRoutines() if r.type == typename)
return [a[1] for a in routines]
def printRoutineNames(desc, routines):
print(desc)
for r in routines:
print('\t%s' % r.name)
def getLapackRoutines(wrapped_routines, ignores, lapack_dir):
blas_src_dir = os.path.join(lapack_dir, 'BLAS', 'SRC')
if not os.path.exists(blas_src_dir):
blas_src_dir = os.path.join(lapack_dir, 'blas', 'src')
lapack_src_dir = os.path.join(lapack_dir, 'SRC')
if not os.path.exists(lapack_src_dir):
lapack_src_dir = os.path.join(lapack_dir, 'src')
install_src_dir = os.path.join(lapack_dir, 'INSTALL')
if not os.path.exists(install_src_dir):
install_src_dir = os.path.join(lapack_dir, 'install')
library = LapackLibrary([install_src_dir, blas_src_dir, lapack_src_dir])
for r in ignores:
library.addIgnorableRoutine(r)
for w in wrapped_routines:
library.addRoutine(w)
library.resolveAllDependencies()
return library
def getWrappedRoutineNames(wrapped_routines_file):
routines = []
ignores = []
with open(wrapped_routines_file) as fo:
for line in fo:
line = line.strip()
if not line or line.startswith('#'):
continue
if line.startswith('IGNORE:'):
line = line[7:].strip()
ig = line.split()
ignores.extend(ig)
else:
routines.append(line)
return routines, ignores
types = {'blas', 'lapack', 'd_lapack', 's_lapack', 'z_lapack', 'c_lapack', 'config'}
def dumpRoutineNames(library, output_dir):
for typename in {'unknown'} | types:
routines = library.allRoutinesByType(typename)
filename = os.path.join(output_dir, typename + '_routines.lst')
with open(filename, 'w') as fo:
for r in routines:
deps = r.dependencies()
fo.write('%s: %s\n' % (r.name, ' '.join(deps)))
def concatenateRoutines(routines, output_file):
with open(output_file, 'w') as output_fo:
for r in routines:
with open(r.filename, 'r') as fo:
source = fo.read()
output_fo.write(source)
class F2CError(Exception):
pass
def runF2C(fortran_filename, output_dir):
fortran_filename = fortran_filename.replace('\\', '/')
try:
subprocess.check_call(
["f2c"] + F2C_ARGS + ['-d', output_dir, fortran_filename]
)
except subprocess.CalledProcessError:
raise F2CError
def scrubF2CSource(c_file):
with open(c_file) as fo:
source = fo.read()
source = clapack_scrub.scrubSource(source, verbose=True)
with open(c_file, 'w') as fo:
fo.write(HEADER)
fo.write(source)
def ensure_executable(name):
# `which` returns None when the executable is missing; it does not raise.
if which(name) is None:
raise SystemExit(name + ' not found')
def main():
if len(sys.argv) != 3:
print(__doc__)
return
# Make sure that patch and f2c are found on path
ensure_executable('f2c')
ensure_executable('patch')
wrapped_routines_file = sys.argv[1]
lapack_src_dir = sys.argv[2]
output_dir = os.path.join(os.path.dirname(__file__), 'build')
try:
shutil.rmtree(output_dir)
except OSError:
pass
os.makedirs(output_dir)
wrapped_routines, ignores = getWrappedRoutineNames(wrapped_routines_file)
library = getLapackRoutines(wrapped_routines, ignores, lapack_src_dir)
dumpRoutineNames(library, output_dir)
for typename in types:
fortran_file = os.path.join(output_dir, 'f2c_%s.f' % typename)
c_file = fortran_file[:-2] + '.c'
print('creating %s ...' % c_file)
routines = library.allRoutinesByType(typename)
concatenateRoutines(routines, fortran_file)
# apply the patch
patch_file = os.path.basename(fortran_file) + '.patch'
if os.path.exists(patch_file):
subprocess.check_call(['patch', '-u', fortran_file, patch_file])
print("Patched {}".format(fortran_file))
try:
runF2C(fortran_file, output_dir)
except F2CError:
print('f2c failed on %s' % fortran_file)
break
scrubF2CSource(c_file)
# patch any changes needed to the C file
c_patch_file = c_file + '.patch'
if os.path.exists(c_patch_file):
subprocess.check_call(['patch', '-u', c_file, c_patch_file])
print()
for fname in os.listdir(output_dir):
if fname.endswith('.c'):
print('Copying ' + fname)
shutil.copy(
os.path.join(output_dir, fname),
os.path.dirname(__file__),
)
if __name__ == '__main__':
main()
#!/usr/bin/env python
# contains ~*~ magic ~*~ installation code.
import os, sys, time
C4_FINDAPR_PATH = "./lib/c4/cmake/FindApr.cmake"
SETUP_DEBUG = True
DEBUG = True
#################
# GETAPR_LIST #
#################
def getAPR_list() :
cmd = 'find / -name "apr_file_io.h" | grep -v "Permission denied" > out.txt'
print "Finding Apache Runtime library using command: " + cmd
time.sleep(5) # message to user
os.system( cmd )
fo = open( "out.txt", "r" )
pathList = []
for path in fo :
path = path.strip()
path_split = path.split( "/" )
path_split = path_split[:len(path_split)-1]
path = "/".join( path_split )
pathList.append( path )
os.system( 'rm out.txt' )
return pathList
##########################
# SET PYLDFI VIZ PATHS #
##########################
# set the p5.js and p5.dom.js paths in pyLDFIviz.html
def set_PYLDFI_VIZ_paths() :
p5_paths = getP5_paths()
p5dom_paths = getP5dom_paths()
if DEBUG :
print "ALL p5.js paths :"
print p5_paths
print "ALL p5.dom.js paths :"
print p5dom_paths
chosen_p5 = None
chosen_p5dom = None
# pick a p5.js path
for path in p5_paths :
if "/lib/p5.js" in path :
chosen_p5 = path
# pick a p5.dom.js path
for path in p5dom_paths :
if "/addons/p5.dom.js" in path and not "test/unit" in path :
chosen_p5dom = path
# sanity checks
if not chosen_p5 :
sys.exit( ">>> FATAL ERROR : could not find valid p5.js path. Aborting..." )
if not chosen_p5dom :
sys.exit( ">>> FATAL ERROR : could not find valid p5.dom.js path. Aborting..." )
if DEBUG :
print "chosen_p5 = " + chosen_p5
print "chosen_p5dom = " + chosen_p5dom
# make custom pyLDFIviz.html file
html_tag = "<html>\n"
head_tag = " <head>\n"
p5_line = ' <script language="javascript" type="text/javascript" src="' + chosen_p5 + '"></script>\n'
p5dom_line = ' <script language="javascript" src="' + chosen_p5dom + '"></script>\n'
uiFile = "./src/ui/pyLDFIviz.html"
tempFile = "./src/templateFiles/pyLDFIviz_temp.html"
f = open( uiFile, "w" )
f.write( html_tag )
f.write( head_tag )
f.write( p5_line )
f.write( p5dom_line )
f2 = open( tempFile, "r" )
for line in f2 :
f.write( line )
f2.close()
f.close()
#####################
# GETP5 DOM PATHS #
#####################
def getP5dom_paths() :
cmd_p5dom = 'find / -name "p5.dom.js" | grep -v "Permission denied" > p5dom_out.txt'
print "Finding p5.dom.js using command: " + cmd_p5dom
time.sleep(5) # message to user
# execute find p5dom
os.system( cmd_p5dom )
# collect paths from save file
fo = open( "p5dom_out.txt", "r" )
pathList = []
for path in fo :
path = path.strip()
pathList.append( path )
os.system( 'rm p5dom_out.txt' )
return pathList
#################
# GETP5 PATHS #
#################
def getP5_paths() :
cmd_p5 = 'find / -name "p5.js" | grep -v "Permission denied" > p5_out.txt'
print "Finding p5.js using command: " + cmd_p5
time.sleep(5) # message to user
# execute find p5
os.system( cmd_p5 )
# collect paths from save file
fo = open( "p5_out.txt", "r" )
pathList = []
for path in fo :
path = path.strip()
pathList.append( path )
os.system( 'rm p5_out.txt' )
return pathList
########################
# DE DUPLICATE SETUP #
########################
# this script modifies the contents of FindAPR.cmake in the c4 submodule
# prior to compilation.
# need to ensure only one SET command exists in FindAPR.cmake after discovering
# a valid apr library.
def deduplicateSetup() :
# http://stackoverflow.com/questions/4710067/deleting-a-specific-line-in-a-file-python
# protect against multiple runs of setup
f = open( C4_FINDAPR_PATH, "r+" )
d = f.readlines()
f.seek(0)
for i in d:
if not "set(APR_INCLUDES" in i :
f.write(i)
f.truncate()
f.close()
#############
# SET APR #
#############
def setAPR( path ) :
# set one of the candidate APR paths
newCmd = 'set(APR_INCLUDES "' + path + '")'
cmd = "(head -48 " + C4_FINDAPR_PATH + "; " + "echo '" + newCmd + "'; " + "tail -n +49 " + C4_FINDAPR_PATH + ")" + " > temp ; mv temp " + C4_FINDAPR_PATH + ";"
os.system( cmd )
os.system( "make c4" )
##########################
# CHECK FOR MAKE ERROR #
##########################
def checkForMakeError( path ) :
flag = True
if os.path.exists( os.path.dirname(os.path.abspath( __file__ )) + "/c4_out.txt" ) :
fo = open( "./c4_out.txt", "r" )
for line in fo :
line = line.strip()
if containsError( line ) :
print "failed path apr = " + path
flag = False
fo.close()
os.system( "rm ./c4_out.txt" ) # clean up
return flag
####################
# CONTAINS ERROR #
####################
def containsError( line ) :
if "error generated." in line :
return True
#elif "Error" in line :
# return True
else :
return False
##########
# MAIN #
##########
def main() :
print "Running pyLDFI setup with args : \n" + str(sys.argv)
# clean any existing libs
os.system( "make clean" )
# download submodules
os.system( "make get-submodules" )
# ---------------------------------------------- #
# run make for orik
os.system( "make orik" )
## ---------------------------------------------- #
## run make for c4
## find candidate apr locations
#apr_path_cands = getAPR_list()
#
## set correct apr location
#flag = True
#for path in apr_path_cands :
# try :
# deduplicateSetup()
# except IOError :
# setAPR( path )
# setAPR( path )
# try :
# flag = checkForMakeError( path )
# except IOError :
# print "./c4_out.txt does not exist"
#
# # found a valid apr library
# if flag :
# print ">>> C4 installed successfully <<<"
# print "... Done installing C4 Datalog evaluator"
# print "C4 install using APR path : " + path
# print "done installing c4."
# break
# else :
# sys.exit( "failed to install C4. No fully functioning APR found." )
# ---------------------------------------------- #
# set p5 file paths
#set_PYLDFI_VIZ_paths()
###################
# CHECK PY DEPS #
###################
# check python package dependencies
def checkPyDeps() :
print "*******************************"
print " CHECKING PYTHON DEPENDECIES "
print "*******************************"
# argparse
import argparse
if argparse.__name__ :
print "argparse...verified"
# pyparsing
import pyparsing
if pyparsing.__name__ :
print "pyparsing...verified"
# sqlite3
import sqlite3
if sqlite3.__name__ :
print "sqlite3...verified"
# pydatalog
#import pyDatalog
#if pyDatalog.__name__ :
# print "pyDatalog...verified"
# pydot
import pydot
if pydot.__name__ :
print "pydot...verified"
# mpmath
import mpmath
if mpmath.__name__ :
print "mpmath...verified"
# sympy
import sympy
if not sympy.__version__ == "1.0.1.dev" :
sys.exit( "FATAL ERROR : unsupported version of package 'sympy' : version " + sympy.__version__ + "\nPyLDFI currently only supports sympy version 1.0.1.dev.\nAborting..." )
else :
print "sympy...verified"
# pycosat
import pycosat
if pycosat.__name__ :
print "pycosat...verified"
print "All python dependencies installed! Yay! =D"
print "*******************************"
print "*******************************"
return None
##############################
# MAIN THREAD OF EXECUTION #
##############################
checkPyDeps()
main()
#########
# EOF #
#########
"""File: dataset.py
Description:
This module contains all the data model used in analysis
History:
0.2.4: ! fix bugs in writecsv
0.2.3: + csv output for Dataset
0.2.2: + Dataset, DataItem classes
0.2.1: add lots of things
0.1.1: Add class DataColumnIterator
0.1.0: The first version
"""
__version__ = '0.2.4'
__author__ = 'SpaceLis'
import random, csv
import re
from anatool.dm.db import CONN_POOL, GEOTWEET
from anatool.analysis.text_util import geo_rect
from annotation import Cache
IDPTN = re.compile(r'[a-z0-9]+')
#---------------------------------------------------------- List Operators
def rand_select(cnt, ratio):
"""Generate two list of random numbers below cnt, the length of
the two lists are at the ratio"""
lst = range(0, cnt)
random.shuffle(lst)
pivot = int(cnt * ratio)
return lst[:pivot], lst[pivot:]
def ratio_select(cnt, ratio):
"""Generate two list of consecutive numbers according to the ratio"""
return range(0, int(cnt * ratio)), range(int(cnt * ratio), cnt)
def list_split(lst, cnt):
"""splite list into cnt parts"""
pivot = list()
length = len(lst) / cnt
pos = 0
for idx in range(cnt - 1):
pivot.append(lst[pos:pos+length])
pos += length
pivot.append(lst[pos:])
return pivot
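# Example (illustrative): the remainder is folded into the last chunk:
#   list_split([1, 2, 3, 4, 5, 6, 7, 8, 9], 4)
#   # -> [[1, 2], [3, 4], [5, 6], [7, 8, 9]]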
#---------------------------------------------------------- Dataset
class Dataset(dict):
"""Dataset is a column oriented data storage. The key is the title of the
column while the value is the list of data in the column (key) in a
sequential order.
"""
def __init__(self, *arg, **karg):
super(Dataset, self).__init__(*arg, **karg)
self.sortedkey = None
def size(self):
"""the size of the dataset, i.e., the number of rows
"""
if len(self) == 0:
return 0
return len(self.itervalues().next())
def append(self, item):
"""Add a new data item into the dataset
This is just for mocking list().append()
"""
_size = self.size()
for key in item.iterkeys():
if key not in self:
# TODO add default values for unseen key
self[key] = [0 for idx in range(_size)]
self[key].append(item[key])
def extend(self, itemlist):
"""Extend the dataset with the itemlist
This is just for mocking list().extend()
"""
for item in itemlist:
self.append(item)
return self
def distinct(self, key):
"""Return the value set of the key
"""
vset = set()
for val in self[key]:
vset.add(val)
return [val for val in vset]
def sorted_items(self, key):
""" Access the sequence of items in sorted manner based on key
"""
indices = sorted(range(self.size()), key=lambda x:self[key][x])
for idx in indices:
yield self.item(idx)
def groupfunc(self, key, pkey, func):
"""Return the output of a function to the values grouped by key
"""
rst = DataItem()
indices = sorted(range(self.size()), key=lambda x:self[key][x])
temp = list()
# start from the key type's default value (e.g., 0 or '') as the group sentinel
idx_val = type(self[key][0])()
for idx in indices:
if idx_val != self[key][idx]:
if len(temp)>0:
rst[idx_val] = func(temp)
temp = list()
idx_val = self[key][idx]
temp.append(self[pkey][idx])
rst[idx_val] = func(temp)
return rst
def merge(self, dset):
"""Merge the keys and values into this Dataset
"""
if self.size() != dset.size():
raise TypeError, "size doesn't match"
for key in dset.iterkeys():
if key not in self:
self[key] = dset[key]
else:
raise TypeError, "Key conflicting"
def item(self, idx):
"""Return the item at the position idx
"""
rst = DataItem()
for key in self.iterkeys():
rst[key] = self[key][idx]
return rst
def writecsv(self, filename, **kargs):
""" write this dataset into a csv file
@arg filename the path to the csv file
@kargs headers whether the output includes headers
@kargs transposed whether to write in transposed form
@kargs delimiter the delimiter used in csv file
@kargs quotechar the quotechar used in csv file
"""
_kargs = {
'headers': True,
'transposed' : False,
'delimiter' : ';',
'quotechar' : '`'}
_kargs.update(kargs)
keys = [key for key in self.iterkeys()]
with open(filename, 'wb') as fout:
csvwriter = csv.writer(fout, delimiter=_kargs['delimiter'],
quotechar=_kargs['quotechar'])
if not _kargs['transposed']:
if _kargs['headers']:
csvwriter.writerow([key for key in keys])
for item in self:
csvwriter.writerow([item[key] for key in keys])
else:
for key in keys:
csvwriter.writerow(([key, ] if _kargs['headers'] else list())
+ self[key])
@classmethod
def readcsv(cls, filename, **kargs):
"""read from a csv file
@arg filename the path to the csv file
@kargs headers whether the input includes headers
@kargs transposed whether the file is in transposed form
@kargs delimiter the delimiter used in csv file
@kargs quotechar the quotechar used in csv file
"""
_kargs = {
'headers': True,
'transposed' : False,
'delimiter' : ';',
'quotechar' : '`'}
_kargs.update(kargs)
#TODO finish read from a csv file
def __iter__(self):
"""Iterating items in the dataset
"""
for idx in range(self.size()):
yield self.item(idx)
class PartialIterator(object):
"""Iterator by an index list"""
def __init__(self, dset, idc):
super(PartialIterator, self).__init__()
self._dset, self._idc = dset, idc
self._idx = 0
def __iter__(self):
"""Make it iterative"""
for idx in self._idc:
yield self._dset.item(idx)
class DataItem(dict):
"""Keeps data"""
def __init__(self, *arg, **karg):
super(DataItem, self).__init__(*arg, **karg)
def accum_dist(self, src):
"""merge two distribution of words"""
for key in src.iterkeys():
if key in self.iterkeys():
self[key] += src[key]
else:
self[key] = src[key]
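# Minimal usage sketch (assumed workflow, mirroring the __main__ examples
# at the bottom of this module):
#   d = Dataset()
#   d.append(DataItem({'id': 'a', 'val': 1}))
#   d.append(DataItem({'id': 'b', 'val': 2}))
#   d.size()           # -> 2 rows
#   d.distinct('id')   # -> ['a', 'b'] (set-based, order not guaranteed)
#   [item for item in d]  # -> row-wise DataItem views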
#---------------------------------------------------------- Database Access
def loadrows(config, cols, wheres=None, table='sample', other=''):
"""Load tweets to list on conditions"""
query = 'SELECT ' + \
((', '.join(cols)) if cols!='*' else '*') \
+ ' FROM ' + table + \
((' WHERE ' + ' AND '.join(wheres)) if wheres else '') \
+ ' ' + other
cur = CONN_POOL.get_cur(config)
print query
cur.execute(query)
res = Dataset()
for row in cur:
twt = DataItem()
for key in cols:
twt[key] = row[key]
res.append(twt)
print 'Count: {0}'.format(cur.rowcount)
return res
def qloadrows(config, query):
"""Load tweets to list on conditions"""
cur = CONN_POOL.get_cur(config)
print query
cur.execute(query)
print 'Count: {0}'.format(cur.rowcount)
return Dataset().extend([row for row in cur])
@Cache()
def place_name(pid, dbconf=GEOTWEET):
"""Return place name given a pid"""
if IDPTN.match(pid) is None:
return pid
cur = CONN_POOL.get_cur(dbconf)
cur.execute("select name from place where id=%s", (pid,))
return cur.fetchone()['name']
def city_random(pid, cnt=10000):
"""Randomly select some tweets from the city"""
return qloadrows(GEOTWEET, \
'SELECT sample.id as id, text, sample.lat, sample.lng \
from sample left join place on sample.place_id = place.id \
where place.superior_id = \'{0}\' LIMIT {1}'.format(pid, cnt))
def type_random(typ, cnt=10000):
"""Randomly select some tweets from the place of the type"""
return qloadrows(GEOTWEET, \
'SELECT sample.id as id, place_id, text, sample.lat, sample.lng \
from sample left join place on sample.place_id = place.id \
where place.super_category = \'{0}\' LIMIT {1}'.format(typ, cnt))
def load_by_place(src):
"""Return samples according to the place list provided by src"""
twt_lst = Dataset()
for place in src:
print place.strip()
q_twt = qloadrows(GEOTWEET, \
'SELECT sample.id, place_id, place.name, \
text, place.lat, place.lng, category, super_category \
FROM sample left join place on sample.place_id = place.id \
where place_id = \'{0}\''.format(place.strip()))
twt_lst.extend(q_twt)
print len(twt_lst)
return twt_lst
def load_by_region(region):
"""Return samples according to the region"""
return loadrows(GEOTWEET, \
('id', 'place_id', 'text', 'lat', 'lng'),
('MBRContains({0}, geo)'.format(geo_rect(*region)),))
if __name__ == '__main__':
#region2arff('test.arff', ((40.75,-74.02),(40.70,-73.97)))
#d = Dataset([{'id':'a', 'val':1},
#{'id':'b', 'val':1}, {'id':'a', 'val':2}])
#print d
#print d.groupfunc('id', len)
#print list_split([1,2,3,4,5,6,7,8,9], 4)
print place_name('0007a1bd373a2805')
#!/usr/bin/env python3
"""Neurodocker is a command-line interface to generate custom Dockerfiles and
Singularity recipes.
For help generating Dockerfiles and Singularity recipes, run
$ neurodocker generate docker --help
$ neurodocker generate singularity --help
"""
from argparse import Action
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
import json
import logging
import sys
from neurodocker import __version__
from neurodocker import utils
from neurodocker.generators import Dockerfile
from neurodocker.generators import SingularityRecipe
from neurodocker.generators.common import _installation_implementations
logger = logging.getLogger(__name__)
# https://stackoverflow.com/a/9028031/5666087
class OrderedArgs(Action):
"""Object to preserve order in which command-line arguments are given."""
def __call__(self, parser, namespace, values, option_string=None):
if "ordered_args" not in namespace:
setattr(namespace, "ordered_args", [])
previous = namespace.ordered_args
previous.append((self.dest, values))
setattr(namespace, "ordered_args", previous)
def _list_of_kv(kv):
"""Split string `kv` at first equals sign."""
ll = kv.split("=")
ll[1:] = ["=".join(ll[1:])]
return ll
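# e.g. _list_of_kv("FOO=bar=baz") -> ["FOO", "bar=baz"]; only the first
# equals sign splits, so values may themselves contain "=".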
def _add_generate_common_arguments(parser):
p = parser
p.add_argument("-b", "--base", help="Base Docker image. E.g., debian:stretch")
p.add_argument(
"-p", "--pkg-manager", choices={"apt", "yum"}, help="Linux package manager."
)
p.add_argument(
"--add-to-entrypoint",
action=OrderedArgs,
help=(
"Add a command to the file /neurodocker/startup.sh, which is the"
" container's default entrypoint."
),
)
p.add_argument(
"--copy",
action=OrderedArgs,
nargs="+",
help="Copy files into container. Use format <src>... <dest>",
)
p.add_argument(
"--install",
action=OrderedArgs,
nargs="+",
help=(
"Install system packages with apt-get or yum, depending on the"
" package manager specified."
),
)
p.add_argument(
"--entrypoint",
action=OrderedArgs,
help="Set the container's entrypoint (Docker) / append to runscript"
" (Singularity)",
)
p.add_argument(
"-e",
"--env",
action=OrderedArgs,
nargs="+",
type=_list_of_kv,
help="Set environment variable(s). Use the format KEY=VALUE",
)
p.add_argument(
"-r", "--run", action=OrderedArgs, help="Run a command when building container"
)
p.add_argument("--run-bash", action=OrderedArgs, help="Run a command in bash")
p.add_argument(
"-u",
"--user",
action=OrderedArgs,
help="Switch current user (creates user if necessary)",
)
p.add_argument("-w", "--workdir", action=OrderedArgs, help="Set working directory")
# To generate from file.
p.add_argument(
"file",
nargs="?",
help="Generate file from JSON. Overrides other `generate` arguments",
)
p.add_argument("--json", action="store_true", help="Print Neurodocker JSON spec")
# Other arguments (no order).
p.add_argument(
"-o",
"--output",
dest="output",
help="If specified, save Dockerfile to file with this name.",
)
p.add_argument(
"--no-print",
dest="no_print",
action="store_true",
help="Do not print the generated file",
)
_ndeb_servers = ", ".join(
_installation_implementations["neurodebian"]._servers.keys()
)
# Software package options.
pkgs_help = {
"all": "Install software packages. Each argument takes a list of"
" key=value pairs. Where applicable, the default installation"
" behavior is to install by downloading and uncompressing"
" binaries. Some programs can be built from source.",
"afni": "Install AFNI. Valid keys are version (required), method,"
" install_path, install_r, install_r_pkgs, install_python2,"
" and install_python3. Only the latest version and version"
" 17.2.02 are supported at this time.",
"ants": "Install ANTs. Valid keys are version (required), method"
" install_path, cmake_opts, and make_opts. Version can be a "
" git commit hash if building from source.",
"convert3d": "Install Convert3D. Valid keys are version (required),"
" method, and install_path.",
"dcm2niix": "Install dcm2niix. Valid keys are version, method,"
" install_path, cmake_opts, and make_opts",
"freesurfer": "Install FreeSurfer. Valid keys are version (required),"
" method, install_path, and exclude_paths. A FreeSurfer"
" license is required to run the software and is not"
" provided by Neurodocker.",
"fsl": "Install FSL. Valid keys are version (required), method,"
" install_path, and exclude_paths.",
"matlabmcr": "Install Matlab Compiler Runtime. Valid keys are version,"
" method, and install_path",
"miniconda": "Install Miniconda. Valid keys are install_path,"
" env_name, conda_install, pip_install, conda_opts,"
" pip_opts, activate (default false), and version"
" (defaults to latest). The options conda_install and"
" pip_install accept strings of packages: conda_install="
'"python=3.6 numpy traits".',
"mricron": "Install MRIcron. valid keys are version (required), method, and"
" install_path.",
"mrtrix3": "Install MRtrix3. Valid keys are version (required),"
" method, and install_path",
"ndfreeze": "Use the NeuroDebian command `nd_freeze` to freeze the apt"
" sources to a certain date. This will only have an effect"
" on Debian and NeuroDebian APT sources.",
"neurodebian": "Add NeuroDebian repository. Valid keys are "
"os_codename (e.g., zesty), server (e.g., usa-nh), and"
" full (if true, use non-free packages). Valid download"
" servers are {}.".format(_ndeb_servers),
"spm12": "Install SPM12 and its dependency, Matlab Compiler Runtime."
" Valid keys are version and install_path.",
"minc": "Install MINC. Valid keys is version (required), method, and"
" install_path. Only version 1.9.15 is supported at this"
" time.",
"petpvc": "Install PETPVC. Valid keys are version (required), method,"
" and install_path.",
"vnc": "Install a VNC server. Valid keys are passwd (required),"
" start_at_runtime, and geometry.",
}
pkgs = p.add_argument_group(
title="software package arguments", description=pkgs_help["all"]
)
for pkg in _installation_implementations.keys():
if pkg == "_header":
continue
flag = "--{}".format(pkg)
# MRtrix3 does not need any arguments by default.
nargs = "*" if pkg == "mrtrix3" else "+"
pkgs.add_argument(
flag,
dest=pkg,
nargs=nargs,
action=OrderedArgs,
metavar="",
type=_list_of_kv,
help=pkgs_help[pkg],
)
def _add_generate_docker_arguments(parser):
"""Add arguments to `parser` for sub-command `generate docker`."""
p = parser
# Arguments that should be ordered.
p.add_argument(
"--add",
action=OrderedArgs,
nargs="+",
help="Dockerfile ADD instruction. Use format <src>... <dest>",
)
p.add_argument(
"--arg",
action=OrderedArgs,
nargs="+",
type=_list_of_kv,
help="Dockerfile ARG instruction. Use format KEY[=DEFAULT_VALUE] ...",
)
p.add_argument(
"--cmd", action=OrderedArgs, nargs="+", help="Dockerfile CMD instruction."
)
p.add_argument(
"--expose", nargs="+", action=OrderedArgs, help="Dockerfile EXPOSE instruction."
)
p.add_argument(
"--label",
action=OrderedArgs,
nargs="+",
type=_list_of_kv,
help="Dockerfile LABEL instruction.",
)
p.add_argument(
"--volume", action=OrderedArgs, nargs="+", help="Dockerfile VOLUME instruction."
)
def _add_generate_singularity_arguments(parser):
"""Add arguments to `parser` for sub-command `generate singularity`."""
pass
def _add_reprozip_trace_arguments(parser):
"""Add arguments to `parser` for sub-command `reprozip-trace`."""
p = parser
p.add_argument("container", help="Running container in which to trace commands.")
p.add_argument("commands", nargs="+", help="Command(s) to trace.")
p.add_argument(
"--dir",
"-d",
dest="packfile_save_dir",
default=".",
help=("Directory in which to save pack file. Default " "is current directory."),
)
def _add_reprozip_merge_arguments(parser):
"""Add arguments to `parser` for sub-command `reprozip-merge`."""
p = parser
p.add_argument("outfile", help="Filepath to merged pack file.")
p.add_argument("pack_files", nargs="+", help="Pack files to merge.")
def create_parser():
"""Return command-line argument parser."""
parser = ArgumentParser(
description=__doc__, formatter_class=RawDescriptionHelpFormatter
)
verbosity_choices = ("debug", "info", "warning", "error", "critical")
parser.add_argument("-v", "--verbosity", choices=verbosity_choices)
parser.add_argument(
"-V",
"--version",
action="version",
version=("neurodocker version {}".format(__version__)),
)
subparsers = parser.add_subparsers(
dest="subparser_name", title="subcommands", description="valid subcommands"
)
# `neurodocker generate` parsers.
generate_parser = subparsers.add_parser("generate", help="generate recipes")
generate_subparsers = generate_parser.add_subparsers(
dest="subsubparser_name", title="subcommands", description="valid subcommands"
)
generate_docker_parser = generate_subparsers.add_parser(
"docker", help="generate Dockerfile"
)
generate_singularity_parser = generate_subparsers.add_parser(
"singularity", help="generate Singularity recipe"
)
_add_generate_common_arguments(generate_docker_parser)
_add_generate_docker_arguments(generate_docker_parser)
_add_generate_common_arguments(generate_singularity_parser)
_add_generate_singularity_arguments(generate_singularity_parser)
# `neurodocker reprozip` parsers.
reprozip_parser = subparsers.add_parser("reprozip", help="")
reprozip_subparsers = reprozip_parser.add_subparsers(
dest="subsubparser_name", title="subcommands", description="valid subcommands"
)
reprozip_trace_parser = reprozip_subparsers.add_parser(
"trace", help="minify container for traced command(s)"
)
reprozip_merge_parser = reprozip_subparsers.add_parser(
"merge", help="merge reprozip pack files"
)
_add_reprozip_trace_arguments(reprozip_trace_parser)
_add_reprozip_merge_arguments(reprozip_merge_parser)
# Add verbosity option to both parsers. How can this be done with parents?
for p in (generate_parser, reprozip_trace_parser, reprozip_merge_parser):
p.add_argument("-v", "--verbosity", choices=verbosity_choices)
return parser
def parse_args(args):
"""Return namespace of command-line arguments."""
parser = create_parser()
namespace = parser.parse_args(args)
if namespace.subparser_name is None:
parser.print_help()
parser.exit(1)
elif namespace.subparser_name == "generate" and namespace.subsubparser_name is None:
parser.print_help()
parser.exit(1)
elif namespace.subparser_name == "reprozip" and namespace.subsubparser_name is None:
parser.print_help()
parser.exit(1)
elif namespace.subparser_name == "generate" and namespace.subsubparser_name in {
"docker",
"singularity",
}:
_validate_generate_args(namespace)
return namespace
def generate(namespace):
"""Run `neurodocker generate`."""
if namespace.file is None:
specs = utils._namespace_to_specs(namespace)
else:
specs = utils.load_json(namespace.file)
recipe_objs = {"docker": Dockerfile, "singularity": SingularityRecipe}
recipe_obj = recipe_objs[namespace.subsubparser_name](specs)
if namespace.json:
print(json.dumps(specs))
elif not namespace.no_print:
print(recipe_obj.render())
if namespace.output:
recipe_obj.save(filepath=namespace.output)
def reprozip_trace(namespace):
"""Run `neurodocker reprozip`."""
from neurodocker.reprozip import ReproZipMinimizer
local_packfile_path = ReproZipMinimizer(**vars(namespace)).run()
logger.info("Saved pack file on the local host:\n{}" "".format(local_packfile_path))
def reprozip_merge(namespace):
"""Run `neurodocker reprozip merge`."""
from neurodocker.reprozip import merge_pack_files
merge_pack_files(namespace.outfile, namespace.pack_files)
def _validate_generate_args(namespace):
if namespace.file is None and (
namespace.base is None or namespace.pkg_manager is None
):
raise ValueError(
"-b/--base and -p/--pkg-manager are required if not"
" generating from JSON file."
)
def main(args=None):
"""Main program function."""
if args is None:
namespace = parse_args(sys.argv[1:])
else:
namespace = parse_args(args)
if namespace.verbosity is not None:
utils.set_log_level(namespace.verbosity)
logger.debug(vars(namespace))
subparser_functions = {
"docker": generate,
"singularity": generate,
"trace": reprozip_trace,
"merge": reprozip_merge,
}
subparser_functions[namespace.subsubparser_name](namespace)
if __name__ == "__main__": # pragma: no cover
main()
'''
(*)~---------------------------------------------------------------------------
Pupil - eye tracking platform
Copyright (C) 2012-2017 Pupil Labs
Distributed under the terms of the GNU
Lesser General Public License (LGPL v3.0).
See COPYING and COPYING.LESSER for license details.
---------------------------------------------------------------------------~(*)
'''
import time
import logging
import uvc
from .base_backend import InitialisationError, Base_Source, Base_Manager
# check versions for our own dependencies as they are fast-changing
assert uvc.__version__ >= '0.91'
# logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class UVC_Source(Base_Source):
"""
Camera Capture is a class that encapsulates uvc.Capture.
"""
def __init__(self, g_pool, frame_size, frame_rate, name=None, preferred_names=(), uid=None, uvc_controls={}):
super().__init__(g_pool)
self.uvc_capture = None
self._restart_in = 3
assert name or preferred_names or uid
self.devices = uvc.Device_List()
devices_by_name = {dev['name']: dev for dev in self.devices}
# if uid is supplied we init with that
if uid:
try:
self.uvc_capture = uvc.Capture(uid)
except uvc.OpenError:
logger.warning("No avalilable camera found that matched {}".format(preferred_names))
except uvc.InitError:
logger.error("Camera failed to initialize.")
except uvc.DeviceNotFoundError:
logger.warning("No camera found that matched {}".format(preferred_names))
# otherwise we use name or preferred_names
else:
if name:
preferred_names = (name,)
else:
pass
assert preferred_names
# try to init by name
for name in preferred_names:
for d_name in devices_by_name.keys():
if name in d_name:
uid_for_name = devices_by_name[d_name]['uid']
try:
self.uvc_capture = uvc.Capture(uid_for_name)
except uvc.OpenError:
logger.info("{} matches {} but is already in use or blocked.".format(uid_for_name, name))
except uvc.InitError:
logger.error("Camera failed to initialize.")
else:
break
# check if we were successful
if not self.uvc_capture:
logger.error("Init failed. Capture is started in ghost mode. No images will be supplied.")
self.name_backup = preferred_names
self.frame_size_backup = frame_size
self.frame_rate_backup = frame_rate
else:
self.configure_capture(frame_size, frame_rate, uvc_controls)
self.name_backup = (self.name,)
self.frame_size_backup = frame_size
self.frame_rate_backup = frame_rate
def configure_capture(self, frame_size, frame_rate, uvc_controls):
# Set camera defaults. Override with previous settings afterwards
if 'C930e' in self.uvc_capture.name:
logger.debug('Timestamp offset for c930 applied: -0.1sec')
self.ts_offset = -0.1
else:
self.ts_offset = 0.0
# UVC setting quirks:
controls_dict = dict([(c.display_name, c) for c in self.uvc_capture.controls])
self.frame_size = frame_size
self.frame_rate = frame_rate
for c in self.uvc_capture.controls:
try:
if c.display_name == 'White Balance temperature':
c.display_name = 'White Balance Temperature'
elif c.display_name == 'White Balance temperature,Auto':
c.display_name = 'White Balance Temperature, Auto'
c.value = uvc_controls[c.display_name]
except KeyError:
logger.debug('No UVC setting "{}" found from settings.'.format(c.display_name))
try:
controls_dict['Auto Focus'].value = 0
except KeyError:
pass
if ("Pupil Cam1" in self.uvc_capture.name or
"USB2.0 Camera" in self.uvc_capture.name):
if ("ID0" in self.uvc_capture.name or "ID1" in self.uvc_capture.name):
self.uvc_capture.bandwidth_factor = 1.3
try: controls_dict['Auto Exposure Priority'].value = 0
except KeyError: pass
try: controls_dict['Auto Exposure Mode'].value = 1
except KeyError: pass
try:controls_dict['Saturation'].value = 0
except KeyError: pass
try: controls_dict['Absolute Exposure Time'].value = 63
except KeyError: pass
try: controls_dict['Backlight Compensation'].value = 2
except KeyError: pass
try: controls_dict['Gamma'].value = 100
except KeyError: pass
else:
self.uvc_capture.bandwidth_factor = 2.0
try: controls_dict['Auto Exposure Priority'].value = 1
except KeyError: pass
else:
self.uvc_capture.bandwidth_factor = 3.0
try: controls_dict['Auto Focus'].value = 0
except KeyError: pass
def _re_init_capture(self, uid):
current_size = self.uvc_capture.frame_size
current_fps = self.uvc_capture.frame_rate
current_uvc_controls = self._get_uvc_controls()
self.deinit_gui()
self.uvc_capture.close()
self.uvc_capture = uvc.Capture(uid)
self.configure_capture(current_size, current_fps, current_uvc_controls)
self.init_gui()
def _init_capture(self, uid):
self.deinit_gui()
self.uvc_capture = uvc.Capture(uid)
self.configure_capture(self.frame_size_backup, self.frame_rate_backup, self._get_uvc_controls())
self.init_gui()
def _re_init_capture_by_names(self, names):
# burn-in test specific. Do not change text!
self.devices.update()
for d in self.devices:
for name in names:
if d['name'] == name:
logger.info("Found device. {}.".format(name))
if self.uvc_capture:
self._re_init_capture(d['uid'])
else:
self._init_capture(d['uid'])
return
raise InitialisationError('Could not find Camera {} during re-initialization.'.format(names))
def _restart_logic(self):
if self._restart_in <= 0:
if self.uvc_capture:
logger.warning("Capture failed to provide frames. Attempting to reinit.")
self.name_backup = (self.uvc_capture.name,)
self.uvc_capture = None
try:
self._re_init_capture_by_names(self.name_backup)
except (InitialisationError, uvc.InitError):
time.sleep(0.02)
self.deinit_gui()
self.init_gui()
self._restart_in = int(5/0.02)
else:
self._restart_in -= 1
def recent_events(self, events):
try:
frame = self.uvc_capture.get_frame(0.05)
frame.timestamp = self.g_pool.get_timestamp()+self.ts_offset
except uvc.StreamError:
self._recent_frame = None
self._restart_logic()
except (AttributeError, uvc.InitError):
self._recent_frame = None
time.sleep(0.02)
self._restart_logic()
else:
self._recent_frame = frame
events['frame'] = frame
self._restart_in = 3
def _get_uvc_controls(self):
d = {}
if self.uvc_capture:
for c in self.uvc_capture.controls:
d[c.display_name] = c.value
return d
def get_init_dict(self):
d = super().get_init_dict()
d['frame_size'] = self.frame_size
d['frame_rate'] = self.frame_rate
if self.uvc_capture:
d['name'] = self.name
d['uvc_controls'] = self._get_uvc_controls()
else:
d['preferred_names'] = self.name_backup
return d
@property
def name(self):
if self.uvc_capture:
return self.uvc_capture.name
else:
return "Ghost capture"
@property
def frame_size(self):
if self.uvc_capture:
return self.uvc_capture.frame_size
else:
return self.frame_size_backup
@frame_size.setter
def frame_size(self, new_size):
# closest match for size
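# (only the width component is compared when ranking candidate modes)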
sizes = [abs(r[0]-new_size[0]) for r in self.uvc_capture.frame_sizes]
best_size_idx = sizes.index(min(sizes))
size = self.uvc_capture.frame_sizes[best_size_idx]
if tuple(size) != tuple(new_size):
logger.warning("%s resolution capture mode not available. Selected {}.".format(new_size, size))
self.uvc_capture.frame_size = size
self.frame_size_backup = size
@property
def frame_rate(self):
if self.uvc_capture:
return self.uvc_capture.frame_rate
else:
return self.frame_rate_backup
@frame_rate.setter
def frame_rate(self, new_rate):
# closest match for rate
rates = [abs(r-new_rate) for r in self.uvc_capture.frame_rates]
best_rate_idx = rates.index(min(rates))
rate = self.uvc_capture.frame_rates[best_rate_idx]
if rate != new_rate:
logger.warning("{}fps capture mode not available at ({}) on '{}'. Selected {}fps. ".format(
new_rate, self.uvc_capture.frame_size, self.uvc_capture.name, rate))
self.uvc_capture.frame_rate = rate
self.frame_rate_backup = rate
@property
def jpeg_support(self):
return True
@property
def online(self):
return bool(self.uvc_capture)
def init_gui(self):
from pyglui import ui
ui_elements = []
# let's define some helper functions:
def gui_load_defaults():
for c in self.uvc_capture.controls:
try:
c.value = c.def_val
except:
pass
def gui_update_from_device():
for c in self.uvc_capture.controls:
c.refresh()
def set_frame_size(new_size):
self.frame_size = new_size
if self.uvc_capture is None:
ui_elements.append(ui.Info_Text('Capture initialization failed.'))
self.g_pool.capture_source_menu.extend(ui_elements)
return
ui_elements.append(ui.Info_Text('{} Controls'.format(self.name)))
sensor_control = ui.Growing_Menu(label='Sensor Settings')
sensor_control.append(ui.Info_Text("Do not change these during calibration or recording!"))
sensor_control.collapsed = False
image_processing = ui.Growing_Menu(label='Image Post Processing')
image_processing.collapsed = True
sensor_control.append(ui.Selector(
'frame_size', self,
setter=set_frame_size,
selection=self.uvc_capture.frame_sizes,
label='Resolution'
))
def frame_rate_getter():
return (self.uvc_capture.frame_rates, [str(fr) for fr in self.uvc_capture.frame_rates])
sensor_control.append(ui.Selector('frame_rate', self, selection_getter=frame_rate_getter, label='Frame rate'))
for control in self.uvc_capture.controls:
c = None
ctl_name = control.display_name
# now we add controls
if control.d_type == bool:
c = ui.Switch('value', control, label=ctl_name, on_val=control.max_val, off_val=control.min_val)
elif control.d_type == int:
c = ui.Slider('value', control, label=ctl_name, min=control.min_val, max=control.max_val, step=control.step)
elif type(control.d_type) == dict:
selection = [value for name, value in control.d_type.items()]
labels = [name for name, value in control.d_type.items()]
c = ui.Selector('value', control, label=ctl_name, selection=selection, labels=labels)
else:
pass
# if control['disabled']:
# c.read_only = True
# if ctl_name == 'Exposure, Auto Priority':
# # the controll should always be off. we set it to 0 on init (see above)
# c.read_only = True
if c is not None:
if control.unit == 'processing_unit':
image_processing.append(c)
else:
sensor_control.append(c)
ui_elements.append(sensor_control)
if image_processing.elements:
ui_elements.append(image_processing)
ui_elements.append(ui.Button("refresh",gui_update_from_device))
ui_elements.append(ui.Button("load defaults",gui_load_defaults))
self.g_pool.capture_source_menu.extend(ui_elements)
def cleanup(self):
self.devices.cleanup()
self.devices = None
if self.uvc_capture:
self.uvc_capture.close()
self.uvc_capture = None
super().cleanup()
class UVC_Manager(Base_Manager):
"""Manages local USB sources
Attributes:
check_intervall (float): Interval in which to look for new UVC devices
"""
gui_name = 'Local USB'
def __init__(self, g_pool):
super().__init__(g_pool)
self.devices = uvc.Device_List()
def get_init_dict(self):
return {}
def init_gui(self):
from pyglui import ui
ui_elements = []
ui_elements.append(ui.Info_Text('Local UVC sources'))
def dev_selection_list():
default = (None, 'Select to activate')
self.devices.update()
dev_pairs = [default] + [(d['uid'], d['name']) for d in self.devices]
return zip(*dev_pairs)
def activate(source_uid):
if not source_uid:
return
if not uvc.is_accessible(source_uid):
logger.error("The selected camera is already in use or blocked.")
return
settings = {
'frame_size': self.g_pool.capture.frame_size,
'frame_rate': self.g_pool.capture.frame_rate,
'uid': source_uid
}
if self.g_pool.process == 'world':
self.notify_all({'subject': 'start_plugin', "name": "UVC_Source", 'args': settings})
else:
self.notify_all({'subject': 'start_eye_capture', 'target': self.g_pool.process, "name": "UVC_Source", 'args': settings})
ui_elements.append(ui.Selector(
'selected_source',
selection_getter=dev_selection_list,
getter=lambda: None,
setter=activate,
label='Activate source'
))
self.g_pool.capture_selector_menu.extend(ui_elements)
def cleanup(self):
self.deinit_gui()
self.devices.cleanup()
self.devices = None
def recent_events(self, events):
pass
# coding=utf-8
import os
import sys
import time
import logging
import traceback
import configobj
import inspect
# Path Fix
sys.path.append(
os.path.abspath(
os.path.join(
os.path.dirname(__file__), "../")))
import diamond
from diamond.collector import Collector
from diamond.handler.Handler import Handler
from diamond.scheduler import ThreadedScheduler
from diamond.util import load_class_from_name
class Server(object):
"""
Server class loads and starts Handlers and Collectors
"""
def __init__(self, config):
# Initialize Logging
self.log = logging.getLogger('diamond')
# Initialize Members
self.config = config
self.running = False
self.handlers = []
self.modules = {}
self.tasks = {}
# Initialize Scheduler
self.scheduler = ThreadedScheduler()
def load_config(self):
"""
Load the full config
"""
configfile = os.path.abspath(self.config['configfile'])
config = configobj.ConfigObj(configfile)
config['configfile'] = self.config['configfile']
# Merge in handler config files into the main config
if 'handlers_config_path' in config['server']:
files = os.listdir(config['server']['handlers_config_path'])
for filename in files:
configname = os.path.basename(filename)
handlername = configname.split('.')[0]
if handlername not in self.config['handlers']:
config['handlers'][handlername] = configobj.ConfigObj()
configfile = os.path.join(
config['server']['handlers_config_path'],
configname)
hconfig = configobj.ConfigObj(configfile)
if handlername in config['handlers']:
config['handlers'][handlername].merge(hconfig)
else:
config['handlers'][handlername] = hconfig
self.config = config
def load_handler(self, fqcn):
"""
Load Handler class named fqcn
"""
# Load class
cls = load_class_from_name(fqcn)
# Check if cls is subclass of Handler
if cls == Handler or not issubclass(cls, Handler):
raise TypeError("%s is not a valid Handler" % fqcn)
# Log
self.log.debug("Loaded Handler: %s", fqcn)
return cls
def load_handlers(self):
"""
Load handlers
"""
        if isinstance(self.config['server']['handlers'], str):
handlers = [self.config['server']['handlers']]
self.config['server']['handlers'] = handlers
for h in self.config['server']['handlers']:
try:
# Load Handler Class
cls = self.load_handler(h)
# Initialize Handler config
handler_config = configobj.ConfigObj()
# Merge default Handler default config
handler_config.merge(self.config['handlers']['default'])
# Check if Handler config exists
if cls.__name__ in self.config['handlers']:
# Merge Handler config section
handler_config.merge(self.config['handlers'][cls.__name__])
# Initialize Handler class
self.handlers.append(cls(handler_config))
except ImportError:
# Log Error
self.log.debug("Failed to load handler %s. %s", h,
traceback.format_exc())
continue
def load_collector(self, fqcn):
"""
Load Collector class named fqcn
"""
# Load class
cls = load_class_from_name(fqcn)
# Check if cls is subclass of Collector
if cls == Collector or not issubclass(cls, Collector):
raise TypeError("%s is not a valid Collector" % fqcn)
# Log
self.log.debug("Loaded Collector: %s", fqcn)
return cls
def load_include_path(self, path):
"""
Scan for and add paths to the include path
"""
# Verify the path is valid
if not os.path.isdir(path):
return
# Add path to the system path
sys.path.append(path)
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
self.load_include_path(fpath)
def load_collectors(self, path, filter=None):
"""
Scan for collectors to load from path
"""
# Initialize return value
collectors = {}
# Get a list of files in the directory, if the directory exists
if not os.path.exists(path):
raise OSError("Directory does not exist: %s" % path)
if path.endswith('tests') or path.endswith('fixtures'):
return collectors
# Log
self.log.debug("Loading Collectors from: %s", path)
# Load all the files in path
for f in os.listdir(path):
# Are we a directory? If so process down the tree
fpath = os.path.join(path, f)
if os.path.isdir(fpath):
subcollectors = self.load_collectors(fpath)
for key in subcollectors:
collectors[key] = subcollectors[key]
# Ignore anything that isn't a .py file
elif (os.path.isfile(fpath)
and len(f) > 3
and f[-3:] == '.py'
and f[0:4] != 'test'
and f[0] != '.'):
# Check filter
if filter and os.path.join(path, f) != filter:
continue
modname = f[:-3]
# Stat module file to get mtime
st = os.stat(os.path.join(path, f))
mtime = st.st_mtime
# Check if module has been loaded before
if modname in self.modules:
                    # Check if the file mtime is newer than the last loaded version
if mtime <= self.modules[modname]:
# Module hasn't changed
# Log
self.log.debug("Found %s, but it hasn't changed.",
modname)
continue
try:
# Import the module
mod = __import__(modname, globals(), locals(), ['*'])
except ImportError:
# Log error
self.log.error("Failed to import module: %s. %s", modname,
traceback.format_exc())
continue
# Update module mtime
self.modules[modname] = mtime
# Log
self.log.debug("Loaded Module: %s", modname)
# Find all classes defined in the module
for attrname in dir(mod):
attr = getattr(mod, attrname)
                # Only attempt to load attributes that are in fact classes,
                # are Collectors, and are not the base Collector class
if (inspect.isclass(attr)
and issubclass(attr, Collector)
and attr != Collector):
if attrname.startswith('parent_'):
continue
# Get class name
fqcn = '.'.join([modname, attrname])
try:
# Load Collector class
cls = self.load_collector(fqcn)
# Add Collector class
collectors[cls.__name__] = cls
except Exception:
# Log error
self.log.error("Failed to load Collector: %s. %s",
fqcn, traceback.format_exc())
continue
# Return Collector classes
return collectors
def init_collector(self, cls):
"""
Initialize collector
"""
collector = None
try:
# Initialize Collector
collector = cls(self.config, self.handlers)
# Log
self.log.debug("Initialized Collector: %s", cls.__name__)
except Exception:
# Log error
self.log.error("Failed to initialize Collector: %s. %s",
cls.__name__, traceback.format_exc())
# Return collector
return collector
def schedule_collector(self, c, interval_task=True):
"""
Schedule collector
"""
        # Check that the collector instance is valid
if c is None:
self.log.warn("Skipped loading invalid Collector: %s",
c.__class__.__name__)
return
if c.config['enabled'] != True:
self.log.warn("Skipped loading disabled Collector: %s",
c.__class__.__name__)
return
# Get collector schedule
for name, schedule in c.get_schedule().items():
# Get scheduler args
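            # each schedule entry is (callable, args, splay, interval);
            # splay staggers the initial run so collectors don't all fire at once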
func, args, splay, interval = schedule
            # Check if a Collector with the same name has already been scheduled
if name in self.tasks:
self.scheduler.cancel(self.tasks[name])
# Log
self.log.debug("Canceled task: %s", name)
method = diamond.scheduler.method.sequential
if 'method' in c.config:
if c.config['method'] == 'Threaded':
method = diamond.scheduler.method.threaded
elif c.config['method'] == 'Forked':
method = diamond.scheduler.method.forked
# Schedule Collector
if interval_task:
task = self.scheduler.add_interval_task(func,
name,
splay,
interval,
method,
args,
None,
True)
else:
task = self.scheduler.add_single_task(func,
name,
splay,
method,
args,
None)
# Log
self.log.debug("Scheduled task: %s", name)
# Add task to list
self.tasks[name] = task
def run(self):
"""
Load handler and collector classes and then start collectors
"""
# Set Running Flag
self.running = True
# Load handlers
if 'handlers_path' in self.config['server']:
handlers_path = self.config['server']['handlers_path']
self.load_include_path(handlers_path)
self.load_handlers()
# Load config
self.load_config()
# Load collectors
collectors_path = self.config['server']['collectors_path']
self.load_include_path(collectors_path)
collectors = self.load_collectors(collectors_path)
# Setup Collectors
for cls in collectors.values():
# Initialize Collector
c = self.init_collector(cls)
# Schedule Collector
self.schedule_collector(c)
# Start main loop
self.mainloop()
def run_one(self, file):
"""
Run given collector once and then exit
"""
# Set Running Flag
self.running = True
# Load handlers
if 'handlers_path' in self.config['server']:
handlers_path = self.config['server']['handlers_path']
self.load_include_path(handlers_path)
self.load_handlers()
# Overrides collector config dir
collector_config_path = os.path.abspath(os.path.dirname(file))
self.config['server']['collectors_config_path'] = collector_config_path
# Load config
self.load_config()
# Load collectors
self.load_include_path(os.path.dirname(file))
collectors = self.load_collectors(os.path.dirname(file), file)
# Setup Collectors
for cls in collectors.values():
# Initialize Collector
c = self.init_collector(cls)
# Schedule collector
self.schedule_collector(c, False)
# Start main loop
self.mainloop(False)
def mainloop(self, reload=True):
# Start scheduler
self.scheduler.start()
# Log
self.log.info('Started task scheduler.')
# Initialize reload timer
time_since_reload = 0
# Main Loop
while self.running:
time.sleep(1)
time_since_reload += 1
            # Check if it's time to reload collectors
if (reload
and time_since_reload
> int(self.config['server']['collectors_reload_interval'])):
self.log.debug("Reloading config.")
self.load_config()
# Log
self.log.debug("Reloading collectors.")
# Load collectors
collectors_path = self.config['server']['collectors_path']
collectors = self.load_collectors(collectors_path)
# Setup any Collectors that were loaded
for cls in collectors.values():
# Initialize Collector
c = self.init_collector(cls)
# Schedule Collector
self.schedule_collector(c)
# Reset reload timer
time_since_reload = 0
            # If the queue is empty and we won't attempt to reload it, exit
if not reload and len(self.scheduler.sched._queue) == 0:
self.running = False
# Log
self.log.debug('Stopping task scheduler.')
# Stop scheduler
self.scheduler.stop()
# Log
self.log.info('Stopped task scheduler.')
# Log
self.log.debug("Exiting.")
def stop(self):
"""
Close all connections and terminate threads.
"""
# Set Running Flag
self.running = False
| |
# Game python file
import pygame
import button
import random
import time
import translate
import questions
import textbox
import player
import checkbox
import math
import menumusic
import score
def update(game):
pass
# This class implements the dice-rolling system
class Dice:
def __init__(self):
        # set the initial die image to the blank one
        self.image = "assets/img/die0.png"
def onclick(self,game):
if game.get_current_player().did_roll:
return
        # TODO: don't use display.flip here
for x in range(15):
self.newimg = "assets\img\die{}.png".format(random.randrange(1,7))
while self.newimg == self.image:
self.newimg = "assets\img\die{}.png".format(random.randrange(1,7))
self.image = self.newimg
self.draw(game)
pygame.display.flip()
time.sleep(0.05)
        # pick a random number from 1 through 6 and store it in the current player's dice_roll
game.get_current_player().dice_roll = random.randrange(1, 7)
game.get_current_player().did_roll = True
        # set the die image to the value that was rolled
        self.image = "assets/img/die{}.png".format(game.get_current_player().dice_roll)
#Entertainment questions
if game.get_current_player().pos.get_col() == 1:
game.question = random.randrange(1,31)
#History questions
elif game.get_current_player().pos.get_col() == 2:
game.question = random.randrange(31,44)
#Sport questions
elif game.get_current_player().pos.get_col() == 3:
game.question = random.randrange(44,59)
#Geography questions
elif game.get_current_player().pos.get_col() == 4:
game.question = random.randrange(59,70)
game.get_current_player().turn_start = time.clock()
def draw(self,game):
        # draw the die
label = (pygame.font.Font(None, 20)).render(translate.translate("ROLL"),1,(0,0,0))
self.size = (pygame.font.Font(None, 20)).size(translate.translate("ROLL"))
        if not game.get_current_player().did_roll and game.get_current_player().direction is not None:
game.screen.blit(label, (702 - self.size[0]/2, 515))
button.draw_img(game, game.width - 130, game.height - 70, 64, 64, "", 0, self.image, (0,0,0), self.onclick)
def correct_answer(game):
    for x in range(1,4):
        if translate.translate(game.get_current_player().answers[x-1]) == translate.translate("QUESTIONANSWER{}".format(game.question)):
            return x
    # no answer matched; fall back to the first option
    print("Question {} has no matching answer!".format(game.question))
    return 1
class GameLogic:
def __init__(self):
self.dice = Dice()
def draw(self, game):
# draw players in rows
for plr in game.players:
plr.draw()
# draw questions etc
if game.get_current_player().did_roll and not game.get_current_player().did_answer and not game.get_current_player().moves_left:
if not game.get_current_player().did_generate_question:
# remove existing answers
game.get_current_player().answers.clear()
# add new answers
game.get_current_player().answers.append("QUESTION{}_ANSWER1".format(game.question))
game.get_current_player().answers.append("QUESTION{}_ANSWER2".format(game.question))
game.get_current_player().answers.append("QUESTION{}_ANSWER3".format(game.question))
game.get_current_player().answers.append("QUESTION{}".format(game.question))
# do not re-generate question
game.get_current_player().did_generate_question = True
# draw question popup
if not game.get_current_player().isAI:
font = pygame.font.Font(None, 20)
pygame.draw.rect(game.screen,(255,255,255),(24,9,game.width*0.8 + 2,game.height * 0.9 + 2))
# change popup according to category
# entertainment question popup
if game.get_current_player().pos.get_col() == 1:
pygame.draw.rect(game.screen,(255,0,0),(25,10,game.width*0.8,game.height * 0.9))
# history question popup
elif game.get_current_player().pos.get_col() == 2:
pygame.draw.rect(game.screen,(200,200,0),(25,10,game.width*0.8,game.height * 0.9))
# sport question popup
elif game.get_current_player().pos.get_col() == 3:
pygame.draw.rect(game.screen,(52,163,253),(25,10,game.width*0.8,game.height * 0.9))
# geography question popup
elif game.get_current_player().pos.get_col() == 4:
pygame.draw.rect(game.screen,(24,208,27),(25,10,game.width*0.8,game.height * 0.9))
game.screen.blit(font.render(translate.translate(game.get_current_player().answers[3]), 1, (255,255,255)), (32,17))
button.draw(game, game.width * 0.25,162,300,60, translate.translate(game.get_current_player().answers[0]), 20, (0,0,0), (255,255,255), lambda game: question_chosen(game, 1))
button.draw(game, game.width * 0.25,252,300,60, translate.translate(game.get_current_player().answers[1]), 20, (0,0,0), (255,255,255), lambda game: question_chosen(game, 2))
button.draw(game, game.width * 0.25,342,300,60, translate.translate(game.get_current_player().answers[2]), 20, (0,0,0), (255,255,255), lambda game: question_chosen(game, 3))
if math.floor((time.clock() - game.get_current_player().turn_start) / 2) < 12:
menumusic.timer_snd.play(1)
game.screen.blit(pygame.image.load("assets\img\hourglass{}.png".format(math.floor((time.clock() - game.get_current_player().turn_start) / 2))), (600, 40))
else:
question_chosen(game, 5)
else:
if random.randrange(1,4) == 2:
question_chosen(game, correct_answer(game))
else:
question_chosen(game, random.randrange(1, 4))
elif not game.get_current_player().did_roll and not game.get_current_player().did_choose_row:
# draw start buttons
if not game.get_current_player().isAI:
if 1 not in game.chosen:
button.draw(game, 45, game.height * 0.9, 100, 32, "Start", 20, (0,0,0), (255,255,255), lambda game: start_chosen(game, 1))
if 2 not in game.chosen:
button.draw(game, 175, game.height * 0.9, 100, 32, "Start", 20, (0,0,0), (255,255,255), lambda game: start_chosen(game, 2))
if 3 not in game.chosen:
button.draw(game, 305, game.height * 0.9, 100, 32, "Start", 20, (0,0,0), (255,255,255), lambda game: start_chosen(game, 3))
if 4 not in game.chosen:
button.draw(game, 435, game.height * 0.9, 100, 32, "Start", 20, (0,0,0), (255,255,255), lambda game: start_chosen(game, 4))
else:
time.sleep(0.4)
chosen = False
while not chosen:
number = random.randrange(1,5)
if not number in game.chosen:
start_chosen(game,number)
chosen = True
elif game.get_current_player().direction == None:
if not game.get_current_player().isAI:
# draw movement buttons
button.draw_img(game, game.width - 145, game.height - 264, 80, 80, "", 0, "assets/img/pijlomhoog.png", (0,0,0), lambda game: game.get_current_player().set_direction("up"))
button.draw_img(game, game.width - (145 + 40), game.height - 200, 80, 80, "", 0, "assets/img/pijllinks.png", (0,0,0), lambda game: game.get_current_player().set_direction("left"))
button.draw_img(game, game.width - (145 - 40), game.height - 200, 80, 80, "", 0, "assets/img/pijlrechts.png", (0,0,0), lambda game: game.get_current_player().set_direction("right"))
# button.draw(game, 435, game.height * 0.9, 100, 32, "Start", 20, (0,0,0), (255,255,255), lambda game: start_chosen(game, 4))
else:
time.sleep(0.3)
if game.get_current_player().pos.get_y() > 2 and game.get_current_player().pos.get_y() < 13:
for plr in game.players:
if game.get_current_player().pos.get_col() == plr.pos.get_col() + 1 and game.get_current_player().pos.get_y() == plr.pos.get_y() and plr != game.get_current_player():
game.get_current_player().set_direction("left")
if game.get_current_player().pos.get_col() == plr.pos.get_col() - 1 and game.get_current_player().pos.get_y() == plr.pos.get_y() and plr != game.get_current_player():
game.get_current_player().set_direction("right")
if game.get_current_player().pos.get_col() == plr.pos.get_col() + 2 and plr.pos.get_x() == 1 and game.get_current_player().pos.get_y() == plr.pos.get_y() and plr != game.get_current_player():
game.get_current_player().set_direction("left")
if game.get_current_player().pos.get_col() == plr.pos.get_col() - 2 and plr.pos.get_x() == 0 and game.get_current_player().pos.get_y() == plr.pos.get_y() and plr != game.get_current_player():
game.get_current_player().set_direction("right")
if game.get_current_player().pos.get_col() == 1 and plr.pos.get_col() == 4 or (plr.pos.get_col() == 3 and plr.pos.get_x() == 1 and game.get_current_player().pos.get_x() == 0) and game.get_current_player().pos.get_y() == plr.pos.get_y() and plr != game.get_current_player():
game.get_current_player().set_direction("left")
if game.get_current_player().pos.get_col() == 4 and plr.pos.get_col() == 1 or (plr.pos.get_col() == 2 and plr.pos.get_x() == 0 and game.get_current_player().pos.get_x() == 1) and game.get_current_player().pos.get_y() == plr.pos.get_y() and plr != game.get_current_player():
game.get_current_player().set_direction("right")
if game.get_current_player().pos.get_col() == plr.pos.get_col() and game.get_current_player().pos.get_y() == plr.pos.get_y() and game.get_current_player().pos.get_x() == plr.pos.get_x() - 1 and plr != game.get_current_player():
game.get_current_player().set_direction("right")
if game.get_current_player().pos.get_col() == plr.pos.get_col() and game.get_current_player().pos.get_y() == plr.pos.get_y() and game.get_current_player().pos.get_x() == plr.pos.get_x() + 1 and plr != game.get_current_player():
game.get_current_player().set_direction("left")
if game.get_current_player().pos.get_y() > 9 and game.get_current_player().pos.get_y() == plr.pos.get_y() and plr != game.get_current_player():
game.get_current_player().set_direction("right")
if game.get_current_player().direction == None:
game.get_current_player().set_direction("up")
elif game.get_current_player().moves_left:
if game.get_current_player().direction == "up":
game.get_current_player().go_up()
elif game.get_current_player().direction == "left":
game.get_current_player().go_left()
elif game.get_current_player().direction == "right":
game.get_current_player().go_right()
elif game.get_current_player().direction == "down":
game.get_current_player().go_down()
# draw die
if game.get_current_player().did_choose_row and not game.get_current_player().direction == None and not game.get_current_player().moves_left:
if game.get_current_player().isAI:
pygame.display.flip()
if not game.get_current_player().did_roll:
pygame.display.flip()
time.sleep(0.4)
self.dice.onclick(game)
self.dice.draw(game)
gamelogic = GameLogic()
def question_chosen(game, idx):
# game.set_next_player()
    # gamelogic.dice.image = "assets/img/die0.png"
    # check whether the question was answered correctly;
    # increment the score for a correct answer and set the number of moves we can make
menumusic.timer_snd.stop()
game.get_current_player().turn_start = 0
if idx == 5:
game.get_current_player().score -= 10
game.get_current_player().set_direction(None)
game.set_next_player()
corrfont = pygame.font.Font(None, 72)
label_1 = corrfont.render(translate.translate("OVERTIME"), 1, (200,0,0))
size = corrfont.size(translate.translate("OVERTIME"))
game.screen.blit(label_1,(int(game.width/2 - (size[0]/2 + 45)), game.height/5 - (size[1]/2)))
pygame.display.flip()
time.sleep(0.7)
elif translate.translate(game.get_current_player().answers[idx-1]) == translate.translate("QUESTIONANSWER{}".format(game.question)):
#correct sound
menumusic.correct_snd.play()
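        # score formula: 15 points per move plus 10 per move beyond the first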
game.get_current_player().moves_left = math.ceil(game.get_current_player().dice_roll / 2)
game.get_current_player().score += (15 * game.get_current_player().moves_left) + ((game.get_current_player().moves_left * 10) - 10)
# update score in database
score.update(game.get_current_player().name, game.get_current_player().score)
corrfont = pygame.font.Font(None, 72)
label_1 = corrfont.render("CORRECT!", 1, (0,200,0))
label_2 = (pygame.font.Font(None, 30)).render("+" + str((15 * game.get_current_player().moves_left) + ((game.get_current_player().moves_left * 10) - 10)) + " score", 1, (0,200,0))
size = corrfont.size("CORRECT!")
size2 = (pygame.font.Font(None, 30)).size("+" + str((15 * game.get_current_player().moves_left) + ((game.get_current_player().moves_left * 10) - 10)) + " score")
game.screen.blit(label_1,(int(game.width/2 - (size[0]/2 + 45)), game.height/5.5 - (size[1]/2)))
game.screen.blit(label_2,(int(game.width/2 - (size[0]/2 - 25)), game.height/5 - (size[1]/2) + 35))
pygame.display.flip()
time.sleep(0.7)
else:
#incorrect sound
menumusic.wrong_snd.play()
game.get_current_player().score -= 10
game.get_current_player().set_direction(None)
game.set_next_player()
corrfont = pygame.font.Font(None, 72)
label_1 = corrfont.render("INCORRECT!", 1, (200,0,0))
label_2 = (pygame.font.Font(None, 30)).render("-10 score", 1, (200,0,0))
size = corrfont.size("INCORRECT!")
size2 = (pygame.font.Font(None, 30)).size("-10 score")
game.screen.blit(label_1,(int(game.width/2 - (size[0]/2 + 45)), game.height/5.5 - (size[1]/2)))
game.screen.blit(label_2,(int(game.width/2 - (size[0]/2 - 50)), game.height/5 - (size[1]/2) + 35))
pygame.display.flip()
time.sleep(0.7)
def start_chosen(game, idx):
game.get_current_player().setpos(idx, 0, 0)
game.get_current_player().did_choose_row = True
game.chosen.append(idx)
game.set_next_player()
def TBcallback(game, box, isEnterPressed, id, next):
SetName(id, game, box)
    if isEnterPressed and next is not None:
        textbox.textfields[id].isFocussed = False
        textbox.textfields[next].isFocussed = True
    elif isEnterPressed and next is None:
        textbox.textfields[id].isFocussed = False
def SetPlayerCount(game, idx):
if idx == 2:
game.players.append(player.Player(game))
game.players.append(player.Player(game))
textbox.create(game, game.width * 0.3, game.height * 0.2, 250, "", lambda game,box,isEnterPressed: TBcallback(game, box, isEnterPressed, 0, 1))
textbox.create(game, game.width * 0.3, game.height * 0.35, 250, "", lambda game,box,isEnterPressed: TBcallback(game, box, isEnterPressed, 1, None))
checkbox.create(game, game.width * 0.7, game.height * 0.35, "AI", False, lambda game,box: SetAI(1, game, box))
if idx == 3:
game.players.append(player.Player(game))
game.players.append(player.Player(game))
game.players.append(player.Player(game))
textbox.create(game, game.width * 0.3, game.height * 0.2, 250, "", lambda game,box,isEnterPressed: TBcallback(game, box, isEnterPressed, 0, 1))
textbox.create(game,game.width * 0.3, game.height * 0.35, 250, "", lambda game,box,isEnterPressed: TBcallback(game, box, isEnterPressed, 1, 2))
textbox.create(game, game.width * 0.3, game.height * 0.50, 250, "", lambda game,box,isEnterPressed: TBcallback(game, box, isEnterPressed, 2, None))
checkbox.create(game, game.width * 0.7, game.height * 0.35, "AI", False, lambda game,box: SetAI(1, game, box))
checkbox.create(game, game.width * 0.7, game.height * 0.50, "AI", False, lambda game,box: SetAI(2, game, box))
if idx == 4:
game.players.append(player.Player(game))
game.players.append(player.Player(game))
game.players.append(player.Player(game))
game.players.append(player.Player(game))
textbox.create(game, game.width * 0.3, game.height * 0.2, 250, "", lambda game,box,isEnterPressed: TBcallback(game, box, isEnterPressed, 0, 1))
textbox.create(game, game.width * 0.3, game.height * 0.35, 250, "", lambda game,box,isEnterPressed: TBcallback(game, box, isEnterPressed, 1, 2))
textbox.create(game, game.width * 0.3, game.height * 0.50, 250, "", lambda game,box,isEnterPressed: TBcallback(game, box, isEnterPressed, 2, 3))
textbox.create(game, game.width * 0.3, game.height * 0.65, 250, "", lambda game,box,isEnterPressed: TBcallback(game, box, isEnterPressed, 3, None))
checkbox.create(game, game.width * 0.7, game.height * 0.20, "AI", False, lambda game,box: SetAI(0, game, box))
checkbox.create(game, game.width * 0.7, game.height * 0.35, "AI", False, lambda game,box: SetAI(1, game, box))
checkbox.create(game, game.width * 0.7, game.height * 0.50, "AI", False, lambda game,box: SetAI(2, game, box))
checkbox.create(game, game.width * 0.7, game.height * 0.65, "AI", False, lambda game,box: SetAI(3, game, box))
game.playercount = idx
def StartGame(game):
for x in range(0, game.playercount):
if not len(game.players[x].name):
return
game.has_started = True
def draw(game):
if game.has_started:
# Make sure the playername boxes are gone
textbox.remove(game)
checkbox.remove(game)
        # Background color
pygame.draw.rect(game.screen,(204,204,204),(600,0,game.width * 0.9,game.height * 1))
        # Draw the category colors
pygame.draw.rect(game.screen,(255,0,0),(32,32,110,game.height * 0.8))
pygame.draw.rect(game.screen,(255,239,0),(162,32,110,game.height * 0.8))
pygame.draw.rect(game.screen,(52,163,253),(292,32,110,game.height * 0.8))
pygame.draw.rect(game.screen,(24,208,27),(422,32,110,game.height * 0.8))
game.screen.blit(pygame.image.load("assets\img\dots.png"), (60, 98))
# Start onder categorie
font = pygame.font.Font(None, 48)
font2 = pygame.font.Font(None, 20)
font3 = pygame.font.Font(None, 28)
# label_1 = font.render("Start", 1, (255,255,255))
# size = font.size("Start")
# game.screen.blit(label_1,(45, game.height * 0.9))
# game.screen.blit(label_1,(175, game.height * 0.9))
# game.screen.blit(label_1,(305, game.height * 0.9))
# game.screen.blit(label_1,(435, game.height * 0.9))
# Player turn info
turnlabel = font3.render("It's \"{}'s\" turn.".format(game.get_current_player().name), 1, (255,255,255))
game.screen.blit(turnlabel, (0, 0))
game.screen.blit(font.render("SCORES:", 1, (0,0,0)), (700 - font.size("SCORES:")[0]/2, 10))
sortedlist = sorted(game.players, key=lambda x: x.score, reverse=True)
for x in range(game.playercount):
game.screen.blit(font3.render(str(sortedlist[x].name) + ": " + str(sortedlist[x].score), 1, (0,0,0)), (700 - font3.size(str(sortedlist[x].name) + ": " + str(sortedlist[x].score))[0]/2, 50 + x*25))
# Gamelogic drawing
gamelogic.draw(game)
elif game.playercount:
game.screen.fill((60,60,60))
font = pygame.font.Font(None, 30)
label_1 = font.render(translate.translate("MAKE"), 1, (255,255,255))
size = font.size(translate.translate("MAKE"))
game.screen.blit(label_1,(game.width * 0.32, game.height * 0.1))
# Draw the boxes for the player names
textbox.draw(game)
checkbox.draw(game)
button.draw(game, game.width * 0.4, game.height * 0.8, 64, 32, "Start", 20, (0,0,0), (255,255,255), lambda game: StartGame(game))
else:
game.screen.fill((60,60,60))
button.draw(game, 10, 10, game.width / 10, game.height / 20, translate.translate("BACK"), 20, (25,25,25), (255,255,255), lambda x: game.set_state(game.last_state))
font = pygame.font.Font(None, 30)
label_1 = font.render(translate.translate("AMOUNT"), 1, (255,255,255))
size = font.size(translate.translate("AMOUNT"))
game.screen.blit(label_1,(game.width * 0.37, game.height * 0.2))
button.draw(game, game.width * 0.42, game.height * 0.3, 128, 64, "2", 30, (0,0,0), (255,255,255), lambda game: SetPlayerCount(game, 2))
button.draw(game, game.width * 0.42, game.height * 0.45, 128, 64, "3", 30, (0,0,0), (255,255,255), lambda game: SetPlayerCount(game, 3))
button.draw(game, game.width * 0.42, game.height * 0.60, 128, 64, "4", 30, (0,0,0), (255,255,255), lambda game: SetPlayerCount(game, 4))
# This function is called when the text in a name box changes
def SetName(idx, game, box):
game.players[idx].setname(box.text)
# This function is called when an AI checkbox is clicked.
def SetAI(idx, game, box):
print("Player {} AI state is {}".format(idx, box.isChecked))
game.players[idx].setai(box.isChecked)
def init(game):
game.isMP = False
| |
import claripy
import nose
import logging
l = logging.getLogger('claripy.test.solver')
solver_list = (claripy.Solver, claripy.SolverReplacement, claripy.SolverHybrid, claripy.SolverComposite, claripy.SolverCacheless)
def test_solver():
for s in solver_list:
yield raw_solver, s
def test_hybrid_solver():
s = claripy.SolverHybrid()
x = claripy.BVS('x', 32, min=0, max=10, stride=2)
y = claripy.BVS('y', 32, min=20, max=30, stride=5)
# TODO: for now, the stride isn't respected in symbolic mode, but we'll fix that next.
# until we do, let's add constraints
s.add(x <= 10)
s.add(x % 2 == 0)
s.add(y >= 20)
s.add(y <= 30)
s.add((y-20) % 5 == 0)
s.add(x != 8)
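    # exact=False answers from the approximating (VSA) backend, which ignores
    # constraints it cannot represent (like x != 8); the exact path uses Z3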
nose.tools.assert_items_equal(s.eval(x, 20, exact=False), (0, 2, 4, 6, 8, 10))
nose.tools.assert_items_equal(s.eval(x, 20), (0, 2, 4, 6, 10))
nose.tools.assert_items_equal(s.eval(y, 20, exact=False), (20, 25, 30))
nose.tools.assert_items_equal(s.eval(y, 20), (20, 25, 30))
# now constrain things further so that the VSA overapproximates
s.add(x <= 4)
nose.tools.assert_items_equal(s.eval(x, 20, exact=False), (0, 2, 4))
nose.tools.assert_items_equal(s.eval(x, 20), (0, 2, 4))
s.add(y >= 27)
nose.tools.assert_items_equal(s.eval(y, 20, exact=False), (30,))
nose.tools.assert_items_equal(s.eval(y, 20), (30,))
t = claripy.SolverHybrid()
x = claripy.BVS('x', 32)
t.add(x <= 10)
print t.eval(x, 80, exact=False)
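    # x <= 10 leaves exactly 11 solutions (0..10), so requests for more than
    # 11 values cap out at 11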
nose.tools.assert_equal(len(t.eval(x, 5, exact=False)), 5)
nose.tools.assert_equal(len(t.eval(x, 5, exact=False)), 5)
nose.tools.assert_equal(len(t.eval(x, 6, exact=False)), 6)
nose.tools.assert_equal(len(t.eval(x, 8, exact=False)), 8)
nose.tools.assert_equal(len(t.eval(x, 99, exact=False)), 11)
def test_replacement_solver():
sr = claripy.SolverReplacement()
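    # the replacement solver substitutes registered expressions before solving;
    # eval on an unconstrained, unreplaced x still enumerates freely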
x = claripy.BVS('x', 32)
nose.tools.assert_equals(len(sr.eval(x, 10)), 10)
sr.add_replacement(x, claripy.BVV(0x101, 32))
nose.tools.assert_items_equal(sr.eval(x, 10), [0x101])
y = claripy.BVS('y', 32)
sr.add([y+1 == 200])
assert (y+1).cache_key in sr._replacements
assert sr._replacement(y+1) is claripy.BVV(200, 32)
srb = sr.branch()
assert len(srb.constraints) == len(sr.constraints) #pylint:disable=no-member
assert (y+1).cache_key in sr._replacements
assert sr._replacement(y+1) is claripy.BVV(200, 32)
sr = claripy.SolverReplacement()
b = claripy.BoolS('b')
assert sr._replacement(b) is b
sr.add(claripy.Not(b))
assert sr._replacement(b) is claripy.false
sr = claripy.SolverReplacement(claripy.SolverVSA(), complex_auto_replace=True)
x = claripy.BVS('x', 64)
sr.add([x + 8 <= 0xffffffffffffffff])
sr.add([x + 8 >= 0])
assert sr._replacement(x) is not x
def raw_solver(solver_type):
#bc = claripy.backends.BackendConcrete(clrp)
#bz = claripy.backends.BackendZ3(clrp)
#claripy.expression_backends = [ bc, bz, ba ]
print "YOYO"
s = solver_type()
s.simplify()
x = claripy.BVS('x', 32)
y = claripy.BVS('y', 32)
z = claripy.BVS('z', 32)
l.debug("adding constraints")
s.add(x == 10)
s.add(y == 15)
# Batch evaluation
results = s.batch_eval([x + 5, x + 6, 3], 2)
nose.tools.assert_equal(len(results), 1)
nose.tools.assert_equal(results[0][0], 15) # x + 5
nose.tools.assert_equal(results[0][1], 16) # x + 6
nose.tools.assert_equal(results[0][2], 3) # constant
l.debug("checking")
nose.tools.assert_true(s.satisfiable())
nose.tools.assert_false(s.satisfiable(extra_constraints=[x == 5]))
nose.tools.assert_equal(s.eval(x + 5, 1)[0], 15)
nose.tools.assert_true(s.solution(x + 5, 15))
nose.tools.assert_true(s.solution(x, 10))
nose.tools.assert_true(s.solution(y, 15))
nose.tools.assert_false(s.solution(y, 13))
shards = s.split()
nose.tools.assert_equal(len(shards), 2)
nose.tools.assert_equal(len(shards[0].variables), 1)
nose.tools.assert_equal(len(shards[1].variables), 1)
if isinstance(s, claripy.frontend_mixins.ConstraintExpansionMixin) or (
isinstance(s, claripy.frontends.HybridFrontend) and
isinstance(s._exact_frontend, claripy.frontend_mixins.ConstraintExpansionMixin)
): #the hybrid frontend actually uses the exact frontend for the split
nose.tools.assert_equal({ len(shards[0].constraints), len(shards[1].constraints) }, { 2, 1 }) # adds the != from the solution() check
if isinstance(s, claripy.frontends.ReplacementFrontend):
nose.tools.assert_equal({ len(shards[0].constraints), len(shards[1].constraints) }, { 1, 1 }) # not a caching frontend
# test result caching
s = solver_type()
s.add(x == 10)
s.add(y == 15)
nose.tools.assert_false(s.satisfiable(extra_constraints=(x==5,)))
nose.tools.assert_true(s.satisfiable())
s = solver_type()
#claripy.expression_backends = [ bc, ba, bz ]
s.add(claripy.UGT(x, 10))
s.add(claripy.UGT(x, 20))
s.simplify()
nose.tools.assert_equal(len(s.constraints), 1)
#nose.tools.assert_equal(str(s.constraints[0]._obj), "Not(ULE(x <= 20))")
s.add(claripy.UGT(y, x))
s.add(claripy.ULT(z, 5))
# test that duplicate constraints are ignored
old_count = len(s.constraints)
s.add(claripy.ULT(z, 5))
nose.tools.assert_equal(len(s.constraints), old_count)
#print "========================================================================================"
#print "========================================================================================"
#print "========================================================================================"
#print "========================================================================================"
#a = s.eval(z, 100)
#print "ANY:", a
#print "========================================================================================"
#mx = s.max(z)
#print "MAX",mx
#print "========================================================================================"
#mn = s.min(z)
#print "MIN",mn
#print "========================================================================================"
#print "========================================================================================"
#print "========================================================================================"
#print "========================================================================================"
print "CONSTRATINT COUNTS:", [ len(_.constraints) for _ in s.split() ]
nose.tools.assert_equal(s.max(z), 4)
nose.tools.assert_equal(s.min(z), 0)
nose.tools.assert_equal(s.min(y), 22)
nose.tools.assert_equal(s.max(y), 2**y.size()-1)
print "CONSTRATINT COUNTS:", [ len(_.constraints) for _ in s.split() ]
ss = s.split()
nose.tools.assert_equal(len(ss), 2)
#if isinstance(s, claripy.frontend_mixins.ConstraintExpansionMixin):
# nose.tools.assert_equal({ len(_.constraints) for _ in ss }, { 3, 2 }) # constraints from min or max
# Batch evaluation
s.add(y < 24)
s.add(z < x) # Just to make sure x, y, and z belong to the same solver, since batch evaluation does not support the
# situation where expressions belong to more than one solver
results = s.batch_eval([x, y, z], 20)
nose.tools.assert_set_equal(
set(results),
{(21L, 23L, 1L), (22L, 23L, 3L), (22L, 23L, 2L), (22L, 23L, 4L), (21L, 22L, 4L), (21L, 23L, 4L), (22L, 23L, 0L),
(22L, 23L, 1L), (21L, 22L, 1L), (21L, 22L, 3L), (21L, 22L, 2L), (21L, 22L, 0L), (21L, 23L, 0L), (21L, 23L, 2L),
(21L, 23L, 3L)
}
)
# test that False makes it unsat
s = solver_type()
s.add(claripy.BVV(1,1) == claripy.BVV(1,1))
nose.tools.assert_true(s.satisfiable())
s.add(claripy.BVV(1,1) == claripy.BVV(0,1))
nose.tools.assert_false(s.satisfiable())
# test extra constraints
s = solver_type()
x = claripy.BVS('x', 32)
nose.tools.assert_items_equal(s.eval(x, 2, extra_constraints=[x==10]), ( 10, ))
s.add(x == 10)
nose.tools.assert_false(s.solution(x, 2))
nose.tools.assert_true(s.solution(x, 10))
# test result caching
if isinstance(s, claripy.frontend_mixins.ModelCacheMixin):
count = claripy._backends_module.backend_z3.solve_count
s = solver_type()
x = claripy.BVS('x', 32)
s.add(x == 10)
nose.tools.assert_true(s.satisfiable())
assert claripy._backends_module.backend_z3.solve_count == count + 1
nose.tools.assert_equals(s.eval(x, 1)[0], 10)
assert claripy._backends_module.backend_z3.solve_count == count + 1
s.add(x == 10)
s.add(x > 9)
nose.tools.assert_equals(s.eval(x, 1)[0], 10)
assert claripy._backends_module.backend_z3.solve_count == count + 1
y = claripy.BVS('y', 32)
s.add(y < 999)
assert s.satisfiable()
assert claripy._backends_module.backend_z3.solve_count == count + 1
nose.tools.assert_equals(s.eval(y, 1)[0], 0)
assert claripy._backends_module.backend_z3.solve_count == count + 1
def test_solver_branching():
for s in solver_list:
yield raw_solver_branching, s
def raw_solver_branching(solver_type):
s = solver_type()
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
s.add(claripy.UGT(x, y))
s.add(claripy.ULT(x, 10))
nose.tools.assert_greater(s.eval(x, 1)[0], 0)
t = s.branch()
if isinstance(s, claripy.frontends.FullFrontend):
nose.tools.assert_is(s._tls.solver, t._tls.solver)
nose.tools.assert_true(s._finalized)
nose.tools.assert_true(t._finalized)
t.add(x == 5)
#if isinstance(s, claripy.FullFrontend):
# nose.tools.assert_is(t._solver, None)
s.add(x == 3)
nose.tools.assert_true(s.satisfiable())
t.add(x == 3)
nose.tools.assert_false(t.satisfiable())
s.add(y == 2)
nose.tools.assert_true(s.satisfiable())
nose.tools.assert_equals(s.eval(x, 1)[0], 3)
nose.tools.assert_equals(s.eval(y, 1)[0], 2)
nose.tools.assert_false(t.satisfiable())
def test_combine():
for s in solver_list:
yield raw_combine, s
def raw_combine(solver_type):
s10 = solver_type()
s20 = solver_type()
s30 = solver_type()
x = claripy.BVS("x", 32)
s10.add(x >= 10)
s20.add(x <= 20)
s30.add(x == 30)
nose.tools.assert_true(s10.satisfiable())
nose.tools.assert_true(s20.satisfiable())
nose.tools.assert_true(s30.satisfiable())
nose.tools.assert_true(s10.combine([s20]).satisfiable())
nose.tools.assert_true(s20.combine([s10]).satisfiable())
nose.tools.assert_true(s30.combine([s10]).satisfiable())
nose.tools.assert_false(s30.combine([s20]).satisfiable())
nose.tools.assert_items_equal(s30.combine([s10]).eval(x, 1), ( 30, ))
nose.tools.assert_equal(len(s30.combine([s10]).constraints), 2)
def test_composite_solver():
#pylint:disable=no-member
s = claripy.SolverComposite()
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
z = claripy.BVS("z", 32)
c = claripy.And(x == 1, y == 2, z == 3)
s.add(c)
nose.tools.assert_equals(len(s._solver_list), 3)
nose.tools.assert_true(s.satisfiable())
s.add(x < y)
nose.tools.assert_equal(len(s._solver_list), 2)
nose.tools.assert_true(s.satisfiable())
s.simplify()
nose.tools.assert_equal(len(s._solver_list), 3)
nose.tools.assert_true(s.satisfiable())
s1 = s.branch()
nose.tools.assert_equal(len(s1._solver_list), 3)
s1.add(x > y)
nose.tools.assert_equal(len(s1._solver_list), 2)
nose.tools.assert_false(s1.satisfiable())
nose.tools.assert_equal(len(s._solver_list), 3)
nose.tools.assert_true(s.satisfiable())
s.add(claripy.BVV(1, 32) == claripy.BVV(2, 32))
nose.tools.assert_equal(len(s._solver_list), 3)
nose.tools.assert_false(s.satisfiable())
ss = s.branch()
nose.tools.assert_equal(len(ss._solver_list), 3)
nose.tools.assert_false(ss.satisfiable())
s = claripy.SolverComposite()
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
z = claripy.BVS("z", 32)
c = claripy.And(x == 1, y == 2, z == 3)
s.add(c)
if isinstance(s._template_frontend, claripy.frontend_mixins.ModelCacheMixin):
assert len(s._solver_list) == 3
count = claripy._backends_module.backend_z3.solve_count
assert s.satisfiable()
assert claripy._backends_module.backend_z3.solve_count == count + 3
assert list(s.eval(x+y, 1)) == [3]
assert claripy._backends_module.backend_z3.solve_count == count + 3
def test_minmax():
s = claripy.Solver()
x = claripy.BVS("x", 32)
nose.tools.assert_equal(s.max(x), 2**32-1)
nose.tools.assert_equal(s.min(x), 0)
nose.tools.assert_true(s.satisfiable())
def test_composite_discrepancy():
a = claripy.BVS("a", 8)
b = claripy.BVS("b", 8)
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
z = claripy.BVS("z", 32)
xy = x + y
dst = claripy.BVV(0xbaaaaf50, 32) + xy
constraints = [ ]
constraints.append(x <= 0x1)
constraints.append(x != 0x0)
constraints.append(claripy.SignExt(24, claripy.If(x > 0x0, a, 0)) != 0xa)
constraints.append(x < 0x80)
constraints.append(y <= 0x1)
constraints.append(x == 0x1)
constraints.append((0xbaaaaf50 + x) == 0xbaaaaf51)
constraints.append(y != 0x0)
constraints.append(claripy.SignExt(24, claripy.If(y > 0x0, b, 0)) != 0xa)
constraints.append((x + y) < 0x80)
constraints.append(z <= 0x1)
constraints.append((x + y) == 0x2)
sn = claripy.Solver()
sc = claripy.SolverComposite()
sn.add(constraints)
sc.add(constraints)
print sn.max(dst), sc.max(dst)
print sn.min(dst), sc.min(dst)
assert sn.max(dst) == sc.max(dst)
assert sn.min(dst) == sc.min(dst)
def test_model():
x = claripy.BVS("x", 32)
y = claripy.BVS("y", 32)
s = claripy.Solver()
s.add(x < 10)
assert sorted(s.eval(x, 20)) == range(10)
s.add(y == 1337)
assert sorted(s.eval(x, 20)) == range(10)
def test_unsatness():
x = claripy.BVS("x", 32)
s = claripy.Solver()
s.add(x == 10)
assert s.satisfiable()
s.add(claripy.false)
assert not s.satisfiable()
def test_simplification_annotations():
s = claripy.Solver()
x = claripy.BVS("x", 32)
s.add(x > 10)
s.add(x > 11)
s.add((x > 12).annotate(claripy.SimplificationAvoidanceAnnotation()))
assert len(s.constraints) == 3
s.simplify()
assert len(s.constraints) == 2
if __name__ == '__main__':
test_simplification_annotations()
test_model()
test_composite_discrepancy()
for func, param in test_solver():
func(param)
test_hybrid_solver()
test_replacement_solver()
test_minmax()
test_solver_branching()
for func, param in test_solver_branching():
func(param)
for func, param in test_combine():
func(param)
test_composite_solver()
| |
'''
Firefox 2
@author: Peter Parente <parente@cs.unc.edu>
@copyright: Copyright (c) 2008 Peter Parente
@license: BSD License
All rights reserved. This program and the accompanying materials are made
available under the terms of The BSD License which accompanies this
distribution, and is available at
U{http://www.opensource.org/licenses/bsd-license.php}
'''
import re
import UIA, Interface, Config
from protocols import advise
from Chooser import Chooser
from UIA import Adapters
from View import Task, Control
address_path = ['/application[3]/tool bar[4]/tool bar[1]/combo box[5]/editable text[2]', '/application[3]/tool bar[3]/tool bar[1]/combo box[5]/editable text[2]', '/application[3]/tool bar[2]/tool bar[1]/combo box[5]/editable text[2]']
# keeping our bookmarks in the script to overcome MSAA shortcomings in FF2
BM_URLS= ['file:///c:/cygwin/home/parente/studies/final/resources/memory/p1.html',
'file:///c:/cygwin/home/parente/studies/final/resources/memory/p2.html',
'file:///c:/cygwin/home/parente/studies/final/resources/memory/p3.html',
'file:///c:/cygwin/home/parente/studies/final/resources/memory/p4.html',
'file:///c:/cygwin/home/parente/studies/final/resources/workflow/wiki/index.html']
BM_NAMES = ['First passage', 'Second passage', 'Third passage',
'Fourth passage', 'Home, Stu Grey - XYZ Wiki']
class BookmarkAdapter(object):
advise(instancesProvide=[Interface.IOption])
def __init__(self, name, url):
self.name = name
self.url = url
def GetName(self):
return self.name
def GetObject(self):
return self.url
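    # factory: pair up the parallel name/url lists; registered as a classmethod
    # below via the pre-decorator classmethod() idiom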
def GenerateFromLists(cls, names, urls):
for name, url in zip(names, urls):
yield BookmarkAdapter(name, url)
GenerateFromLists = classmethod(GenerateFromLists)
class Main(UIA.Macro):
# the macro that's run at the very start of the program
def Sequence(self):
# watch for firefox
self.WatchForNewWindow(ClassName='MozillaUIWindowClass', RoleText='window')
# run firefox
self.RunFile('c:/program files/mozilla firefox/firefox.exe -P test')
yield False
# find the root window
while self.result.Path != '/':
self.result = self.result.Parent
yield True
class DoCreateNewTab(UIA.Macro):
def Sequence(self):
# @todo watch for new tab to appear
# create the new tab
self.SendKeys('%{f}t')
#yield False
yield True
class DoFollowAddress(UIA.Macro):
def Sequence(self):
# @todo: change the name on the task to indicate waiting
# watch for state change on ROLE_DOCUMENT adding busy
self.WatchForEvents([UIA.Constants.EVENT_OBJECT_STATECHANGE],
Role=UIA.Constants.ROLE_SYSTEM_DOCUMENT, survive=True)
self.SendKeys('{Enter}')
yield False
# check for state busy
while 1:
try:
state = self.result.GetState()
except Exception:
yield False
else:
yield not (state & UIA.Constants.STATE_SYSTEM_BUSY)
yield True
class DoTypeURLInAddress(UIA.Macro):
def Sequence(self):
# check if address bar already has focus
for ap in address_path:
c = Interface.IContext(self.task.parent)
addr = c.GetObjectAt(ap)
if addr is not None:
break
if not (addr.Parent.GetState() & UIA.Constants.STATE_SYSTEM_FOCUSED):
# watch for focus event
self.WatchForEvents([UIA.Constants.EVENT_OBJECT_FOCUS], Name='Location',
RoleText='combo box')
# press hotkey to go to address bar
self.SendKeys('%{d}')
yield False
# enter the link text into the bar
self.SendKeys(self.url)
# @todo: change the name on the task to indicate waiting
# watch for state change on ROLE_DOCUMENT adding busy
self.WatchForEvents([UIA.Constants.EVENT_OBJECT_STATECHANGE],
Role=UIA.Constants.ROLE_SYSTEM_DOCUMENT, survive=True)
self.SendKeys('{Enter}')
yield False
# check for state busy
while 1:
try:
state = self.result.GetState()
except Exception:
yield False
else:
yield not (state & UIA.Constants.STATE_SYSTEM_BUSY)
yield True
class DoFollowLink(UIA.Macro):
def Sequence(self):
# @todo change the name on the task to indicate waiting
# watch for state change on ROLE_DOCUMENT adding busy
self.WatchForEvents([UIA.Constants.EVENT_OBJECT_STATECHANGE],
Role=UIA.Constants.ROLE_SYSTEM_DOCUMENT, survive=True)
self.document.FollowLink()
yield False
# check for state busy
while 1:
try:
state = self.result.GetState()
except Exception:
yield False
else:
yield not (state & UIA.Constants.STATE_SYSTEM_BUSY)
yield True
class DoNavigateHistory(UIA.Macro):
def Sequence(self):
# get direction from instance variable
if self.direction == 'back':
# back key
key = '%{Left}'
else:
# forward key
key = '%{Right}'
# watch for state change on ROLE_DOCUMENT adding busy
self.WatchForEvents([UIA.Constants.EVENT_OBJECT_STATECHANGE],
Role=UIA.Constants.ROLE_SYSTEM_DOCUMENT, survive=True)
# do key press
self.SendKeys(key)
# check for state busy
while 1:
try:
state = self.result.GetState()
except Exception:
yield False
else:
yield not (state & UIA.Constants.STATE_SYSTEM_BUSY)
yield True
class DoShowAddress(UIA.Macro):
def Sequence(self):
# watch for focus event
self.WatchForEvents([UIA.Constants.EVENT_OBJECT_FOCUS], Name='Location:',
RoleText='editable text')
# press hotkey to go to address bar
self.SendKeys('%{d}')
yield False
yield True
class BrowseDocument(Task.FormFill):
Name = 'browse document'
back_path = ['/application[3]/tool bar[3]/tool bar[1]/push button[0]',
'/application[3]/tool bar[2]/tool bar[1]/push button[0]']
forward_path = ['/application[3]/tool bar[3]/tool bar[1]/push button[1]',
'/application[3]/tool bar[2]/tool bar[1]/push button[1]']
def OnActivate(self, message, auto_focus):
if message and message.ResultData is not None:
            # some back/forward task ended, update our model
self.model = message.ResultData
Interface.IInteractive(self.fields[0].Model).HasChanged()
return super(BrowseDocument, self).OnActivate(message, auto_focus)
def OnInit(self):
# add hypertext browsing model and view
# task model is the document object, so path is always root
ht = Adapters.HypertextDocument(self, '/')
document = Control.DocumentReading(self, ht, 'web page', spell=False)
self.AddField(document)
# add conditional back/forward tasks
def GoBackCondition():
for p in self.back_path:
c = Interface.IContext(self.parent).GetObjectAt(p)
if c is not None: break
            try: return not c.IsNotReady()
            except Exception: return False
def GoForwardCondition():
for p in self.forward_path:
c = Interface.IContext(self.parent).GetObjectAt(p)
if c is not None: break
            try: return not c.IsNotReady()
            except Exception: return False
# add secondary tasks to the appropriate controls
document.AddContextOption(GoBack, True, GoBackCondition)
document.AddContextOption(GoForward, True, GoForwardCondition)
def OnLoseFocus(self, message):
self.Name = self.fields[0].Model.GetTitle()
def OnDoThat(self, message):
if not message.Press:
return
# check if the current document pointer is a link
doc = Interface.IHypertext(self.fields[0].Model)
if not doc.IsLink():
return
p = self.OutWaiting(message, False)
self.Output(self, p)
# stop accepting input
self.Pause()
# run the link following macro
m = DoFollowLink(self, message, self.model, document=doc)
result = m.Execute()
# call DoneLink to unpause
result.AddCallback(self.DoneLink)
def DoneLink(self, result, message):
'''
Unpauses this control and plays OutIntroduction on the document
@param result: Connection object for the last step of the DoFollowLink
macro, stored as the new document model
@type result: pyAA.AccessibleObject
@param message: Message that caused this event handler to fire
@type message: L{Input.Messages.InboundMessage}
'''
self.model = result
# replace the model of the document view
ht = Adapters.HypertextDocument(self, '/')
self.fields[0].Model = ht
# reintroduce task
p = self.OutIntroduction(message, False)
self.Output(self, p)
# re-activate hypertext control
self.ChangeFocus(self.fields[0], message, True)
# resume accepting keystrokes
self.Unpause()
class GoBack(Task.RunWaitReport):
Name = 'go back'
StartMacro = DoNavigateHistory(direction='back')
Modal = True
# no successor, just resume the browsing task that started this one and
# let it update its pointer to the start of the new document
class GoForward(Task.RunWaitReport):
Name = 'go forward'
StartMacro = DoNavigateHistory(direction='forward')
Modal = True
class GoToAddress(Task.FormFill):
Name = 'go to address'
StartMacro = DoShowAddress
CompleteMacro = DoFollowAddress
Successor = BrowseDocument
class BrowseBookmarks(Task.FormFill):
Name = 'browse bookmarks'
StartMacro = DoCreateNewTab
CompleteMacro = DoTypeURLInAddress
Successor = BrowseDocument
def OnInit(self):
# create a chooser model
ch = Chooser(BookmarkAdapter.GenerateFromLists(BM_NAMES, BM_URLS))
# add list view
        lv = Control.List(self, ch, 'bookmarks', label='bookmark')
self.AddField(lv)
def OnReadyToComplete(self, message):
# get value from list
self.ready = False
# get the selected URL
url = self.fields[0].Model.GetSelectedItem().GetObject()
# call completion macro with value provided
m = self.CompleteMacro(self, message, self.model, url=url)
result = m.Execute()
result.AddCallback(self.OnComplete)
Tasks = [BrowseBookmarks] #,GoToAddress
AutoTasks = []
| |
"""Helpers for components that manage entities."""
import asyncio
from datetime import timedelta
from homeassistant import config as conf_util
from homeassistant.setup import async_prepare_setup_platform
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_SCAN_INTERVAL, CONF_ENTITY_NAMESPACE,
DEVICE_DEFAULT_NAME)
from homeassistant.core import callback, valid_entity_id
from homeassistant.exceptions import HomeAssistantError, PlatformNotReady
from homeassistant.loader import get_component
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.event import (
async_track_time_interval, async_track_point_in_time)
from homeassistant.helpers.service import extract_entity_ids
from homeassistant.util import slugify
from homeassistant.util.async import (
run_callback_threadsafe, run_coroutine_threadsafe)
import homeassistant.util.dt as dt_util
DEFAULT_SCAN_INTERVAL = timedelta(seconds=15)
SLOW_SETUP_WARNING = 10
SLOW_SETUP_MAX_WAIT = 60
PLATFORM_NOT_READY_RETRIES = 10
class EntityComponent(object):
"""Helper class that will help a component manage its entities."""
def __init__(self, logger, domain, hass,
scan_interval=DEFAULT_SCAN_INTERVAL, group_name=None):
"""Initialize an entity component."""
self.logger = logger
self.hass = hass
self.domain = domain
self.entity_id_format = domain + '.{}'
self.scan_interval = scan_interval
self.group_name = group_name
self.entities = {}
self.config = None
self._platforms = {
'core': EntityPlatform(self, domain, self.scan_interval, 0, None),
}
self.async_add_entities = self._platforms['core'].async_add_entities
self.add_entities = self._platforms['core'].add_entities
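    # Typical use, sketched from this interface (hypothetical domain/config):
    #   component = EntityComponent(_LOGGER, 'switch', hass)
    #   component.setup(config)  # loads every platform configured for 'switch'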
def setup(self, config):
"""Set up a full entity component.
This doesn't block the executor to protect from deadlocks.
"""
self.hass.add_job(self.async_setup(config))
@asyncio.coroutine
def async_setup(self, config):
"""Set up a full entity component.
Loads the platforms from the config and will listen for supported
discovered platforms.
This method must be run in the event loop.
"""
self.config = config
# Look in config for Domain, Domain 2, Domain 3 etc and load them
tasks = []
for p_type, p_config in config_per_platform(config, self.domain):
tasks.append(self._async_setup_platform(p_type, p_config))
if tasks:
yield from asyncio.wait(tasks, loop=self.hass.loop)
# Generic discovery listener for loading platform dynamically
# Refer to: homeassistant.components.discovery.load_platform()
@callback
def component_platform_discovered(platform, info):
"""Handle the loading of a platform."""
self.hass.async_add_job(
self._async_setup_platform(platform, {}, info))
discovery.async_listen_platform(
self.hass, self.domain, component_platform_discovered)
def extract_from_service(self, service, expand_group=True):
"""Extract all known entities from a service call.
Will return all entities if no entities specified in call.
Will return an empty list if entities specified but unknown.
"""
return run_callback_threadsafe(
self.hass.loop, self.async_extract_from_service, service,
expand_group
).result()
def async_extract_from_service(self, service, expand_group=True):
"""Extract all known and available entities from a service call.
Will return all entities if no entities specified in call.
Will return an empty list if entities specified but unknown.
This method must be run in the event loop.
"""
if ATTR_ENTITY_ID not in service.data:
return [entity for entity in self.entities.values()
if entity.available]
return [self.entities[entity_id] for entity_id
in extract_entity_ids(self.hass, service, expand_group)
if entity_id in self.entities and
self.entities[entity_id].available]
@asyncio.coroutine
def _async_setup_platform(self, platform_type, platform_config,
discovery_info=None, tries=0):
"""Set up a platform for this component.
This method must be run in the event loop.
"""
platform = yield from async_prepare_setup_platform(
self.hass, self.config, self.domain, platform_type)
if platform is None:
return
# Config > Platform > Component
scan_interval = (
platform_config.get(CONF_SCAN_INTERVAL) or
getattr(platform, 'SCAN_INTERVAL', None) or self.scan_interval)
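        # platforms that implement async_setup_platform default to 0
        # (unlimited) parallel updates; sync platforms default to 1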
parallel_updates = getattr(
platform, 'PARALLEL_UPDATES',
int(not hasattr(platform, 'async_setup_platform')))
entity_namespace = platform_config.get(CONF_ENTITY_NAMESPACE)
key = (platform_type, scan_interval, entity_namespace)
if key not in self._platforms:
entity_platform = self._platforms[key] = EntityPlatform(
self, platform_type, scan_interval, parallel_updates,
entity_namespace)
else:
entity_platform = self._platforms[key]
self.logger.info("Setting up %s.%s", self.domain, platform_type)
warn_task = self.hass.loop.call_later(
SLOW_SETUP_WARNING, self.logger.warning,
"Setup of platform %s is taking over %s seconds.", platform_type,
SLOW_SETUP_WARNING)
try:
if getattr(platform, 'async_setup_platform', None):
task = platform.async_setup_platform(
self.hass, platform_config,
entity_platform.async_schedule_add_entities, discovery_info
)
else:
# This should not be replaced with hass.async_add_job because
# we don't want to track this task in case it blocks startup.
task = self.hass.loop.run_in_executor(
None, platform.setup_platform, self.hass, platform_config,
entity_platform.schedule_add_entities, discovery_info
)
yield from asyncio.wait_for(
asyncio.shield(task, loop=self.hass.loop),
SLOW_SETUP_MAX_WAIT, loop=self.hass.loop)
yield from entity_platform.async_block_entities_done()
self.hass.config.components.add(
'{}.{}'.format(self.domain, platform_type))
except PlatformNotReady:
tries += 1
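            # back off linearly with each retry, capped at 180 seconds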
wait_time = min(tries, 6) * 30
self.logger.warning(
'Platform %s not ready yet. Retrying in %d seconds.',
platform_type, wait_time)
async_track_point_in_time(
self.hass, self._async_setup_platform(
platform_type, platform_config, discovery_info, tries),
dt_util.utcnow() + timedelta(seconds=wait_time))
except asyncio.TimeoutError:
self.logger.error(
"Setup of platform %s is taking longer than %s seconds."
" Startup will proceed without waiting any longer.",
platform_type, SLOW_SETUP_MAX_WAIT)
except Exception: # pylint: disable=broad-except
self.logger.exception(
"Error while setting up platform %s", platform_type)
finally:
warn_task.cancel()
def add_entity(self, entity, platform=None, update_before_add=False):
"""Add entity to component."""
return run_coroutine_threadsafe(
self.async_add_entity(entity, platform, update_before_add),
self.hass.loop
).result()
@asyncio.coroutine
def async_add_entity(self, entity, platform=None, update_before_add=False):
"""Add entity to component.
This method must be run in the event loop.
"""
if entity is None or entity in self.entities.values():
return False
entity.hass = self.hass
# Update properties before we generate the entity_id
if update_before_add:
try:
yield from entity.async_device_update(warning=False)
except Exception: # pylint: disable=broad-except
self.logger.exception("Error on device update!")
return False
# Write entity_id to entity
if getattr(entity, 'entity_id', None) is None:
object_id = entity.name or DEVICE_DEFAULT_NAME
if platform is not None and platform.entity_namespace is not None:
object_id = '{} {}'.format(platform.entity_namespace,
object_id)
entity.entity_id = async_generate_entity_id(
self.entity_id_format, object_id,
self.entities.keys())
        # Make sure the entity_id is valid in case the entity set it itself
if entity.entity_id in self.entities:
raise HomeAssistantError(
'Entity id already exists: {}'.format(entity.entity_id))
elif not valid_entity_id(entity.entity_id):
raise HomeAssistantError(
'Invalid entity id: {}'.format(entity.entity_id))
self.entities[entity.entity_id] = entity
if hasattr(entity, 'async_added_to_hass'):
yield from entity.async_added_to_hass()
yield from entity.async_update_ha_state()
return True
def update_group(self):
"""Set up and/or update component group."""
run_callback_threadsafe(
self.hass.loop, self.async_update_group).result()
@callback
def async_update_group(self):
"""Set up and/or update component group.
This method must be run in the event loop.
"""
if self.group_name is not None:
ids = sorted(self.entities,
key=lambda x: self.entities[x].name or x)
group = get_component('group')
group.async_set_group(
self.hass, slugify(self.group_name), name=self.group_name,
visible=False, entity_ids=ids
)
def reset(self):
"""Remove entities and reset the entity component to initial values."""
run_coroutine_threadsafe(self.async_reset(), self.hass.loop).result()
@asyncio.coroutine
def async_reset(self):
"""Remove entities and reset the entity component to initial values.
This method must be run in the event loop.
"""
tasks = [platform.async_reset() for platform
in self._platforms.values()]
if tasks:
yield from asyncio.wait(tasks, loop=self.hass.loop)
self._platforms = {
'core': self._platforms['core']
}
self.entities = {}
self.config = None
if self.group_name is not None:
group = get_component('group')
group.async_remove(self.hass, slugify(self.group_name))
def prepare_reload(self):
"""Prepare reloading this entity component."""
return run_coroutine_threadsafe(
self.async_prepare_reload(), loop=self.hass.loop).result()
@asyncio.coroutine
def async_prepare_reload(self):
"""Prepare reloading this entity component.
This method must be run in the event loop.
"""
try:
conf = yield from \
conf_util.async_hass_config_yaml(self.hass)
except HomeAssistantError as err:
self.logger.error(err)
return None
conf = conf_util.async_process_component_config(
self.hass, conf, self.domain)
if conf is None:
return None
yield from self.async_reset()
return conf
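# A minimal sketch (not part of the original module) of the linear back-off
# used by _async_setup_platform above when a platform raises PlatformNotReady:
# each retry waits min(tries, 6) * 30 seconds, i.e. 30, 60, 90, 120, 150 and
# then a cap of 180 seconds for every attempt after the sixth.
def _example_platform_retry_delay(tries):
    """Hypothetical helper mirroring the wait_time computation above."""
    return min(tries, 6) * 30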
class EntityPlatform(object):
"""Keep track of entities for a single platform and stay in loop."""
def __init__(self, component, platform, scan_interval, parallel_updates,
entity_namespace):
"""Initialize the entity platform."""
self.component = component
self.platform = platform
self.scan_interval = scan_interval
self.parallel_updates = None
self.entity_namespace = entity_namespace
self.platform_entities = []
self._tasks = []
self._async_unsub_polling = None
self._process_updates = asyncio.Lock(loop=component.hass.loop)
if parallel_updates:
self.parallel_updates = asyncio.Semaphore(
parallel_updates, loop=component.hass.loop)
@asyncio.coroutine
def async_block_entities_done(self):
"""Wait until all entities add to hass."""
if self._tasks:
pending = [task for task in self._tasks if not task.done()]
self._tasks.clear()
if pending:
yield from asyncio.wait(pending, loop=self.component.hass.loop)
def schedule_add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform."""
run_callback_threadsafe(
self.component.hass.loop,
self.async_schedule_add_entities, list(new_entities),
update_before_add
).result()
@callback
def async_schedule_add_entities(self, new_entities,
update_before_add=False):
"""Add entities for a single platform async."""
self._tasks.append(self.component.hass.async_add_job(
self.async_add_entities(
new_entities, update_before_add=update_before_add)
))
def add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform."""
        # Calling this with update_before_add=True outside tests can deadlock
if update_before_add:
self.component.logger.warning(
"Call 'add_entities' with update_before_add=True "
"only inside tests or you can run into a deadlock!")
run_coroutine_threadsafe(
self.async_add_entities(list(new_entities), update_before_add),
self.component.hass.loop).result()
@asyncio.coroutine
def async_add_entities(self, new_entities, update_before_add=False):
"""Add entities for a single platform async.
This method must be run in the event loop.
"""
# handle empty list from component/platform
if not new_entities:
return
@asyncio.coroutine
def async_process_entity(new_entity):
"""Add entities to StateMachine."""
new_entity.parallel_updates = self.parallel_updates
ret = yield from self.component.async_add_entity(
new_entity, self, update_before_add=update_before_add
)
if ret:
self.platform_entities.append(new_entity)
tasks = [async_process_entity(entity) for entity in new_entities]
yield from asyncio.wait(tasks, loop=self.component.hass.loop)
self.component.async_update_group()
if self._async_unsub_polling is not None or \
not any(entity.should_poll for entity
in self.platform_entities):
return
self._async_unsub_polling = async_track_time_interval(
self.component.hass, self._update_entity_states, self.scan_interval
)
@asyncio.coroutine
def async_reset(self):
"""Remove all entities and reset data.
This method must be run in the event loop.
"""
if not self.platform_entities:
return
tasks = [entity.async_remove() for entity in self.platform_entities]
yield from asyncio.wait(tasks, loop=self.component.hass.loop)
if self._async_unsub_polling is not None:
self._async_unsub_polling()
self._async_unsub_polling = None
@asyncio.coroutine
def _update_entity_states(self, now):
"""Update the states of all the polling entities.
        To avoid flooding the executor, async entities are updated in
        parallel while all other entities are updated sequentially.
This method must be run in the event loop.
"""
if self._process_updates.locked():
self.component.logger.warning(
"Updating %s %s took longer than the scheduled update "
"interval %s", self.platform, self.component.domain,
self.scan_interval)
return
with (yield from self._process_updates):
tasks = []
for entity in self.platform_entities:
if not entity.should_poll:
continue
tasks.append(entity.async_update_ha_state(True))
if tasks:
yield from asyncio.wait(tasks, loop=self.component.hass.loop)
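# Usage sketch (an assumption, for illustration only): a platform module wires
# into EntityPlatform through the add_entities callback that EntityComponent
# passes to its setup function, roughly like this:
#
#     def setup_platform(hass, config, add_entities, discovery_info=None):
#         # add_entities is EntityPlatform.schedule_add_entities for sync
#         # platforms or async_schedule_add_entities for async platforms.
#         add_entities([MyLightEntity()], update_before_add=False)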
| |
import btk
import unittest
import _TDDConfigure
class IMUsExtractorTest(unittest.TestCase):
def test_NoAcquisition(self):
imuse = btk.btkIMUsExtractor()
output = imuse.GetOutput()
output.Update()
self.assertEqual(output.GetItemNumber(), 0)
def test_NoIMU(self):
imuse = btk.btkIMUsExtractor()
imuse.SetInput(btk.btkAcquisition())
output = imuse.GetOutput()
output.Update()
self.assertEqual(output.GetItemNumber(), 0)
def test_OneAcquisition(self):
acq = btk.btkAcquisition()
acq.Init(0,200,6)
imu = btk.btkMetaDataCreateChild(acq.GetMetaData(), 'IMU')
btk.btkMetaDataCreateChild(imu, 'USED', 1)
btk.btkMetaDataCreateChild(imu, 'LABELS', btk.btkStringArray(1,'Foo'))
btk.btkMetaDataCreateChild(imu, 'DESCRIPTIONS', btk.btkStringArray(1,'BAR'))
btk.btkMetaDataCreateChild(imu, 'TYPE', btk.btkIntArray(1,1))
channels = btk.btkIntArray(6); channels[0] = 1; channels[1] = 2; channels[2] = 3; channels[3] = 4; channels[4] = 5; channels[5] = 6
btk.btkMetaDataCreateChild(imu, 'CHANNEL', channels, 6)
imuse = btk.btkIMUsExtractor()
imuse.SetInput(acq)
output = imuse.GetOutput()
output.Update()
self.assertEqual(output.GetItemNumber(), 1)
self.assertEqual(output.GetItem(0).GetLabel(), 'Foo')
self.assertEqual(output.GetItem(0).GetDescription(), 'BAR')
self.assertEqual(output.GetItem(0).GetAccelerometerX().GetLabel(), acq.GetAnalog(0).GetLabel())
self.assertEqual(output.GetItem(0).GetAccelerometerY().GetLabel(), acq.GetAnalog(1).GetLabel())
self.assertEqual(output.GetItem(0).GetAccelerometerZ().GetLabel(), acq.GetAnalog(2).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeX().GetLabel(), acq.GetAnalog(3).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeY().GetLabel(), acq.GetAnalog(4).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeZ().GetLabel(), acq.GetAnalog(5).GetLabel())
def test_OneAcquisition_NoDescriptions(self):
acq = btk.btkAcquisition()
acq.Init(0,200,6)
imu = btk.btkMetaDataCreateChild(acq.GetMetaData(), 'IMU')
btk.btkMetaDataCreateChild(imu, 'USED', 1)
btk.btkMetaDataCreateChild(imu, 'LABELS', btk.btkStringArray(1,'Foo'))
btk.btkMetaDataCreateChild(imu, 'TYPE', btk.btkIntArray(1,1))
channels = btk.btkIntArray(6); channels[0] = 6; channels[1] = 5; channels[2] = 4; channels[3] = 3; channels[4] = 2; channels[5] = 1
btk.btkMetaDataCreateChild(imu, 'CHANNEL', channels, 6)
imuse = btk.btkIMUsExtractor()
imuse.SetInput(acq)
output = imuse.GetOutput()
output.Update()
self.assertEqual(output.GetItemNumber(), 1)
self.assertEqual(output.GetItem(0).GetLabel(), 'Foo')
self.assertEqual(output.GetItem(0).GetDescription(), '')
self.assertEqual(output.GetItem(0).GetAccelerometerX().GetLabel(), acq.GetAnalog(5).GetLabel())
self.assertEqual(output.GetItem(0).GetAccelerometerY().GetLabel(), acq.GetAnalog(4).GetLabel())
self.assertEqual(output.GetItem(0).GetAccelerometerZ().GetLabel(), acq.GetAnalog(3).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeX().GetLabel(), acq.GetAnalog(2).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeY().GetLabel(), acq.GetAnalog(1).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeZ().GetLabel(), acq.GetAnalog(0).GetLabel())
def test_OneAcquisition_NoLabelsAndDescriptions(self):
acq = btk.btkAcquisition()
acq.Init(0,200,6)
imu = btk.btkMetaDataCreateChild(acq.GetMetaData(), 'IMU')
btk.btkMetaDataCreateChild(imu, 'USED', 1)
btk.btkMetaDataCreateChild(imu, 'TYPE', btk.btkIntArray(1,1))
channels = btk.btkIntArray(6); channels[0] = 6; channels[1] = 5; channels[2] = 4; channels[3] = 3; channels[4] = 2; channels[5] = 1
btk.btkMetaDataCreateChild(imu, 'CHANNEL', channels, 6)
imuse = btk.btkIMUsExtractor()
imuse.SetInput(acq)
output = imuse.GetOutput()
output.Update()
self.assertEqual(output.GetItemNumber(), 1)
self.assertEqual(output.GetItem(0).GetLabel(), 'IMU #1')
self.assertEqual(output.GetItem(0).GetDescription(), '')
self.assertEqual(output.GetItem(0).GetAccelerometerX().GetLabel(), acq.GetAnalog(5).GetLabel())
self.assertEqual(output.GetItem(0).GetAccelerometerY().GetLabel(), acq.GetAnalog(4).GetLabel())
self.assertEqual(output.GetItem(0).GetAccelerometerZ().GetLabel(), acq.GetAnalog(3).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeX().GetLabel(), acq.GetAnalog(2).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeY().GetLabel(), acq.GetAnalog(1).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeZ().GetLabel(), acq.GetAnalog(0).GetLabel())
def test_OneAcquisition_MissingChannels(self):
acq = btk.btkAcquisition()
acq.Init(0,200,6)
imu = btk.btkMetaDataCreateChild(acq.GetMetaData(), 'IMU')
btk.btkMetaDataCreateChild(imu, 'USED', 1)
btk.btkMetaDataCreateChild(imu, 'TYPE', btk.btkIntArray(1,1))
channels = btk.btkIntArray(6); channels[0] = 6; channels[1] = 5; channels[2] = -1; channels[3] = -1; channels[4] = 2; channels[5] = 1
btk.btkMetaDataCreateChild(imu, 'CHANNEL', channels, 6)
imuse = btk.btkIMUsExtractor()
imuse.SetInput(acq)
output = imuse.GetOutput()
output.Update()
self.assertEqual(output.GetItemNumber(), 1)
self.assertEqual(output.GetItem(0).GetLabel(), 'IMU #1')
self.assertEqual(output.GetItem(0).GetDescription(), '')
self.assertEqual(output.GetItem(0).GetAccelerometerX().GetLabel(), acq.GetAnalog(5).GetLabel())
self.assertEqual(output.GetItem(0).GetAccelerometerY().GetLabel(), acq.GetAnalog(4).GetLabel())
# self.assertTrue(output.GetItem(0).GetAccelerometerZ()._get() == 0)
# self.assertTrue(output.GetItem(0).GetGyroscopeX()._get() == 0)
self.assertEqual(output.GetItem(0).GetGyroscopeY().GetLabel(), acq.GetAnalog(1).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeZ().GetLabel(), acq.GetAnalog(0).GetLabel())
def test_OneAcquisition_TwoSensors(self):
acq = btk.btkAcquisition()
acq.Init(0,200,15)
imu = btk.btkMetaDataCreateChild(acq.GetMetaData(), 'IMU')
btk.btkMetaDataCreateChild(imu, 'USED', 2)
btk.btkMetaDataCreateChild(imu, 'LABELS', btk.btkStringArray(2,'Foo'))
btk.btkMetaDataCreateChild(imu, 'TYPE', btk.btkIntArray(2,1))
        channels = btk.btkIntArray(18)
channels[0] = 1; channels[1] = 2; channels[2] = 3; channels[3] = 4; channels[4] = 5; channels[5] = 6; channels[6] = -1; channels[7] = -1; channels[8] = -1
channels[9] = 7; channels[10] = 8; channels[11] = 9; channels[12] = 10; channels[13] = 11; channels[14] = 12; channels[15] = 13; channels[16] = 14; channels[17] = 15
btk.btkMetaDataCreateChild(imu, 'CHANNEL', channels, 9)
        extra = btk.btkIntArray(2); extra[0] = 0; extra[1] = 3
btk.btkMetaDataCreateChild(imu, 'EXTRA', extra)
imuse = btk.btkIMUsExtractor()
imuse.SetInput(acq)
output = imuse.GetOutput()
output.Update()
self.assertEqual(output.GetItemNumber(), 2)
self.assertEqual(output.GetItem(0).GetLabel(), 'Foo')
self.assertEqual(output.GetItem(0).GetDescription(), '')
self.assertEqual(output.GetItem(0).GetAccelerometerX().GetLabel(), acq.GetAnalog(0).GetLabel())
self.assertEqual(output.GetItem(0).GetAccelerometerY().GetLabel(), acq.GetAnalog(1).GetLabel())
self.assertEqual(output.GetItem(0).GetAccelerometerZ().GetLabel(), acq.GetAnalog(2).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeX().GetLabel(), acq.GetAnalog(3).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeY().GetLabel(), acq.GetAnalog(4).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeZ().GetLabel(), acq.GetAnalog(5).GetLabel())
self.assertEqual(output.GetItem(1).GetLabel(), 'Foo')
self.assertEqual(output.GetItem(1).GetDescription(), '')
self.assertEqual(output.GetItem(1).GetAccelerometerX().GetLabel(), acq.GetAnalog(6).GetLabel())
self.assertEqual(output.GetItem(1).GetAccelerometerY().GetLabel(), acq.GetAnalog(7).GetLabel())
self.assertEqual(output.GetItem(1).GetAccelerometerZ().GetLabel(), acq.GetAnalog(8).GetLabel())
self.assertEqual(output.GetItem(1).GetGyroscopeX().GetLabel(), acq.GetAnalog(9).GetLabel())
self.assertEqual(output.GetItem(1).GetGyroscopeY().GetLabel(), acq.GetAnalog(10).GetLabel())
self.assertEqual(output.GetItem(1).GetGyroscopeZ().GetLabel(), acq.GetAnalog(11).GetLabel())
self.assertEqual(output.GetItem(1).GetChannel(6).GetLabel(), acq.GetAnalog(12).GetLabel())
self.assertEqual(output.GetItem(1).GetChannel(7).GetLabel(), acq.GetAnalog(13).GetLabel())
self.assertEqual(output.GetItem(1).GetChannel(8).GetLabel(), acq.GetAnalog(14).GetLabel())
def test_TwoAcquisitions(self):
# Acquisition #1
acq1 = btk.btkAcquisition()
acq1.Init(0,200,6)
imu = btk.btkMetaDataCreateChild(acq1.GetMetaData(), 'IMU')
btk.btkMetaDataCreateChild(imu, 'USED', 1)
btk.btkMetaDataCreateChild(imu, 'DESCRIPTIONS', btk.btkStringArray(1,'BAR'))
btk.btkMetaDataCreateChild(imu, 'TYPE', btk.btkIntArray(1,1))
channels = btk.btkIntArray(6); channels[0] = 1; channels[1] = 2; channels[2] = 3; channels[3] = 4; channels[4] = 5; channels[5] = 6
btk.btkMetaDataCreateChild(imu, 'CHANNEL', channels, 6)
# Acquisition #2 (two sensors)
acq2 = btk.btkAcquisition()
acq2.Init(0,200,15)
imu = btk.btkMetaDataCreateChild(acq2.GetMetaData(), 'IMU')
btk.btkMetaDataCreateChild(imu, 'USED', 2)
btk.btkMetaDataCreateChild(imu, 'LABELS', btk.btkStringArray(2,'Foo'))
btk.btkMetaDataCreateChild(imu, 'TYPE', btk.btkIntArray(2,1))
channels.resize(18)
channels[0] = 1; channels[1] = 2; channels[2] = 3; channels[3] = 4; channels[4] = 5; channels[5] = 6; channels[6] = -1; channels[7] = -1; channels[8] = -1
channels[9] = 7; channels[10] = 8; channels[11] = 9; channels[12] = 10; channels[13] = 11; channels[14] = 12; channels[15] = 13; channels[16] = 14; channels[17] = 15
btk.btkMetaDataCreateChild(imu, 'CHANNEL', channels, 9)
        extra = btk.btkIntArray(2); extra[0] = 0; extra[1] = 3
btk.btkMetaDataCreateChild(imu, 'EXTRA', extra)
imuse = btk.btkIMUsExtractor()
imuse.SetInput(0, acq1)
imuse.SetInput(1, acq2)
output = imuse.GetOutput()
output.Update()
self.assertEqual(output.GetItemNumber(), 3)
self.assertEqual(output.GetItem(0).GetLabel(), 'IMU #1')
self.assertEqual(output.GetItem(0).GetDescription(), 'BAR')
self.assertEqual(output.GetItem(0).GetAccelerometerX().GetLabel(), acq1.GetAnalog(0).GetLabel())
self.assertEqual(output.GetItem(0).GetAccelerometerY().GetLabel(), acq1.GetAnalog(1).GetLabel())
self.assertEqual(output.GetItem(0).GetAccelerometerZ().GetLabel(), acq1.GetAnalog(2).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeX().GetLabel(), acq1.GetAnalog(3).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeY().GetLabel(), acq1.GetAnalog(4).GetLabel())
self.assertEqual(output.GetItem(0).GetGyroscopeZ().GetLabel(), acq1.GetAnalog(5).GetLabel())
self.assertEqual(output.GetItem(1).GetLabel(), 'Foo')
self.assertEqual(output.GetItem(1).GetDescription(), '')
self.assertEqual(output.GetItem(1).GetAccelerometerX().GetLabel(), acq2.GetAnalog(0).GetLabel())
self.assertEqual(output.GetItem(1).GetAccelerometerY().GetLabel(), acq2.GetAnalog(1).GetLabel())
self.assertEqual(output.GetItem(1).GetAccelerometerZ().GetLabel(), acq2.GetAnalog(2).GetLabel())
self.assertEqual(output.GetItem(1).GetGyroscopeX().GetLabel(), acq2.GetAnalog(3).GetLabel())
self.assertEqual(output.GetItem(1).GetGyroscopeY().GetLabel(), acq2.GetAnalog(4).GetLabel())
self.assertEqual(output.GetItem(1).GetGyroscopeZ().GetLabel(), acq2.GetAnalog(5).GetLabel())
self.assertEqual(output.GetItem(2).GetLabel(), 'Foo')
self.assertEqual(output.GetItem(2).GetDescription(), '')
self.assertEqual(output.GetItem(2).GetAccelerometerX().GetLabel(), acq2.GetAnalog(6).GetLabel())
self.assertEqual(output.GetItem(2).GetAccelerometerY().GetLabel(), acq2.GetAnalog(7).GetLabel())
self.assertEqual(output.GetItem(2).GetAccelerometerZ().GetLabel(), acq2.GetAnalog(8).GetLabel())
self.assertEqual(output.GetItem(2).GetGyroscopeX().GetLabel(), acq2.GetAnalog(9).GetLabel())
self.assertEqual(output.GetItem(2).GetGyroscopeY().GetLabel(), acq2.GetAnalog(10).GetLabel())
self.assertEqual(output.GetItem(2).GetGyroscopeZ().GetLabel(), acq2.GetAnalog(11).GetLabel())
self.assertEqual(output.GetItem(2).GetChannel(6).GetLabel(), acq2.GetAnalog(12).GetLabel())
self.assertEqual(output.GetItem(2).GetChannel(7).GetLabel(), acq2.GetAnalog(13).GetLabel())
self.assertEqual(output.GetItem(2).GetChannel(8).GetLabel(), acq2.GetAnalog(14).GetLabel())
def test_TwoAcquisitions_NoIMU(self):
imuse = btk.btkIMUsExtractor()
imuse.SetInput(0,btk.btkAcquisition())
imuse.SetInput(1,btk.btkAcquisition())
output = imuse.GetOutput()
output.Update()
self.assertEqual(output.GetItemNumber(), 0)
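# Hedged addition: allow this test case to be run directly with the standard
# unittest runner; the _TDDConfigure harness may already discover it instead.
if __name__ == '__main__':
    unittest.main()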
| |
# -*- coding: utf-8 -*-
"""
werkzeug.testapp
~~~~~~~~~~~~~~~~
Provide a small test application that can be used to test a WSGI server
and check it for WSGI compliance.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import base64
import os
import sys
from textwrap import wrap
import werkzeug
from .utils import escape
from .wrappers import BaseRequest as Request
from .wrappers import BaseResponse as Response
logo = Response(
base64.b64decode(
"""
R0lGODlhoACgAOMIAAEDACwpAEpCAGdgAJaKAM28AOnVAP3rAP/////////
//////////////////////yH5BAEKAAgALAAAAACgAKAAAAT+EMlJq704680R+F0ojmRpnuj0rWnrv
nB8rbRs33gu0bzu/0AObxgsGn3D5HHJbCUFyqZ0ukkSDlAidctNFg7gbI9LZlrBaHGtzAae0eloe25
7w9EDOX2fst/xenyCIn5/gFqDiVVDV4aGeYiKkhSFjnCQY5OTlZaXgZp8nJ2ekaB0SQOjqphrpnOiq
ncEn65UsLGytLVmQ6m4sQazpbtLqL/HwpnER8bHyLrLOc3Oz8PRONPU1crXN9na263dMt/g4SzjMeX
m5yDpLqgG7OzJ4u8lT/P69ej3JPn69kHzN2OIAHkB9RUYSFCFQYQJFTIkCDBiwoXWGnowaLEjRm7+G
p9A7Hhx4rUkAUaSLJlxHMqVMD/aSycSZkyTplCqtGnRAM5NQ1Ly5OmzZc6gO4d6DGAUKA+hSocWYAo
SlM6oUWX2O/o0KdaVU5vuSQLAa0ADwQgMEMB2AIECZhVSnTno6spgbtXmHcBUrQACcc2FrTrWS8wAf
78cMFBgwIBgbN+qvTt3ayikRBk7BoyGAGABAdYyfdzRQGV3l4coxrqQ84GpUBmrdR3xNIDUPAKDBSA
ADIGDhhqTZIWaDcrVX8EsbNzbkvCOxG8bN5w8ly9H8jyTJHC6DFndQydbguh2e/ctZJFXRxMAqqPVA
tQH5E64SPr1f0zz7sQYjAHg0In+JQ11+N2B0XXBeeYZgBZFx4tqBToiTCPv0YBgQv8JqA6BEf6RhXx
w1ENhRBnWV8ctEX4Ul2zc3aVGcQNC2KElyTDYyYUWvShdjDyMOGMuFjqnII45aogPhz/CodUHFwaDx
lTgsaOjNyhGWJQd+lFoAGk8ObghI0kawg+EV5blH3dr+digkYuAGSaQZFHFz2P/cTaLmhF52QeSb45
Jwxd+uSVGHlqOZpOeJpCFZ5J+rkAkFjQ0N1tah7JJSZUFNsrkeJUJMIBi8jyaEKIhKPomnC91Uo+NB
yyaJ5umnnpInIFh4t6ZSpGaAVmizqjpByDegYl8tPE0phCYrhcMWSv+uAqHfgH88ak5UXZmlKLVJhd
dj78s1Fxnzo6yUCrV6rrDOkluG+QzCAUTbCwf9SrmMLzK6p+OPHx7DF+bsfMRq7Ec61Av9i6GLw23r
idnZ+/OO0a99pbIrJkproCQMA17OPG6suq3cca5ruDfXCCDoS7BEdvmJn5otdqscn+uogRHHXs8cbh
EIfYaDY1AkrC0cqwcZpnM6ludx72x0p7Fo/hZAcpJDjax0UdHavMKAbiKltMWCF3xxh9k25N/Viud8
ba78iCvUkt+V6BpwMlErmcgc502x+u1nSxJSJP9Mi52awD1V4yB/QHONsnU3L+A/zR4VL/indx/y64
gqcj+qgTeweM86f0Qy1QVbvmWH1D9h+alqg254QD8HJXHvjQaGOqEqC22M54PcftZVKVSQG9jhkv7C
JyTyDoAJfPdu8v7DRZAxsP/ky9MJ3OL36DJfCFPASC3/aXlfLOOON9vGZZHydGf8LnxYJuuVIbl83y
Az5n/RPz07E+9+zw2A2ahz4HxHo9Kt79HTMx1Q7ma7zAzHgHqYH0SoZWyTuOLMiHwSfZDAQTn0ajk9
YQqodnUYjByQZhZak9Wu4gYQsMyEpIOAOQKze8CmEF45KuAHTvIDOfHJNipwoHMuGHBnJElUoDmAyX
c2Qm/R8Ah/iILCCJOEokGowdhDYc/yoL+vpRGwyVSCWFYZNljkhEirGXsalWcAgOdeAdoXcktF2udb
qbUhjWyMQxYO01o6KYKOr6iK3fE4MaS+DsvBsGOBaMb0Y6IxADaJhFICaOLmiWTlDAnY1KzDG4ambL
cWBA8mUzjJsN2KjSaSXGqMCVXYpYkj33mcIApyhQf6YqgeNAmNvuC0t4CsDbSshZJkCS1eNisKqlyG
cF8G2JeiDX6tO6Mv0SmjCa3MFb0bJaGPMU0X7c8XcpvMaOQmCajwSeY9G0WqbBmKv34DsMIEztU6Y2
KiDlFdt6jnCSqx7Dmt6XnqSKaFFHNO5+FmODxMCWBEaco77lNDGXBM0ECYB/+s7nKFdwSF5hgXumQe
EZ7amRg39RHy3zIjyRCykQh8Zo2iviRKyTDn/zx6EefptJj2Cw+Ep2FSc01U5ry4KLPYsTyWnVGnvb
UpyGlhjBUljyjHhWpf8OFaXwhp9O4T1gU9UeyPPa8A2l0p1kNqPXEVRm1AOs1oAGZU596t6SOR2mcB
Oco1srWtkaVrMUzIErrKri85keKqRQYX9VX0/eAUK1hrSu6HMEX3Qh2sCh0q0D2CtnUqS4hj62sE/z
aDs2Sg7MBS6xnQeooc2R2tC9YrKpEi9pLXfYXp20tDCpSP8rKlrD4axprb9u1Df5hSbz9QU0cRpfgn
kiIzwKucd0wsEHlLpe5yHXuc6FrNelOl7pY2+11kTWx7VpRu97dXA3DO1vbkhcb4zyvERYajQgAADs
="""
),
mimetype="image/png",
)
TEMPLATE = u"""\
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<title>WSGI Information</title>
<style type="text/css">
@import url(https://fonts.googleapis.com/css?family=Ubuntu);
body { font-family: 'Lucida Grande', 'Lucida Sans Unicode', 'Geneva',
'Verdana', sans-serif; background-color: white; color: #000;
font-size: 15px; text-align: center; }
#logo { float: right; padding: 0 0 10px 10px; }
div.box { text-align: left; width: 45em; margin: auto; padding: 50px 0;
background-color: white; }
h1, h2 { font-family: 'Ubuntu', 'Lucida Grande', 'Lucida Sans Unicode',
'Geneva', 'Verdana', sans-serif; font-weight: normal; }
h1 { margin: 0 0 30px 0; }
h2 { font-size: 1.4em; margin: 1em 0 0.5em 0; }
table { width: 100%%; border-collapse: collapse; border: 1px solid #AFC5C9 }
table th { background-color: #AFC1C4; color: white; font-size: 0.72em;
font-weight: normal; width: 18em; vertical-align: top;
padding: 0.5em 0 0.1em 0.5em; }
table td { border: 1px solid #AFC5C9; padding: 0.1em 0 0.1em 0.5em; }
code { font-family: 'Consolas', 'Monaco', 'Bitstream Vera Sans Mono',
monospace; font-size: 0.7em; }
ul li { line-height: 1.5em; }
ul.path { font-size: 0.7em; margin: 0 -30px; padding: 8px 30px;
list-style: none; background: #E8EFF0; }
ul.path li { line-height: 1.6em; }
li.virtual { color: #999; text-decoration: underline; }
li.exp { background: white; }
</style>
<div class="box">
<img src="?resource=logo" id="logo" alt="[The Werkzeug Logo]" />
<h1>WSGI Information</h1>
<p>
This page displays all available information about the WSGI server and
the underlying Python interpreter.
<h2 id="python-interpreter">Python Interpreter</h2>
<table>
<tr>
<th>Python Version
<td>%(python_version)s
<tr>
<th>Platform
<td>%(platform)s [%(os)s]
<tr>
<th>API Version
<td>%(api_version)s
<tr>
<th>Byteorder
<td>%(byteorder)s
<tr>
<th>Werkzeug Version
<td>%(werkzeug_version)s
</table>
<h2 id="wsgi-environment">WSGI Environment</h2>
<table>%(wsgi_env)s</table>
<h2 id="installed-eggs">Installed Eggs</h2>
<p>
The following python packages were installed on the system as
Python eggs:
<ul>%(python_eggs)s</ul>
<h2 id="sys-path">System Path</h2>
<p>
The following paths are the current contents of the load path. The
following entries are looked up for Python packages. Note that not
all items in this path are folders. Gray and underlined items are
entries pointing to invalid resources or used by custom import hooks
such as the zip importer.
<p>
Items with a bright background were expanded for display from a relative
path. If you encounter such paths in the output you might want to check
your setup as relative paths are usually problematic in multithreaded
environments.
<ul class="path">%(sys_path)s</ul>
</div>
"""
def iter_sys_path():
if os.name == "posix":
def strip(x):
prefix = os.path.expanduser("~")
if x.startswith(prefix):
x = "~" + x[len(prefix) :]
return x
else:
def strip(x):
return x
cwd = os.path.abspath(os.getcwd())
for item in sys.path:
path = os.path.join(cwd, item or os.path.curdir)
yield strip(os.path.normpath(path)), not os.path.isdir(path), path != item
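# Illustrative note (assumed environment): on POSIX with HOME=/home/user,
# iter_sys_path() yields 3-tuples such as ('~/project', False, False): the
# home-stripped path, whether the entry is missing or virtual (not a
# directory), and whether it was expanded from a relative sys.path entry.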
def render_testapp(req):
try:
import pkg_resources
except ImportError:
eggs = ()
else:
eggs = sorted(pkg_resources.working_set, key=lambda x: x.project_name.lower())
python_eggs = []
for egg in eggs:
try:
version = egg.version
except (ValueError, AttributeError):
version = "unknown"
python_eggs.append(
"<li>%s <small>[%s]</small>" % (escape(egg.project_name), escape(version))
)
wsgi_env = []
sorted_environ = sorted(req.environ.items(), key=lambda x: repr(x[0]).lower())
for key, value in sorted_environ:
wsgi_env.append(
"<tr><th>%s<td><code>%s</code>"
% (escape(str(key)), " ".join(wrap(escape(repr(value)))))
)
sys_path = []
for item, virtual, expanded in iter_sys_path():
class_ = []
if virtual:
class_.append("virtual")
if expanded:
class_.append("exp")
sys_path.append(
"<li%s>%s"
% (' class="%s"' % " ".join(class_) if class_ else "", escape(item))
)
return (
TEMPLATE
% {
"python_version": "<br>".join(escape(sys.version).splitlines()),
"platform": escape(sys.platform),
"os": escape(os.name),
"api_version": sys.api_version,
"byteorder": sys.byteorder,
"werkzeug_version": werkzeug.__version__,
"python_eggs": "\n".join(python_eggs),
"wsgi_env": "\n".join(wsgi_env),
"sys_path": "\n".join(sys_path),
}
).encode("utf-8")
def test_app(environ, start_response):
"""Simple test application that dumps the environment. You can use
it to check if Werkzeug is working properly:
.. sourcecode:: pycon
>>> from werkzeug.serving import run_simple
>>> from werkzeug.testapp import test_app
>>> run_simple('localhost', 3000, test_app)
* Running on http://localhost:3000/
The application displays important information from the WSGI environment,
the Python interpreter and the installed libraries.
"""
req = Request(environ, populate_request=False)
if req.args.get("resource") == "logo":
response = logo
else:
response = Response(render_testapp(req), mimetype="text/html")
return response(environ, start_response)
if __name__ == "__main__":
from .serving import run_simple
run_simple("localhost", 5000, test_app, use_reloader=True)
| |
#!/usr/bin/env python3
# author: @netmanchris
# This section imports required libraries
import json
import requests
import ipaddress
HEADERS = {'Accept': 'application/json', 'Content-Type':
'application/json', 'Accept-encoding': 'application/json'}
def get_real_time_locate(ipAddress, auth, url):
"""
    function takes the ipAddress of a specific host and issues a RESTful call to get the device and interface that the
    target host is currently connected to. Note: although intended to return a single location, multiple locations may
    be returned for a single host due to a partially discovered network or a misconfigured environment.
:param ipAddress: str value valid IPv4 IP address
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: list of dictionaries where each element of the list represents the location of the target host
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> found_device = get_real_time_locate('10.101.0.51', auth.creds, auth.url)
>>> assert type(found_device) is list
    >>> assert 'deviceId' in found_device[0]
>>> no_device = get_real_time_locate('192.168.254.254', auth.creds, auth.url)
>>> assert type(no_device) is dict
>>> assert len(no_device) == 0
"""
real_time_locate_url = "/imcrs/res/access/realtimeLocate?type=2&value=" + str(ipAddress) + "&total=false"
f_url = url + real_time_locate_url
    try:
        r = requests.get(f_url, auth=auth, headers=HEADERS)  # issue the GET inside the try so request errors are caught
        if r.status_code == 200:
response = json.loads(r.text)
            if 'realtimeLocation' in response:
                real_time_locate = response['realtimeLocation']
                if type(real_time_locate) is dict:
                    real_time_locate = [real_time_locate]
                return real_time_locate
            else:
                return response
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_real_time_locate: An Error has occured"
def get_ip_mac_arp_list(devId, auth,url):
"""
function takes devid of specific device and issues a RESTFUL call to get the IP/MAC/ARP list from the target device.
:param devId: int or str value of the target device.
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: list of dictionaries containing the IP/MAC/ARP list of the target device.
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> ip_mac_list = get_ip_mac_arp_list('10', auth.creds, auth.url)
>>> assert type(ip_mac_list) is list
>>> assert 'deviceId' in ip_mac_list[0]
"""
if auth is None or url is None: # checks to see if the imc credentials are already available
set_imc_creds()
ip_mac_arp_list_url = "/imcrs/res/access/ipMacArp/" + str(devId)
f_url = url + ip_mac_arp_list_url
    try:
        r = requests.get(f_url, auth=auth, headers=HEADERS)  # issue the GET inside the try so request errors are caught
        if r.status_code == 200:
macarplist = (json.loads(r.text))
if len(macarplist) > 1:
return macarplist['ipMacArp']
else:
return ['this function is unsupported']
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_ip_mac_arp_list: An Error has occured"
#this section deals with the IP Address Manager functions with terminal access of HPE IMC Base platform
#Following functions deal with IP scopes
def get_ip_scope(auth, url, scopeId=None,):
"""
    function returns all IP address scopes currently configured on the HPE IMC server. If the optional scopeId
    parameter is included, only the matching scope is returned.
:param scopeId: integer of the desired scope id ( optional )
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: list of dictionary objects where each element of the list represents one IP scope
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> ip_scope_list = get_ip_scope(auth.creds, auth.url)
>>> assert type(ip_scope_list) is list
>>> assert 'ip' in ip_scope_list[0]
"""
if scopeId is None:
get_ip_scope_url = "/imcrs/res/access/assignedIpScope"
else:
get_ip_scope_url = "/imcrs/res/access/assignedIpScope/ip?ipScopeId="+str(scopeId)
f_url = url + get_ip_scope_url
    try:
        r = requests.get(f_url, auth=auth, headers=HEADERS)  # issue the GET inside the try so request errors are caught
        if r.status_code == 200:
ipscopelist = (json.loads(r.text))
return ipscopelist['assignedIpScope']
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_ip_scope: An Error has occured"
def get_ip_scope_detail(scopeId, auth, url ):
"""
    function takes the scopeId of an existing scope and returns the full details of that IP address scope from the
    HPE IMC server.
    :param scopeId: integer of the desired scope id
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: dictionary, which may contain multiple entries if sub-scopes have been created
:rtype: dict
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> ip_scope_detail = get_ip_scope_detail('45', auth.creds, auth.url)
>>> assert type(ip_scope_detail) is dict
>>> assert 'startIp' in ip_scope_detail
"""
get_ip_scope_url = "/imcrs/res/access/assignedIpScope/"+str(scopeId)
f_url = url + get_ip_scope_url
    try:
        r = requests.get(f_url, auth=auth, headers=HEADERS)  # issue the GET inside the try so request errors are caught
        if r.status_code == 200:
ipscopelist = (json.loads(r.text))
return ipscopelist
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_ip_scope: An Error has occured"
def add_ip_scope(startIp, endIp, name, description, auth, url):
"""
    Function takes four strings (startIp, endIp, name, and description) and adds a new IP scope to terminal access
    in the HPE IMC base platform
:param startIp: str Start of IP address scope ex. '10.101.0.1'
:param endIp: str End of IP address scope ex. '10.101.0.254'
:param name: str Name of the owner of this IP scope ex. 'admin'
:param description: str description of the Ip scope
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: int HTTP status code; 200 if successful, 409 if the scope already exists
    :rtype: int
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> delete_ip_scope('10.50.0.0/24', auth.creds, auth.url)
<Response [204]>
>>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
>>> assert type(new_scope) is int
>>> assert new_scope == 200
>>> existing_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
>>> assert type(existing_scope) is int
>>> assert existing_scope == 409
"""
if auth is None or url is None: # checks to see if the imc credentials are already available
set_imc_creds()
add_ip_scope_url = "/imcrs/res/access/assignedIpScope"
f_url = url + add_ip_scope_url
payload = ('''{ "startIp": "%s", "endIp": "%s","name": "%s","description": "%s" }'''
%(str(startIp), str(endIp), str(name), str(description)))
    try:
        r = requests.post(f_url, auth=auth, headers=HEADERS, data=payload)  # send the new scope as the request body
        if r.status_code == 200:
#print("IP Scope Successfully Created")
return r.status_code
elif r.status_code == 409:
#print ("IP Scope Already Exists")
return r.status_code
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " add_ip_scope: An Error has occured"
def add_child_ip_scope(startIp, endIp, name, description, scopeid, auth, url):
"""
    Function takes four strings (startIp, endIp, name, and description) plus the scopeid of the parent scope and adds
    a new child IP scope to terminal access in the HPE IMC base platform
:param startIp: str Start of IP address scope ex. '10.101.0.1'
:param endIp: str End of IP address scope ex. '10.101.0.254'
:param name: str Name of the owner of this IP scope ex. 'admin'
:param description: str description of the Ip scope
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: int HTTP status code; 200 if successful, 409 on conflict with an existing scope
    :rtype: int
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> add_child_ip_scope('10.50.0.1', '10.50.0.126', 'cyoung', 'test sub scope', '175', auth.creds, auth.url)
"""
if auth is None or url is None: # checks to see if the imc credentials are already available
set_imc_creds()
add_ip_scope_url = "/imcrs/res/access/assignedIpScope/" + str(scopeid)
f_url = url + add_ip_scope_url
payload = ('''{ "startIp": "%s", "endIp": "%s","name": "%s","description": "%s", "parentId" : "%s"}'''
%(str(startIp), str(endIp), str(name), str(description), str(scopeid)))
    try:
        r = requests.post(f_url, auth=auth, headers=HEADERS, data=payload)  # send the new child scope as the request body
        if r.status_code == 200:
#print("IP Scope Successfully Created")
return r.status_code
elif r.status_code == 409:
#print ("Conflict with Current Scope")
return r.status_code
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " add_ip_scope: An Error has occured"
def delete_ip_scope(network_address, auth, url):
'''Function to delete an entire IP segment from the IMC IP Address management under terminal access
    :param network_address: network address of the target scope in format x.x.x.x/yy
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
>>> delete_scope = delete_ip_scope('10.50.0.0/24', auth.creds, auth.url)
'''
scope_id = get_scope_id(network_address, auth,url)
delete_ip_address_url = '''/imcrs/res/access/assignedIpScope/'''+str(scope_id)
f_url = url + delete_ip_address_url
    try:
        r = requests.delete(f_url, auth=auth, headers=HEADERS)
        # Return the full Response object, as documented by the doctest above
        return r
    except requests.exceptions.RequestException as e:
        return "Error:\n" + str(e) + " delete_ip_scope: An error has occurred"
#Following functions deal with hosts assigned to IP scopes
def remove_scope_ip(hostid, auth, url):
"""
    Function to remove an existing host IP address allocation
:param hostid: Host id of the host to be deleted
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: int HTTP response code. Should be 204 if successful
    :rtype: int
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
>>> add_host_to_segment('10.50.0.5', 'cyoung', 'New Test Host', '10.50.0.0/24', auth.creds, auth.url)
>>> host_id = get_host_id('10.50.0.5', '10.50.0.0/24', auth.creds, auth.url)
>>> rem_host = remove_scope_ip(host_id, auth.creds, auth.url)
>>> assert type(rem_host) is int
>>> assert rem_host == 204
"""
    remove_scope_ip_url = '/imcrs/res/access/assignedIpScope/ip/'+str(hostid)
    f_url = url + remove_scope_ip_url
    try:
        r = requests.delete(f_url, auth=auth, headers=HEADERS)
        if r.status_code == 204:
#print("Host Successfully Deleted")
return r.status_code
elif r.status_code == 409:
#print("IP Scope Already Exists")
return r.status_code
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " add_ip_scope: An Error has occured"
def get_ip_scope_hosts( scopeId, auth, url):
"""
Function requires input of scope ID and returns list of allocated IP address for the specified scope
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :param scopeId: Integer of the desired scope id
:return: list of dictionary objects where each element of the list represents a single host assigned to the IP scope
:rtype: list
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> scope_id = get_scope_id('10.50.0.0/24', auth.creds, auth.url)
>>> ip_scope_hosts = get_ip_scope_hosts(scope_id, auth.creds, auth.url)
>>> assert type(ip_scope_hosts) is list
>>> assert 'name' in ip_scope_hosts[0]
>>> assert 'description' in ip_scope_hosts[0]
>>> assert 'ip' in ip_scope_hosts[0]
>>> assert 'id' in ip_scope_hosts[0]
"""
get_ip_scope_url = "/imcrs/res/access/assignedIpScope/ip?size=10000&ipScopeId="+str(scopeId)
f_url = url + get_ip_scope_url
    try:
        r = requests.get(f_url, auth=auth, headers=HEADERS)  # issue the GET inside the try so request errors are caught
        if r.status_code == 200:
            ipscopelist = json.loads(r.text)
            if ipscopelist == {}:
                return ipscopelist
            ipscopelist = ipscopelist['assignedIpInfo']
            if type(ipscopelist) is dict:
                return [ipscopelist]
            return ipscopelist
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " get_ip_scope: An Error has occured"
def add_scope_ip(ipaddress, name, description, scopeid, auth, url):
"""
Function to add new host IP address allocation to existing scope ID
:param ipaddress:
:param name: name of the owner of this host
:param description: Description of the host
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: int HTTP status code; 200 if successful, 409 if the host allocation already exists
    :rtype: int
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> new_host = add_scope_ip('10.50.0.5', 'cyoung', 'New Test Host','175', auth.creds, auth.url)
"""
new_ip = { "ip": ipaddress,
"name": name,
"description": description}
add_scope_ip_url = '/imcrs/res/access/assignedIpScope/ip?ipScopeId='+str(scopeid)
f_url = url + add_scope_ip_url
payload = json.dumps(new_ip)
    try:
        r = requests.post(f_url, auth=auth, headers=HEADERS,
                          data=payload)  # send the new host allocation as the request body
        if r.status_code == 200:
#print("IP Host Successfully Created")
return r.status_code
elif r.status_code == 409:
#print("IP Host Already Exists")
return r.status_code
except requests.exceptions.RequestException as e:
return "Error:\n" + str(e) + " add_ip_scope: An Error has occured"
def add_host_to_segment(ipaddress, name, description, network_address, auth, url):
    ''' Function that wraps the existing add_scope_ip function, allowing callers to supply a network address rather
    than having to look up the scope_id first
:param ipaddress:
:param name: name of the owner of this host
:param description: Description of the host
    :param: network_address: network address of the target scope in format x.x.x.x/yy where x.x.x.x represents the
network address and yy represents the length of the subnet mask. Example: 10.50.0.0 255.255.255.0 would be written
as 10.50.0.0/24
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return:
:rtype:
'''
scope_id = get_scope_id(network_address, auth, url)
add_scope_ip(ipaddress, name, description, scope_id, auth,url)
def delete_host_from_segment(ipaddress, networkaddress, auth, url):
    '''Function that wraps the existing remove_scope_ip function, allowing callers to supply the host IP address and
    network address rather than having to look up the host id first
    '''
    host_id = get_host_id(ipaddress, networkaddress, auth, url)
    remove_scope_ip(host_id, auth, url)
"""
The following section contains helper functions that translate human-readable IPv4 addresses into the IMC internal
keys used when working with IP scopes and hosts
"""
def get_scope_id(network_address, auth, url):
"""
    :param network_address: network address of the target scope in format x.x.x.x/yy where x.x.x.x represents the
network address and yy represents the length of the subnet mask. Example: 10.50.0.0 255.255.255.0 would be written
as 10.50.0.0/24
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: str object which contains the numerical ID of the target scope
:rtype: str
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
>>> scope_id = get_scope_id('10.50.0.0/24', auth.creds, auth.url)
>>> assert type(scope_id) is str
"""
netaddr = ipaddress.ip_network(network_address)
scopes = get_ip_scope(auth, url)
for i in scopes:
if int(i['id']) > 0:
if ipaddress.ip_address(i['startIp']) in netaddr and ipaddress.ip_address(i['endIp']) in netaddr:
return i['id']
if "assignedIpScope" in i:
for child in i['assignedIpScope']:
if ipaddress.ip_address(child['startIp']) in netaddr and ipaddress.ip_address(child['endIp']) in netaddr:
return child['id']
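# Illustrative note: the containment checks above rely on the stdlib
# ipaddress module, e.g.
#     ipaddress.ip_address('10.50.0.1') in ipaddress.ip_network('10.50.0.0/24')  # True
#     ipaddress.ip_address('10.51.0.1') in ipaddress.ip_network('10.50.0.0/24')  # False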
def get_host_id(host_address, network_address, auth, url):
"""
    :param host_address: str of the full IPv4 address of the target host in format x.x.x.x. Example: '10.50.0.5'
    :param network_address: network address of the target scope in format x.x.x.x/yy where x.x.x.x represents the
network address and yy represents the length of the subnet mask. Example: 10.50.0.0 255.255.255.0 would be written
as 10.50.0.0/24
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: str object which contains the numerical ID of the target host
:rtype: str
>>> from pyhpeimc.auth import *
>>> from pyhpeimc.plat.termaccess import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
>>> new_scope = add_ip_scope('10.50.0.1', '10.50.0.254', 'cyoung', 'test group', auth.creds, auth.url)
>>> add_host_to_segment('10.50.0.5', 'cyoung', 'New Test Host', '10.50.0.0/24', auth.creds, auth.url)
>>> new_host_id = get_host_id('10.50.0.5', '10.50.0.0/24', auth.creds, auth.url)
>>> assert type(new_host_id) is str
"""
scope_id = get_scope_id(network_address, auth, url)
all_scope_hosts = get_ip_scope_hosts(scope_id, auth, url)
for host in all_scope_hosts:
if host['ip'] == host_address:
return host['id']
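# Minimal end-to-end sketch (assumes a reachable IMC server and valid
# credentials; the addresses and values are illustrative only):
#     auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
#     scope_id = get_scope_id('10.50.0.0/24', auth.creds, auth.url)
#     hosts = get_ip_scope_hosts(scope_id, auth.creds, auth.url)
#     host_id = get_host_id('10.50.0.5', '10.50.0.0/24', auth.creds, auth.url)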
| |
from rest_framework import viewsets
from rest_framework.response import Response
from signup.models import SignUp, TimeBlock, SignUpSlot
from datetime import datetime, time, date, timedelta
from rest_framework.views import APIView
from rest_framework import status
from signup.serializers import SignUpSheetSerializer
from posts.serializers import PostSerializer
from authentication.models import Account
from posts.models import Post
from combine_slots import combine
from pre_signup.views import PrefSignUpCreatAndListView
# Create your views here.
class SignUpCreateAndListView(viewsets.ModelViewSet):
serializer_class = SignUpSheetSerializer
queryset = SignUp.objects.all()
'''
Expect data format:
name: name
max_duration: max_duration (int)
min_duration: min_duration (int)
max_slots: max_slots(int)
    begin_time_list: a list of ISO-format datetime strings
    end_time_list: a list of ISO-format datetime strings
'''
# I have seen: datetime.datetime.strptime(u'2014-03-06T04:38:51Z', '%Y-%m-%dT%H:%M:%SZ')
# datetime.strptime(date_posted, '%Y-%m-%dT%H:%M:%S.%SZ')
#u'2015-03-22T18:00:00.000Z'
def create(self, request):
def unicode_to_datetime(code):
datetime_obj = datetime.strptime(code, '%Y-%m-%dT%H:%M:%S.%fZ')
return datetime_obj
owner = Account.objects.get(email = request.user.email)
name = request.data['content']
week_num = request.data['weekNum']
day_of_week = request.data['dayOfWeek']
loc = request.data['location']
max_slot = request.data['numSlotsPerUser']
min_duration = request.data['minTimes']['undefined']
max_duration = request.data['maxTimes']['undefined']
begin_time_list_unicode = request.data['beginDateTimes']
end_time_list_unicode = request.data['endDateTimes']
begin_time_list_datetime = list(map(unicode_to_datetime, begin_time_list_unicode))
end_time_list_datetime = list(map(unicode_to_datetime, end_time_list_unicode))
post = Post.objects.create(author = owner, content= "SignUp: " + name, description_event = "Sign up sheet",
week_num = week_num, day_of_week = day_of_week,
location_event = loc, start_time = begin_time_list_datetime[0],
need_repeat = False, is_date_set = False, is_week_set = True)
post.save()
SignUp.objects.create_signup(post, name, loc, max_duration, min_duration,
max_slot, begin_time_list_datetime, end_time_list_datetime)
post_data = PostSerializer(post).data
return Response(post_data, status=status.HTTP_201_CREATED)
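    # Illustrative (assumed) shape of request.data consumed by create() above;
    # the field names are taken from the code, the values are made up:
    #     {"content": "Office hours", "weekNum": 3, "dayOfWeek": 2,
    #      "location": "Room 101", "numSlotsPerUser": 2,
    #      "minTimes": {"undefined": 15}, "maxTimes": {"undefined": 30},
    #      "beginDateTimes": ["2015-03-22T18:00:00.000Z"],
    #      "endDateTimes": ["2015-03-22T19:00:00.000Z"]}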
'''
This API is called when user click on the description
button on a post card.
If the post card is a regular post, this function uses PostSerializer.
If the post card is a signup object, this function uses SignUpSheetSerializer.
When the JSON string is created, the function adds a type key into the JSON.
So when the front end gets the JSON, the front end can check the data.data['type']
    if the result is 'post' then it's a regular post. If the result is 'signup', then
it's a signup.
    Note: the parameter has to be named post_pk, otherwise a runtime error occurs:
    list() got an unexpected keyword argument 'post_pk'
'''
def list(self, request, post_pk):
post = Post.objects.get(pk = post_pk)
post_owner = post.author
requester = Account.objects.get(email=request.user.email)
if hasattr(post, 'signup'):
print 'Post is a signup sheet'
if requester != post_owner:
print 'requester is NOT post owner'
#TODO:
serializer = SignUpSheetSerializer(post.signup, context={'is_owner': False, 'requester':requester.username})
else:
print 'requester is post owner'
serializer = SignUpSheetSerializer(post.signup, context={'is_owner': True, 'requester': post_owner.username})
elif hasattr(post, 'prefsignup'):
print 'This is a pre-based signup'
instance = PrefSignUpCreatAndListView()
response = PrefSignUpCreatAndListView.list(instance, request, post_pk)
return response
else:
print 'Post is a regular post'
serializer = PostSerializer(post)
# Now that I have a JSON, how do I inject a field into this JSON?
print serializer.data
return Response(serializer.data)
class SignUpView(viewsets.ModelViewSet):
serializer_class = SignUpSheetSerializer
queryset = SignUp.objects.all()
'''
This function is called when a requester is choosing the duration of slots.
Then this function sends back all slot of that particular duration.
'''
def list(self, request, post_pk, duration_pk, *args, **kwargs):
print 'SignUpViewList'
post = Post.objects.get(pk = post_pk)
post_owner = post.author
requester = Account.objects.get(email=request.user.email)
signup_sheet = post.signup
min_duration = signup_sheet.min_duration
duration = int(duration_pk)
print duration
print type(duration)
num_slots_to_combine = duration/min_duration
print num_slots_to_combine
if num_slots_to_combine != 1:
data = combine(requester,post, post.signup, num_slots_to_combine)
print 'Back to SignupView List'
print data
return Response(data)
else:
serializer = SignUpSheetSerializer(post.signup, context={'is_owner': False, 'requester': requester.username})
print serializer.data
return Response(serializer.data)
'''
Expecting a list of start_time and end_time. Or a list of
start_time and a list of end_time
Also, post id.
time data '2015-03-24T18:00:00Z' does not match format '%Y-%m-%dT%H:%M:%S.%fZ'
This function is for requester to actually signup.
'''
def create(self, request, *args, **kwargs):
def unicode_to_datetime(code):
datetime_obj = datetime.strptime(code, '%Y-%m-%dT%H:%M:%SZ')
return datetime_obj
print 'Signup Slot create'
requester = Account.objects.get(email=request.user.email)
post = Post.objects.get(pk = request.data['postPk'])
        slot_queryset = SignUpSlot.objects.filter(block__sheet__post = post)
        for slot in slot_queryset:
if slot.owner == requester:
slot.owner = None
slot.save()
begin_time_list_unicode = request.data['beginDateTimes']
end_time_list_unicode = request.data['endDateTimes']
begin_time_list_datetime = list(map(unicode_to_datetime, begin_time_list_unicode))
end_time_list_datetime = list(map(unicode_to_datetime, end_time_list_unicode))
print begin_time_list_datetime
print end_time_list_datetime
num_requested_slot = len(begin_time_list_datetime)
max_slots = post.signup.max_slots
min_duration = timedelta(minutes=post.signup.min_duration)
print num_requested_slot
print max_slots
print post.signup.min_duration
if num_requested_slot > max_slots:
return Response(status=status.HTTP_400_BAD_REQUEST)
for i in range(0, len(begin_time_list_datetime)):
            start_slot = slot_queryset.get(start_time = begin_time_list_datetime[i])
            end_slot = slot_queryset.get(end_time = end_time_list_datetime[i])
end_slot.owner = requester
end_slot.save()
while start_slot != end_slot:
start_slot.owner = requester
start_slot.save()
                start_slot = slot_queryset.get(start_time = start_slot.start_time + min_duration)
data = SignUpSheetSerializer(post.signup, context={'is_owner': False, 'requester': requester.username})
print 'Sign up sheet after sign up create'
print data.data
return Response(data.data, status=status.HTTP_201_CREATED)
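# Hypothetical payload accepted by SignUpView.create; the field names are
# taken from the code above, the values are illustrative (note the strptime
# format '%Y-%m-%dT%H:%M:%SZ' used here, without fractional seconds):
#     {"postPk": 42,
#      "beginDateTimes": ["2015-03-24T18:00:00Z"],
#      "endDateTimes": ["2015-03-24T18:30:00Z"]}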
| |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
from robot.errors import DataError
from robot.libraries import STDLIBS
from robot.output import LOGGER
from robot.utils import (getdoc, get_error_details, Importer, is_java_init,
is_java_method, JYTHON, normalize, seq2str2, unic,
is_list_like, PY2, PYPY, type_name)
from .arguments import EmbeddedArguments
from .context import EXECUTION_CONTEXTS
from .dynamicmethods import (GetKeywordArguments, GetKeywordDocumentation,
GetKeywordNames, RunKeyword)
from .handlers import Handler, InitHandler, DynamicHandler, EmbeddedArgumentsHandler
from .handlerstore import HandlerStore
from .libraryscopes import LibraryScope
from .outputcapture import OutputCapturer
if JYTHON:
from java.lang import Object
else:
Object = None
def TestLibrary(name, args=None, variables=None, create_handlers=True):
if name in STDLIBS:
import_name = 'robot.libraries.' + name
else:
import_name = name
with OutputCapturer(library_import=True):
importer = Importer('test library')
libcode, source = importer.import_class_or_module(import_name,
return_source=True)
libclass = _get_lib_class(libcode)
lib = libclass(libcode, name, args or [], source, variables)
if create_handlers:
lib.create_handlers()
return lib
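# Hedged usage sketch (illustrative, not from this module's docs): creating
# a library wrapper through the factory above. 'BuiltIn' is one of the
# standard library names in robot.libraries.STDLIBS, so it resolves to
# 'robot.libraries.BuiltIn' before import.
#
# lib = TestLibrary('BuiltIn')
# print('%s has %d keywords' % (lib.name, len(lib)))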
def _get_lib_class(libcode):
if inspect.ismodule(libcode):
return _ModuleLibrary
if GetKeywordNames(libcode):
if RunKeyword(libcode):
return _DynamicLibrary
else:
return _HybridLibrary
return _ClassLibrary
class _BaseTestLibrary(object):
_failure_level = 'INFO'
def __init__(self, libcode, name, args, source, variables):
if os.path.exists(name):
name = os.path.splitext(os.path.basename(os.path.abspath(name)))[0]
self.version = self._get_version(libcode)
self.name = name
self.orig_name = name # Stores original name when importing WITH NAME
self.source = source
self.handlers = HandlerStore(self.name, HandlerStore.TEST_LIBRARY_TYPE)
self.has_listener = None # Set when first instance is created
self._doc = None
self.doc_format = self._get_doc_format(libcode)
self.scope = LibraryScope(libcode, self)
self.init = self._create_init_handler(libcode)
self.positional_args, self.named_args \
= self.init.resolve_arguments(args, variables)
self._libcode = libcode
self._libinst = None
def __len__(self):
return len(self.handlers)
@property
def doc(self):
if self._doc is None:
self._doc = getdoc(self.get_instance())
return self._doc
def create_handlers(self):
self._create_handlers(self.get_instance())
self.reset_instance()
def reload(self):
self.handlers = HandlerStore(self.name, HandlerStore.TEST_LIBRARY_TYPE)
self._create_handlers(self.get_instance())
def start_suite(self):
self.scope.start_suite()
def end_suite(self):
self.scope.end_suite()
def start_test(self):
self.scope.start_test()
def end_test(self):
self.scope.end_test()
def _get_version(self, libcode):
return self._get_attr(libcode, 'ROBOT_LIBRARY_VERSION') \
or self._get_attr(libcode, '__version__')
def _get_attr(self, object, attr, default='', upper=False):
value = unic(getattr(object, attr, default))
if upper:
value = normalize(value, ignore='_').upper()
return value
def _get_doc_format(self, libcode):
return self._get_attr(libcode, 'ROBOT_LIBRARY_DOC_FORMAT', upper=True)
def _create_init_handler(self, libcode):
return InitHandler(self, self._resolve_init_method(libcode))
def _resolve_init_method(self, libcode):
init = getattr(libcode, '__init__', None)
return init if init and self._valid_init(init) else lambda: None
def _valid_init(self, method):
# https://bitbucket.org/pypy/pypy/issues/2462/
if PYPY:
if PY2:
return method.__func__ is not object.__init__.__func__
return method is not object.__init__
return (inspect.ismethod(method) or # PY2
inspect.isfunction(method) or # PY3
is_java_init(method))
def reset_instance(self, instance=None):
prev = self._libinst
if not self.scope.is_global:
self._libinst = instance
return prev
def get_instance(self, create=True):
if not create:
return self._libinst
if self._libinst is None:
self._libinst = self._get_instance(self._libcode)
if self.has_listener is None:
self.has_listener = bool(self.get_listeners(self._libinst))
return self._libinst
def _get_instance(self, libcode):
with OutputCapturer(library_import=True):
try:
return libcode(*self.positional_args, **dict(self.named_args))
except:
self._raise_creating_instance_failed()
def get_listeners(self, libinst=None):
if not libinst:
libinst = self.get_instance()
listeners = getattr(libinst, 'ROBOT_LIBRARY_LISTENER', None)
if listeners is None:
return []
if is_list_like(listeners):
return listeners
return [listeners]
def register_listeners(self):
if self.has_listener:
try:
listeners = EXECUTION_CONTEXTS.current.output.library_listeners
listeners.register(self.get_listeners(), self)
except DataError as err:
self.has_listener = False
# Error should have information about suite where the
# problem occurred but we don't have such info here.
LOGGER.error("Registering listeners for library '%s' failed: %s"
% (self.name, err))
def unregister_listeners(self, close=False):
if self.has_listener:
listeners = EXECUTION_CONTEXTS.current.output.library_listeners
listeners.unregister(self, close)
def close_global_listeners(self):
if self.scope.is_global:
for listener in self.get_listeners():
self._close_listener(listener)
def _close_listener(self, listener):
method = (getattr(listener, 'close', None) or
getattr(listener, '_close', None))
try:
if method:
method()
except:
message, details = get_error_details()
name = getattr(listener, '__name__', None) or type_name(listener)
LOGGER.error("Calling method '%s' of listener '%s' failed: %s"
% (method.__name__, name, message))
LOGGER.info("Details:\n%s" % details)
def _create_handlers(self, libcode):
try:
names = self._get_handler_names(libcode)
except:
message, details = get_error_details()
raise DataError("Getting keyword names from library '%s' failed: %s"
% (self.name, message), details)
for name in names:
method = self._try_to_get_handler_method(libcode, name)
if method:
handler, embedded = self._try_to_create_handler(name, method)
if handler:
try:
self.handlers.add(handler, embedded)
except DataError as err:
LOGGER.error("Error in test library '%s': "
"Creating keyword '%s' failed: %s"
% (self.name, handler.name, err.message))
else:
LOGGER.debug("Created keyword '%s'" % handler.name)
def _get_handler_names(self, libcode):
return [name for name in dir(libcode)
if not name.startswith(('_', 'ROBOT_'))]
def _try_to_get_handler_method(self, libcode, name):
try:
return self._get_handler_method(libcode, name)
except:
self._report_adding_keyword_failed(name)
return None
def _report_adding_keyword_failed(self, name, message=None, details=None,
level=None):
if not message:
message, details = get_error_details()
LOGGER.write("Adding keyword '%s' to library '%s' failed: %s"
% (name, self.name, message), level or self._failure_level)
if details:
LOGGER.debug('Details:\n%s' % details)
def _get_handler_method(self, libcode, name):
method = getattr(libcode, name)
if not inspect.isroutine(method):
raise DataError('Not a method or function')
return method
def _try_to_create_handler(self, name, method):
try:
handler = self._create_handler(name, method)
except DataError as err:
self._report_adding_keyword_failed(name, err.message, level='ERROR')
return None, False
except:
self._report_adding_keyword_failed(name)
return None, False
try:
return self._get_possible_embedded_args_handler(handler)
except DataError as err:
self._report_adding_keyword_failed(handler.name, err.message,
level='ERROR')
return None, False
def _create_handler(self, handler_name, handler_method):
return Handler(self, handler_name, handler_method)
def _get_possible_embedded_args_handler(self, handler):
embedded = EmbeddedArguments(handler.name)
if embedded:
self._validate_embedded_count(embedded, handler.arguments)
return EmbeddedArgumentsHandler(embedded.name, handler), True
return handler, False
def _validate_embedded_count(self, embedded, arguments):
if not (arguments.minargs <= len(embedded.args) <= arguments.maxargs):
raise DataError('Embedded argument count does not match number of '
'accepted arguments.')
def _raise_creating_instance_failed(self):
msg, details = get_error_details()
if self.positional_args or self.named_args:
args = self.positional_args \
+ ['%s=%s' % item for item in self.named_args]
args_text = 'arguments %s' % seq2str2(args)
else:
args_text = 'no arguments'
raise DataError("Initializing test library '%s' with %s failed: %s\n%s"
% (self.name, args_text, msg, details))
class _ClassLibrary(_BaseTestLibrary):
def _get_handler_method(self, libinst, name):
# Type is checked before using getattr to avoid calling properties,
# most importantly bean properties generated by Jython (issue 188).
for item in (libinst,) + inspect.getmro(libinst.__class__):
if item in (object, Object):
continue
if not (hasattr(item, '__dict__') and name in item.__dict__):
continue
self._validate_handler(item.__dict__[name])
return getattr(libinst, name)
raise DataError('No non-implicit implementation found')
def _validate_handler(self, handler):
if not self._is_routine(handler):
raise DataError('Not a method or function')
if self._is_implicit_java_or_jython_method(handler):
raise DataError('Implicit methods are ignored')
def _is_routine(self, handler):
return inspect.isroutine(handler) or is_java_method(handler)
def _is_implicit_java_or_jython_method(self, handler):
if not is_java_method(handler):
return False
for signature in handler.argslist[:handler.nargs]:
cls = signature.declaringClass
if not (cls is Object or self._is_created_by_jython(handler, cls)):
return False
return True
def _is_created_by_jython(self, handler, cls):
proxy_methods = getattr(cls, '__supernames__', []) + ['classDictInit']
return handler.__name__ in proxy_methods
class _ModuleLibrary(_BaseTestLibrary):
def _get_handler_method(self, libcode, name):
method = _BaseTestLibrary._get_handler_method(self, libcode, name)
if hasattr(libcode, '__all__') and name not in libcode.__all__:
raise DataError('Not exposed as a keyword')
return method
def get_instance(self, create=True):
if not create:
return self._libcode
if self.has_listener is None:
self.has_listener = bool(self.get_listeners(self._libcode))
return self._libcode
def _create_init_handler(self, libcode):
return InitHandler(self, lambda: None)
class _HybridLibrary(_BaseTestLibrary):
_failure_level = 'ERROR'
def _get_handler_names(self, instance):
return GetKeywordNames(instance)()
class _DynamicLibrary(_BaseTestLibrary):
_failure_level = 'ERROR'
def __init__(self, libcode, name, args, source, variables=None):
_BaseTestLibrary.__init__(self, libcode, name, args, source, variables)
@property
def doc(self):
if self._doc is None:
self._doc = (self._get_kw_doc('__intro__') or
_BaseTestLibrary.doc.fget(self))
return self._doc
def _get_kw_doc(self, name, instance=None):
getter = GetKeywordDocumentation(instance or self.get_instance())
return getter(name)
def _get_kw_args(self, name, instance=None):
getter = GetKeywordArguments(instance or self.get_instance())
return getter(name)
def _get_handler_names(self, instance):
return GetKeywordNames(instance)()
def _get_handler_method(self, instance, name):
return RunKeyword(instance)
def _create_handler(self, name, method):
doc = self._get_kw_doc(name)
argspec = self._get_kw_args(name)
return DynamicHandler(self, name, method, doc, argspec)
def _create_init_handler(self, libcode):
docgetter = lambda: self._get_kw_doc('__init__')
return InitHandler(self, self._resolve_init_method(libcode), docgetter)
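# ---------------------------------------------------------------------------
# Hedged illustration (not part of Robot Framework itself): a minimal class
# that _get_lib_class() above would wrap as a _DynamicLibrary, because it
# defines both get_keyword_names and run_keyword. With get_keyword_names
# alone it would become a _HybridLibrary, a plain module a _ModuleLibrary,
# and any other class a _ClassLibrary.
class _ExampleDynamicLibrary(object):

    def get_keyword_names(self):
        return ['Say Hello']

    def run_keyword(self, name, args):
        return 'Hello, %s!' % (args[0] if args else 'world')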
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import os
import unittest
import txaio
if os.environ.get('USE_TWISTED', False):
txaio.use_twisted()
elif os.environ.get('USE_ASYNCIO', False):
txaio.use_asyncio()
from autobahn.wamp import message
from autobahn.wamp import role
from autobahn.wamp import serializer
def generate_test_messages():
"""
List of WAMP test message used for serializers. Expand this if you add more
options or messages.
This list of WAMP message does not contain any binary app payloads!
"""
some_bytes = os.urandom(32)
some_unicode = '\u3053\u3093\u306b\u3061\u306f\u4e16\u754c'
some_uri = 'com.myapp.foobar'
some_unicode_uri = 'com.myapp.\u4f60\u597d\u4e16\u754c.baz'
some_args = [1, 2, 3, 'hello', some_bytes, some_unicode, {'foo': 23, 'bar': 'hello', 'baz': some_bytes, 'moo': some_unicode}]
some_kwargs = {'foo': 23, 'bar': 'hello', 'baz': some_bytes, 'moo': some_unicode, 'arr': some_args}
msgs = [
message.Hello("realm1", {'subscriber': role.RoleSubscriberFeatures()}),
message.Hello("realm1", {'publisher': role.RolePublisherFeatures()}),
message.Hello("realm1", {'caller': role.RoleCallerFeatures()}),
message.Hello("realm1", {'callee': role.RoleCalleeFeatures()}),
message.Hello("realm1", {
'subscriber': role.RoleSubscriberFeatures(),
'publisher': role.RolePublisherFeatures(),
'caller': role.RoleCallerFeatures(),
'callee': role.RoleCalleeFeatures(),
}),
message.Goodbye(),
message.Yield(123456),
message.Yield(123456, args=some_args),
message.Yield(123456, args=[], kwargs=some_kwargs),
message.Yield(123456, args=some_args, kwargs=some_kwargs),
message.Yield(123456, progress=True),
message.Interrupt(123456),
message.Interrupt(123456, mode=message.Interrupt.KILL),
message.Invocation(123456, 789123),
message.Invocation(123456, 789123, args=some_args),
message.Invocation(123456, 789123, args=[], kwargs=some_kwargs),
message.Invocation(123456, 789123, args=some_args, kwargs=some_kwargs),
message.Invocation(123456, 789123, timeout=10000),
message.Result(123456),
message.Result(123456, args=some_args),
message.Result(123456, args=[], kwargs=some_kwargs),
message.Result(123456, args=some_args, kwargs=some_kwargs),
message.Result(123456, progress=True),
message.Cancel(123456),
message.Cancel(123456, mode=message.Cancel.KILL),
message.Call(123456, some_uri),
message.Call(123456, some_uri, args=some_args),
message.Call(123456, some_uri, args=[], kwargs=some_kwargs),
message.Call(123456, some_uri, args=some_args, kwargs=some_kwargs),
message.Call(123456, some_uri, timeout=10000),
message.Call(123456, some_unicode_uri),
message.Call(123456, some_unicode_uri, args=some_args),
message.Call(123456, some_unicode_uri, args=[], kwargs=some_kwargs),
message.Call(123456, some_unicode_uri, args=some_args, kwargs=some_kwargs),
message.Call(123456, some_unicode_uri, timeout=10000),
message.Unregistered(123456),
message.Unregister(123456, 789123),
message.Registered(123456, 789123),
message.Register(123456, some_uri),
message.Register(123456, some_uri, match='prefix'),
message.Register(123456, some_uri, invoke='roundrobin'),
message.Register(123456, some_unicode_uri),
message.Register(123456, some_unicode_uri, match='prefix'),
message.Register(123456, some_unicode_uri, invoke='roundrobin'),
message.Event(123456, 789123),
message.Event(123456, 789123, args=some_args),
message.Event(123456, 789123, args=[], kwargs=some_kwargs),
message.Event(123456, 789123, args=some_args, kwargs=some_kwargs),
message.Event(123456, 789123, publisher=300),
message.Published(123456, 789123),
message.Publish(123456, some_uri),
message.Publish(123456, some_uri, args=some_args),
message.Publish(123456, some_uri, args=[], kwargs=some_kwargs),
message.Publish(123456, some_uri, args=some_args, kwargs=some_kwargs),
message.Publish(123456, some_uri, exclude_me=False, exclude=[300], eligible=[100, 200, 300]),
message.Publish(123456, some_unicode_uri),
message.Publish(123456, some_unicode_uri, args=some_args),
message.Publish(123456, some_unicode_uri, args=[], kwargs=some_kwargs),
message.Publish(123456, some_unicode_uri, args=some_args, kwargs=some_kwargs),
message.Publish(123456, some_unicode_uri, exclude_me=False, exclude=[300], eligible=[100, 200, 300]),
message.Unsubscribed(123456),
message.Unsubscribe(123456, 789123),
message.Subscribed(123456, 789123),
message.Subscribe(123456, some_uri),
message.Subscribe(123456, some_uri, match=message.Subscribe.MATCH_PREFIX),
message.Subscribe(123456, some_unicode_uri),
message.Subscribe(123456, some_unicode_uri, match=message.Subscribe.MATCH_PREFIX),
message.Error(message.Call.MESSAGE_TYPE, 123456, some_uri),
message.Error(message.Call.MESSAGE_TYPE, 123456, some_uri, args=some_args),
message.Error(message.Call.MESSAGE_TYPE, 123456, some_uri, args=[], kwargs=some_kwargs),
message.Error(message.Call.MESSAGE_TYPE, 123456, some_uri, args=some_args, kwargs=some_kwargs),
message.Error(message.Call.MESSAGE_TYPE, 123456, some_unicode_uri),
message.Error(message.Call.MESSAGE_TYPE, 123456, some_unicode_uri, args=some_args),
message.Error(message.Call.MESSAGE_TYPE, 123456, some_unicode_uri, args=[], kwargs=some_kwargs),
message.Error(message.Call.MESSAGE_TYPE, 123456, some_unicode_uri, args=some_args, kwargs=some_kwargs),
message.Result(123456),
message.Result(123456, args=some_args),
message.Result(123456, args=some_args, kwargs=some_kwargs),
]
    # The False flag marks that none of these messages carries a binary payload.
    return [(False, msg) for msg in msgs]
def generate_test_messages_binary():
"""
Generate WAMP test messages which contain binary app payloads.
With the JSON serializer, this currently only works on Python 3 (both CPython3 and PyPy3),
because even on Python 3, we need to patch the stdlib JSON, and on Python 2, the patching
would be even hackier.
"""
msgs = []
for binary in [b'',
b'\x00',
b'\30',
os.urandom(4),
os.urandom(16),
os.urandom(128),
os.urandom(256),
os.urandom(512),
os.urandom(1024)]:
msgs.append(message.Event(123456, 789123, args=[binary]))
msgs.append(message.Event(123456, 789123, args=[binary], kwargs={'foo': binary}))
    # The True flag marks that every message here carries a binary payload.
    return [(True, msg) for msg in msgs]
def create_serializers():
_serializers = []
_serializers.append(serializer.JsonSerializer())
_serializers.append(serializer.JsonSerializer(batched=True))
_serializers.append(serializer.MsgPackSerializer())
_serializers.append(serializer.MsgPackSerializer(batched=True))
_serializers.append(serializer.CBORSerializer())
_serializers.append(serializer.CBORSerializer(batched=True))
_serializers.append(serializer.UBJSONSerializer())
_serializers.append(serializer.UBJSONSerializer(batched=True))
# FIXME: implement full FlatBuffers serializer for WAMP
# WAMP-FlatBuffers currently only supports Python 3
# _serializers.append(serializer.FlatBuffersSerializer())
# _serializers.append(serializer.FlatBuffersSerializer(batched=True))
return _serializers
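# Hedged usage sketch mirroring test_roundtrip_msg below: serialize a single
# WAMP message with one serializer and unserialize it again. The choice of
# JsonSerializer and Goodbye is illustrative only.
def _example_roundtrip():
    ser = serializer.JsonSerializer()
    msg = message.Goodbye()
    # serialize() returns the wire payload plus a flag telling whether the
    # payload is binary; unserialize() returns a list of parsed messages.
    payload, is_binary = ser.serialize(msg)
    assert ser.unserialize(payload, is_binary) == [msg]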
class TestFlatBuffersSerializer(unittest.TestCase):
def test_basic(self):
messages = [
message.Event(123456,
789123,
args=[1, 2, 3],
kwargs={'foo': 23, 'bar': 'hello'},
publisher=666,
retained=True),
message.Publish(123456,
'com.example.topic1',
args=[1, 2, 3],
kwargs={'foo': 23, 'bar': 'hello'},
retain=True)
]
ser = serializer.FlatBuffersSerializer()
# from pprint import pprint
for msg in messages:
# serialize message
payload, binary = ser.serialize(msg)
# unserialize message again
msg2 = ser.unserialize(payload, binary)[0]
# pprint(msg.marshal())
# pprint(msg2.marshal())
# must be equal: message roundtrips via the serializer
self.assertEqual(msg, msg2)
# self.assertEqual(msg.subscription, msg2.subscription)
# self.assertEqual(msg.publication, msg2.publication)
class TestSerializer(unittest.TestCase):
def setUp(self):
self._test_messages = generate_test_messages() + generate_test_messages_binary()
self._test_serializers = create_serializers()
# print('Testing WAMP serializers {} with {} WAMP test messages'.format([ser.SERIALIZER_ID for ser in self._test_serializers], len(self._test_messages)))
def test_deep_equal_msg(self):
"""
Test deep object equality assert (because I am paranoid).
"""
v = os.urandom(10)
o1 = [1, 2, {'foo': 'bar', 'bar': v, 'baz': [9, 3, 2], 'goo': {'moo': [1, 2, 3]}}, v]
o2 = [1, 2, {'goo': {'moo': [1, 2, 3]}, 'bar': v, 'baz': [9, 3, 2], 'foo': 'bar'}, v]
self.assertEqual(o1, o2)
def test_roundtrip_msg(self):
"""
Test round-tripping over each serializer.
"""
for ser in self._test_serializers:
for contains_binary, msg in self._test_messages:
# serialize message
payload, binary = ser.serialize(msg)
# unserialize message again
msg2 = ser.unserialize(payload, binary)
# must be equal: message roundtrips via the serializer
self.assertEqual([msg], msg2)
def test_crosstrip_msg(self):
"""
Test cross-tripping over 2 serializers (as is done by WAMP routers).
"""
for ser1 in self._test_serializers:
for contains_binary, msg in self._test_messages:
# serialize message
payload, binary = ser1.serialize(msg)
# unserialize message again
msg1 = ser1.unserialize(payload, binary)
msg1 = msg1[0]
for ser2 in self._test_serializers:
# serialize message
payload, binary = ser2.serialize(msg1)
# unserialize message again
msg2 = ser2.unserialize(payload, binary)
# must be equal: message crosstrips via
# the serializers ser1 -> ser2
self.assertEqual([msg], msg2)
def test_cache_msg(self):
"""
Test message serialization caching.
"""
for contains_binary, msg in self._test_messages:
# message serialization cache is initially empty
self.assertEqual(msg._serialized, {})
for ser in self._test_serializers:
# verify message serialization is not yet cached
self.assertFalse(ser._serializer in msg._serialized)
payload, binary = ser.serialize(msg)
# now the message serialization must be cached
self.assertTrue(ser._serializer in msg._serialized)
self.assertEqual(msg._serialized[ser._serializer], payload)
# and after resetting the serialization cache, message
# serialization is gone
msg.uncache()
self.assertFalse(ser._serializer in msg._serialized)
def test_initial_stats(self):
"""
Test initial serializer stats are indeed empty.
"""
for ser in self._test_serializers:
stats = ser.stats(details=True)
self.assertEqual(stats['serialized']['bytes'], 0)
self.assertEqual(stats['serialized']['messages'], 0)
self.assertEqual(stats['serialized']['rated_messages'], 0)
self.assertEqual(stats['unserialized']['bytes'], 0)
self.assertEqual(stats['unserialized']['messages'], 0)
self.assertEqual(stats['unserialized']['rated_messages'], 0)
def test_serialize_stats(self):
"""
Test serializer stats are non-empty after serializing/unserializing messages.
"""
for ser in self._test_serializers:
for contains_binary, msg in self._test_messages:
# serialize message
payload, binary = ser.serialize(msg)
# unserialize message again
ser.unserialize(payload, binary)
stats = ser.stats(details=False)
self.assertTrue(stats['bytes'] > 0)
self.assertTrue(stats['messages'] > 0)
self.assertTrue(stats['rated_messages'] > 0)
def test_serialize_stats_with_details(self):
"""
Test serializer stats - with details - are non-empty after serializing/unserializing messages.
"""
for ser in self._test_serializers:
for contains_binary, msg in self._test_messages:
# serialize message
payload, binary = ser.serialize(msg)
# unserialize message again
ser.unserialize(payload, binary)
stats = ser.stats(details=True)
# {'serialized': {'bytes': 7923, 'messages': 59, 'rated_messages': 69}, 'unserialized': {'bytes': 7923, 'messages': 59, 'rated_messages': 69}}
# print(stats)
self.assertTrue(stats['serialized']['bytes'] > 0)
self.assertTrue(stats['serialized']['messages'] > 0)
self.assertTrue(stats['serialized']['rated_messages'] > 0)
self.assertTrue(stats['unserialized']['bytes'] > 0)
self.assertTrue(stats['unserialized']['messages'] > 0)
self.assertTrue(stats['unserialized']['rated_messages'] > 0)
self.assertEqual(stats['serialized']['bytes'], stats['unserialized']['bytes'])
self.assertEqual(stats['serialized']['messages'], stats['unserialized']['messages'])
self.assertEqual(stats['serialized']['rated_messages'], stats['unserialized']['rated_messages'])
def test_reset_stats(self):
"""
Test serializer stats are reset after fetching stats - depending on option.
"""
for ser in self._test_serializers:
for contains_binary, msg in self._test_messages:
# serialize message
payload, binary = ser.serialize(msg)
# unserialize message again
ser.unserialize(payload, binary)
ser.stats()
stats = ser.stats(details=True)
self.assertEqual(stats['serialized']['bytes'], 0)
self.assertEqual(stats['serialized']['messages'], 0)
self.assertEqual(stats['serialized']['rated_messages'], 0)
self.assertEqual(stats['unserialized']['bytes'], 0)
self.assertEqual(stats['unserialized']['messages'], 0)
self.assertEqual(stats['unserialized']['rated_messages'], 0)
def test_auto_stats(self):
"""
Test serializer stats are non-empty after serializing/unserializing messages.
"""
for ser in self._test_serializers:
def on_stats(stats):
self.assertTrue(stats['bytes'] > 0)
self.assertTrue(stats['messages'] > 0)
self.assertTrue(stats['rated_messages'] > 0)
ser.set_stats_autoreset(10, 0, on_stats)
for contains_binary, msg in self._test_messages:
# serialize message
payload, binary = ser.serialize(msg)
# unserialize message again
ser.unserialize(payload, binary)
# Copyright 2021 The Kubeflow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from kubernetes import client, config
import multiprocessing
import statistics
from datetime import datetime
import grpc
from pkg.apis.manager.v1beta1.python import api_pb2
from pkg.apis.manager.v1beta1.python import api_pb2_grpc
logger = logging.getLogger()
logging.basicConfig(level=logging.INFO)
STATUS_EARLY_STOPPED = "EarlyStopped"
KUBEFLOW_GROUP = "kubeflow.org"
KATIB_VERSION = "v1beta1"
TRIAL_PLURAL = "trials"
APISERVER_TIMEOUT = 120
DEFAULT_NAMESPACE = "default"
SUCCEEDED_TRIAL = api_pb2.TrialStatus.TrialConditionType.SUCCEEDED
class MedianStopService(api_pb2_grpc.EarlyStoppingServicer):
def __init__(self):
super(MedianStopService, self).__init__()
self.is_first_run = True
# Default settings
self.min_trials_required = 3
self.start_step = 4
        # trials_avg_history is a dict with succeeded Trials history, where
        # key = Trial name, value = average of the first "start_step" reported metrics.
self.trials_avg_history = {}
# Assume that Trial namespace = Suggestion namespace.
try:
with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as f:
self.namespace = f.readline()
# Set config and api instance for k8s client.
config.load_incluster_config()
# This is used when service is not running in k8s, e.g. for unit tests.
except Exception as e:
logger.info("{}. Service is not running in Kubernetes Pod, \"{}\" namespace is used".format(
e, DEFAULT_NAMESPACE
))
self.namespace = DEFAULT_NAMESPACE
# Set config and api instance for k8s client.
config.load_kube_config()
self.api_instance = client.CustomObjectsApi()
def ValidateEarlyStoppingSettings(self, request, context):
is_valid, message = self.validate_early_stopping_spec(request.early_stopping)
if not is_valid:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(message)
logger.error(message)
return api_pb2.ValidateEarlyStoppingSettingsReply()
def validate_early_stopping_spec(self, early_stopping_spec):
algorithm_name = early_stopping_spec.algorithm_name
if algorithm_name == "medianstop":
return self.validate_medianstop_setting(early_stopping_spec.algorithm_settings)
else:
return False, "unknown algorithm name {}".format(algorithm_name)
@staticmethod
def validate_medianstop_setting(early_stopping_settings):
for setting in early_stopping_settings:
try:
if setting.name == "min_trials_required":
if not (int(setting.value) > 0):
return False, "min_trials_required must be greater than zero (>0)"
elif setting.name == "start_step":
if not (int(setting.value) >= 1):
return False, "start_step must be greater or equal than one (>=1)"
else:
return False, "unknown setting {} for algorithm medianstop".format(setting.name)
except Exception as e:
return False, "failed to validate {}({}): {}".format(setting.name, setting.value, e)
return True, ""
def GetEarlyStoppingRules(self, request, context):
logger.info("Get new early stopping rules")
# Get required values for the first call.
if self.is_first_run:
self.is_first_run = False
# Get early stopping settings.
self.get_early_stopping_settings(request.experiment.spec.early_stopping.algorithm_settings)
logger.info("Median stopping settings are: min_trials_required: {}, start_step: {}".format(
self.min_trials_required, self.start_step))
# Get comparison type and objective metric
if request.experiment.spec.objective.type == api_pb2.MAXIMIZE:
self.comparison = api_pb2.LESS
else:
self.comparison = api_pb2.GREATER
self.objective_metric = request.experiment.spec.objective.objective_metric_name
# Get DB manager address. It should have host and port.
# For example: katib-db-manager.kubeflow:6789 - default one.
self.db_manager_address = request.db_manager_address.split(':')
if len(self.db_manager_address) != 2:
raise Exception("Invalid Katib DB manager service address: {}".format(self.db_manager_address))
early_stopping_rules = []
median = self.get_median_value(request.trials)
if median is not None:
early_stopping_rules.append(api_pb2.EarlyStoppingRule(
name=self.objective_metric,
value=str(median),
comparison=self.comparison,
start_step=self.start_step,
))
logger.info("New early stopping rules are:\n {}\n\n".format(early_stopping_rules))
return api_pb2.GetEarlyStoppingRulesReply(
early_stopping_rules=early_stopping_rules
)
def get_early_stopping_settings(self, early_stopping_settings):
for setting in early_stopping_settings:
if setting.name == "min_trials_required":
self.min_trials_required = int(setting.value)
elif setting.name == "start_step":
self.start_step = int(setting.value)
def get_median_value(self, trials):
for trial in trials:
# Get metrics only for the new succeeded Trials.
if trial.name not in self.trials_avg_history and trial.status.condition == SUCCEEDED_TRIAL:
channel = grpc.beta.implementations.insecure_channel(
self.db_manager_address[0], int(self.db_manager_address[1]))
with api_pb2.beta_create_DBManager_stub(channel) as client:
get_log_response = client.GetObservationLog(api_pb2.GetObservationLogRequest(
trial_name=trial.name,
metric_name=self.objective_metric
), timeout=APISERVER_TIMEOUT)
# Get only first start_step metrics.
# Since metrics are collected consistently and ordered by time, we slice top start_step metrics.
first_x_logs = get_log_response.observation_log.metric_logs[:self.start_step]
metric_sum = 0
for log in first_x_logs:
metric_sum += float(log.metric.value)
# Get average metric value for the Trial.
new_average = metric_sum / len(first_x_logs)
self.trials_avg_history[trial.name] = new_average
logger.info("Adding new succeeded Trial: {} with average metrics value: {}".format(
trial.name, new_average))
logger.info("Trials average log history: {}".format(self.trials_avg_history))
        # Once min_trials_required Trials have succeeded, return the median
        # of the per-Trial averages (the median stopping rule).
        if len(self.trials_avg_history) >= self.min_trials_required:
            median = statistics.median(self.trials_avg_history.values())
            logger.info("Generated new median value: {}".format(median))
            return median
# Else, return None.
logger.info("Count of succeeded Trials: {} is less than min_trials_required: {}".format(
len(self.trials_avg_history), self.min_trials_required
))
return None
def SetTrialStatus(self, request, context):
trial_name = request.trial_name
logger.info("Update status for Trial: {}".format(trial_name))
# TODO (andreyvelich): Move this part to Katib SDK ?
# Get Trial object
thread = self.api_instance.get_namespaced_custom_object(
KUBEFLOW_GROUP,
KATIB_VERSION,
self.namespace,
TRIAL_PLURAL,
trial_name,
async_req=True)
trial = None
try:
trial = thread.get(APISERVER_TIMEOUT)
except multiprocessing.TimeoutError:
raise Exception("Timeout trying to get Katib Trial")
except Exception as e:
raise Exception(
"Get Trial: {} in namespace: {} failed. Exception: {}".format(trial_name, self.namespace, e))
time_now = datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ")
early_stopped_condition = {
"type": STATUS_EARLY_STOPPED,
"status": "True",
"reason": "TrialEarlyStopped",
"message": "Trial is early stopped",
"lastUpdateTime": time_now,
"lastTransitionTime": time_now,
}
trial["status"]["conditions"].append(early_stopped_condition)
# Update Trial object with early stopped status
try:
self.api_instance.patch_namespaced_custom_object_status(
KUBEFLOW_GROUP,
KATIB_VERSION,
self.namespace,
TRIAL_PLURAL,
trial_name,
trial,
async_req=True)
except Exception as e:
raise Exception(
"Update status for Trial: {} in namespace: {} failed. Exception: {}".format(
trial_name, self.namespace, e))
logger.info("Changed status to: {} for Trial: {} in namespace: {}\n\n".format(
STATUS_EARLY_STOPPED, trial_name, self.namespace))
return api_pb2.SetTrialStatusReply()
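# ---------------------------------------------------------------------------
# Hedged bootstrap sketch (not from this file): wiring the servicer into a
# gRPC server. The helper name add_EarlyStoppingServicer_to_server follows
# the standard grpc codegen convention for the EarlyStopping service and is
# assumed here rather than taken from this module; the port is illustrative.
if __name__ == "__main__":
    from concurrent import futures

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    api_pb2_grpc.add_EarlyStoppingServicer_to_server(MedianStopService(), server)
    server.add_insecure_port("0.0.0.0:6788")
    server.start()
    server.wait_for_termination()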
from jsonrpc import ServiceProxy
import sys
import string
import getpass
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:10823")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:10823")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitmark address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitmark address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
        gen = raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = getpass.getpass(prompt="Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = getpass.getpass(prompt="Enter old wallet passphrase: ")
pwd2 = getpass.getpass(prompt="Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
from bs4 import BeautifulSoup
import frappe
import frappe.share
import frappe.defaults
import frappe.permissions
from frappe.model.document import Document
from frappe.utils import (cint, flt, has_gravatar, escape_html, format_datetime,
now_datetime, get_formatted_email, today, get_time_zone)
from frappe import throw, msgprint, _
from frappe.utils.password import update_password as _update_password, check_password, get_password_reset_limit
from frappe.desk.notifications import clear_notifications
from frappe.desk.doctype.notification_settings.notification_settings import create_notification_settings, toggle_notifications
from frappe.utils.user import get_system_managers
from frappe.website.utils import is_signup_disabled
from frappe.rate_limiter import rate_limit
from frappe.core.doctype.user_type.user_type import user_linked_with_permission_on_doctype
from frappe.query_builder import DocType
STANDARD_USERS = frappe.STANDARD_USERS
class User(Document):
__new_password = None
def __setup__(self):
# because it is handled separately
self.flags.ignore_save_passwords = ['new_password']
def autoname(self):
"""set name as Email Address"""
if self.get("is_admin") or self.get("is_guest"):
self.name = self.first_name
else:
self.email = self.email.strip().lower()
self.name = self.email
def onload(self):
from frappe.config import get_modules_from_all_apps
self.set_onload('all_modules',
[m.get("module_name") for m in get_modules_from_all_apps()])
def before_insert(self):
self.flags.in_insert = True
throttle_user_creation()
def after_insert(self):
create_notification_settings(self.name)
frappe.cache().delete_key('users_for_mentions')
frappe.cache().delete_key('enabled_users')
def validate(self):
# clear new password
self.__new_password = self.new_password
self.new_password = ""
if not frappe.flags.in_test:
self.password_strength_test()
if self.name not in STANDARD_USERS:
self.validate_email_type(self.email)
self.validate_email_type(self.name)
self.add_system_manager_role()
self.set_system_user()
self.set_full_name()
self.check_enable_disable()
self.ensure_unique_roles()
self.remove_all_roles_for_guest()
self.validate_username()
self.remove_disabled_roles()
self.validate_user_email_inbox()
ask_pass_update()
self.validate_roles()
self.validate_allowed_modules()
self.validate_user_image()
self.set_time_zone()
if self.language == "Loading...":
self.language = None
if (self.name not in ["Administrator", "Guest"]) and (not self.get_social_login_userid("frappe")):
self.set_social_login_userid("frappe", frappe.generate_hash(length=39))
def validate_roles(self):
if self.role_profile_name:
role_profile = frappe.get_doc('Role Profile', self.role_profile_name)
self.set('roles', [])
self.append_roles(*[role.role for role in role_profile.roles])
def validate_allowed_modules(self):
if self.module_profile:
module_profile = frappe.get_doc('Module Profile', self.module_profile)
self.set('block_modules', [])
for d in module_profile.get('block_modules'):
self.append('block_modules', {
'module': d.module
})
def validate_user_image(self):
if self.user_image and len(self.user_image) > 2000:
frappe.throw(_("Not a valid User Image."))
    def on_update(self):
        self.share_with_self()
        clear_notifications(user=self.name)
        frappe.clear_cache(user=self.name)
        now = frappe.flags.in_test or frappe.flags.in_install
        self.send_password_notification(self.__new_password)
frappe.enqueue(
'frappe.core.doctype.user.user.create_contact',
user=self,
ignore_mandatory=True,
now=now
)
if self.name not in ('Administrator', 'Guest') and not self.user_image:
frappe.enqueue('frappe.core.doctype.user.user.update_gravatar', name=self.name, now=now)
# Set user selected timezone
if self.time_zone:
frappe.defaults.set_default("time_zone", self.time_zone, self.name)
if self.has_value_changed('allow_in_mentions') or self.has_value_changed('user_type'):
frappe.cache().delete_key('users_for_mentions')
if self.has_value_changed('enabled'):
frappe.cache().delete_key('enabled_users')
def has_website_permission(self, ptype, user, verbose=False):
"""Returns true if current user is the session user"""
return self.name == frappe.session.user
def set_full_name(self):
self.full_name = " ".join(filter(None, [self.first_name, self.last_name]))
def check_enable_disable(self):
# do not allow disabling administrator/guest
if not cint(self.enabled) and self.name in STANDARD_USERS:
frappe.throw(_("User {0} cannot be disabled").format(self.name))
if not cint(self.enabled):
self.a_system_manager_should_exist()
# clear sessions if disabled
if not cint(self.enabled) and getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
# toggle notifications based on the user's status
toggle_notifications(self.name, enable=cint(self.enabled))
def add_system_manager_role(self):
# if adding system manager, do nothing
if not cint(self.enabled) or ("System Manager" in [user_role.role for user_role in
self.get("roles")]):
return
if (self.name not in STANDARD_USERS and self.user_type == "System User" and not self.get_other_system_managers()
and cint(frappe.db.get_single_value('System Settings', 'setup_complete'))):
msgprint(_("Adding System Manager to this User as there must be atleast one System Manager"))
self.append("roles", {
"doctype": "Has Role",
"role": "System Manager"
})
if self.name == 'Administrator':
# Administrator should always have System Manager Role
self.extend("roles", [
{
"doctype": "Has Role",
"role": "System Manager"
},
{
"doctype": "Has Role",
"role": "Administrator"
}
])
def email_new_password(self, new_password=None):
if new_password and not self.flags.in_insert:
_update_password(user=self.name, pwd=new_password, logout_all_sessions=self.logout_all_sessions)
def set_system_user(self):
'''For the standard users like admin and guest, the user type is fixed.'''
user_type_mapper = {
'Administrator': 'System User',
'Guest': 'Website User'
}
if self.user_type and not frappe.get_cached_value('User Type', self.user_type, 'is_standard'):
if user_type_mapper.get(self.name):
self.user_type = user_type_mapper.get(self.name)
else:
self.set_roles_and_modules_based_on_user_type()
else:
'''Set as System User if any of the given roles has desk_access'''
self.user_type = 'System User' if self.has_desk_access() else 'Website User'
def set_roles_and_modules_based_on_user_type(self):
user_type_doc = frappe.get_cached_doc('User Type', self.user_type)
if user_type_doc.role:
self.roles = []
# Check whether User has linked with the 'Apply User Permission On' doctype or not
if user_linked_with_permission_on_doctype(user_type_doc, self.name):
self.append('roles', {
'role': user_type_doc.role
})
frappe.msgprint(_('Role has been set as per the user type {0}')
.format(self.user_type), alert=True)
user_type_doc.update_modules_in_user(self)
def has_desk_access(self):
"""Return true if any of the set roles has desk access"""
if not self.roles:
return False
role_table = DocType("Role")
return frappe.db.count(role_table, ((role_table.desk_access == 1) & (role_table.name.isin([d.role for d in self.roles]))))
def share_with_self(self):
frappe.share.add(self.doctype, self.name, self.name, write=1, share=1,
flags={"ignore_share_permission": True})
def validate_share(self, docshare):
pass
# if docshare.user == self.name:
# if self.user_type=="System User":
# if docshare.share != 1:
# frappe.throw(_("Sorry! User should have complete access to their own record."))
# else:
# frappe.throw(_("Sorry! Sharing with Website User is prohibited."))
def send_password_notification(self, new_password):
try:
if self.flags.in_insert:
if self.name not in STANDARD_USERS:
if new_password:
# new password given, no email required
_update_password(user=self.name, pwd=new_password,
logout_all_sessions=self.logout_all_sessions)
if not self.flags.no_welcome_mail and cint(self.send_welcome_email):
self.send_welcome_mail_to_user()
self.flags.email_sent = 1
if frappe.session.user != 'Guest':
msgprint(_("Welcome email sent"))
return
else:
self.email_new_password(new_password)
except frappe.OutgoingEmailError:
print(frappe.get_traceback())
pass # email server not set, don't send email
@Document.hook
def validate_reset_password(self):
pass
def reset_password(self, send_email=False, password_expired=False):
from frappe.utils import random_string, get_url
key = random_string(32)
self.db_set("reset_password_key", key)
url = "/update-password?key=" + key
if password_expired:
url = "/update-password?key=" + key + '&password_expired=true'
link = get_url(url)
if send_email:
self.password_reset_mail(link)
return link
def get_other_system_managers(self):
user_doctype = DocType("User").as_("user")
user_role_doctype = DocType("Has Role").as_("user_role")
return (
frappe.qb.from_(user_doctype)
.from_(user_role_doctype)
.select(user_doctype.name)
.where(user_role_doctype.role == 'System Manager')
.where(user_doctype.docstatus < 2)
.where(user_doctype.enabled == 1)
.where(user_role_doctype.parent == user_doctype.name)
.where(user_role_doctype.parent.notin(["Administrator", self.name]))
.limit(1)
.distinct()
).run()
def get_fullname(self):
"""get first_name space last_name"""
return (self.first_name or '') + \
(self.first_name and " " or '') + (self.last_name or '')
def password_reset_mail(self, link):
self.send_login_mail(_("Password Reset"),
"password_reset", {"link": link}, now=True)
def send_welcome_mail_to_user(self):
from frappe.utils import get_url
link = self.reset_password()
subject = None
method = frappe.get_hooks("welcome_email")
if method:
subject = frappe.get_attr(method[-1])()
if not subject:
site_name = frappe.db.get_default('site_name') or frappe.get_conf().get("site_name")
if site_name:
subject = _("Welcome to {0}").format(site_name)
else:
subject = _("Complete Registration")
self.send_login_mail(subject, "new_user",
dict(
link=link,
site_url=get_url(),
))
def send_login_mail(self, subject, template, add_args, now=None):
"""send mail with login details"""
from frappe.utils.user import get_user_fullname
from frappe.utils import get_url
created_by = get_user_fullname(frappe.session['user'])
if created_by == "Guest":
created_by = "Administrator"
args = {
'first_name': self.first_name or self.last_name or "user",
'user': self.name,
'title': subject,
'login_url': get_url(),
'created_by': created_by
}
args.update(add_args)
sender = frappe.session.user not in STANDARD_USERS and get_formatted_email(frappe.session.user) or None
frappe.sendmail(recipients=self.email, sender=sender, subject=subject,
template=template, args=args, header=[subject, "green"],
delayed=(not now) if now is not None else self.flags.delay_emails, retry=3)
def a_system_manager_should_exist(self):
if not self.get_other_system_managers():
throw(_("There should remain at least one System Manager"))
def on_trash(self):
frappe.clear_cache(user=self.name)
if self.name in STANDARD_USERS:
throw(_("User {0} cannot be deleted").format(self.name))
self.a_system_manager_should_exist()
# disable the user and log him/her out
self.enabled = 0
if getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
# delete todos
frappe.db.delete("ToDo", {"allocated_to": self.name})
todo_table = DocType("ToDo")
(
frappe.qb.update(todo_table)
.set(todo_table.assigned_by, None)
.where(todo_table.assigned_by == self.name)
).run()
# delete events
frappe.db.delete("Event", {"owner": self.name, "event_type": "Private"})
# delete shares
frappe.db.delete("DocShare", {"user": self.name})
# delete messages
table = DocType("Communication")
frappe.db.delete(
table,
filters=(
(table.communication_type.isin(["Chat", "Notification"]))
& (table.reference_doctype == "User")
& ((table.reference_name == self.name) | table.owner == self.name)
),
run=False,
)
# unlink contact
table = DocType("Contact")
frappe.qb.update(table).where(
table.user == self.name
).set(table.user, None).run()
# delete notification settings
frappe.delete_doc("Notification Settings", self.name, ignore_permissions=True)
if self.get('allow_in_mentions'):
frappe.cache().delete_key('users_for_mentions')
frappe.cache().delete_key('enabled_users')
def before_rename(self, old_name, new_name, merge=False):
frappe.clear_cache(user=old_name)
self.validate_rename(old_name, new_name)
def validate_rename(self, old_name, new_name):
# do not allow renaming administrator and guest
if old_name in STANDARD_USERS:
throw(_("User {0} cannot be renamed").format(self.name))
self.validate_email_type(new_name)
def validate_email_type(self, email):
from frappe.utils import validate_email_address
validate_email_address(email.strip(), True)
def after_rename(self, old_name, new_name, merge=False):
tables = frappe.db.get_tables()
for tab in tables:
desc = frappe.db.get_table_columns_description(tab)
has_fields = []
for d in desc:
if d.get('name') in ['owner', 'modified_by']:
has_fields.append(d.get('name'))
for field in has_fields:
frappe.db.sql("""UPDATE `%s`
SET `%s` = %s
WHERE `%s` = %s""" %
(tab, field, '%s', field, '%s'), (new_name, old_name))
if frappe.db.exists("Notification Settings", old_name):
frappe.rename_doc("Notification Settings", old_name, new_name, force=True, show_alert=False)
# set email
frappe.db.update("User", new_name, "email", new_name)
def append_roles(self, *roles):
"""Add roles to user"""
current_roles = [d.role for d in self.get("roles")]
for role in roles:
if role in current_roles:
continue
self.append("roles", {"role": role})
def add_roles(self, *roles):
"""Add roles to user and save"""
self.append_roles(*roles)
self.save()
def remove_roles(self, *roles):
existing_roles = dict((d.role, d) for d in self.get("roles"))
for role in roles:
if role in existing_roles:
self.get("roles").remove(existing_roles[role])
self.save()
def remove_all_roles_for_guest(self):
if self.name == "Guest":
self.set("roles", list(set(d for d in self.get("roles") if d.role == "Guest")))
def remove_disabled_roles(self):
disabled_roles = [d.name for d in frappe.get_all("Role", filters={"disabled":1})]
for role in list(self.get('roles')):
if role.role in disabled_roles:
self.get('roles').remove(role)
def ensure_unique_roles(self):
exists = []
for i, d in enumerate(self.get("roles")):
if (not d.role) or (d.role in exists):
self.get("roles").remove(d)
else:
exists.append(d.role)
def validate_username(self):
if not self.username and self.is_new() and self.first_name:
self.username = frappe.scrub(self.first_name)
if not self.username:
return
# strip space and @
self.username = self.username.strip(" @")
if self.username_exists():
if self.user_type == 'System User':
frappe.msgprint(_("Username {0} already exists").format(self.username))
self.suggest_username()
self.username = ""
def password_strength_test(self):
""" test password strength """
if self.flags.ignore_password_policy:
return
if self.__new_password:
user_data = (self.first_name, self.middle_name, self.last_name, self.email, self.birth_date)
result = test_password_strength(self.__new_password, '', None, user_data)
feedback = result.get("feedback", None)
if feedback and not feedback.get('password_policy_validation_passed', False):
handle_password_test_fail(result)
def suggest_username(self):
def _check_suggestion(suggestion):
if self.username != suggestion and not self.username_exists(suggestion):
return suggestion
return None
# @firstname
username = _check_suggestion(frappe.scrub(self.first_name))
if not username:
# @firstname_last_name
username = _check_suggestion(frappe.scrub("{0} {1}".format(self.first_name, self.last_name or "")))
if username:
frappe.msgprint(_("Suggested Username: {0}").format(username))
return username
def username_exists(self, username=None):
return frappe.db.get_value("User", {"username": username or self.username, "name": ("!=", self.name)})
def get_blocked_modules(self):
"""Returns list of modules blocked for that user"""
return [d.module for d in self.block_modules] if self.block_modules else []
def validate_user_email_inbox(self):
""" check if same email account added in User Emails twice """
email_accounts = [ user_email.email_account for user_email in self.user_emails ]
if len(email_accounts) != len(set(email_accounts)):
frappe.throw(_("Email Account added multiple times"))
def get_social_login_userid(self, provider):
try:
for p in self.social_logins:
if p.provider == provider:
return p.userid
		except Exception:
			return None
def set_social_login_userid(self, provider, userid, username=None):
social_logins = {
"provider": provider,
"userid": userid
}
if username:
social_logins["username"] = username
self.append("social_logins", social_logins)
def get_restricted_ip_list(self):
if not self.restrict_ip:
return
return [i.strip() for i in self.restrict_ip.split(",")]
@classmethod
def find_by_credentials(cls, user_name: str, password: str, validate_password: bool = True):
"""Find the user by credentials.
This is a login utility that needs to check login related system settings while finding the user.
1. Find user by email ID by default
2. If allow_login_using_mobile_number is set, you can use mobile number while finding the user.
3. If allow_login_using_user_name is set, you can use username while finding the user.
"""
login_with_mobile = cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_mobile_number"))
login_with_username = cint(frappe.db.get_value("System Settings", "System Settings", "allow_login_using_user_name"))
or_filters = [{"name": user_name}]
if login_with_mobile:
or_filters.append({"mobile_no": user_name})
if login_with_username:
or_filters.append({"username": user_name})
users = frappe.db.get_all('User', fields=['name', 'enabled'], or_filters=or_filters, limit=1)
if not users:
return
user = users[0]
user['is_authenticated'] = True
if validate_password:
try:
check_password(user['name'], password, delete_tracker_cache=False)
except frappe.AuthenticationError:
user['is_authenticated'] = False
return user
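	# Illustrative usage sketch for find_by_credentials (names and values
	# here are hypothetical, not part of this module):
	#
	#   user = User.find_by_credentials("jane@example.com", "s3cret")
	#   if user and user["is_authenticated"]:
	#       frappe.local.login_manager.login_as(user["name"])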
def set_time_zone(self):
if not self.time_zone:
self.time_zone = get_time_zone()
@frappe.whitelist()
def get_timezones():
import pytz
return {
"timezones": pytz.all_timezones
}
@frappe.whitelist()
def get_all_roles(arg=None):
"""return all roles"""
active_domains = frappe.get_active_domains()
roles = frappe.get_all("Role", filters={
"name": ("not in", "Administrator,Guest,All"),
"disabled": 0
}, or_filters={
"ifnull(restrict_to_domain, '')": "",
"restrict_to_domain": ("in", active_domains)
}, order_by="name")
return [ role.get("name") for role in roles ]
@frappe.whitelist()
def get_roles(arg=None):
"""get roles for a user"""
return frappe.get_roles(frappe.form_dict['uid'])
@frappe.whitelist()
def get_perm_info(role):
"""get permission info"""
from frappe.permissions import get_all_perms
return get_all_perms(role)
@frappe.whitelist(allow_guest=True)
def update_password(new_password, logout_all_sessions=0, key=None, old_password=None):
#validate key to avoid key input like ['like', '%'], '', ['in', ['']]
if key and not isinstance(key, str):
frappe.throw(_('Invalid key type'))
result = test_password_strength(new_password, key, old_password)
feedback = result.get("feedback", None)
if feedback and not feedback.get('password_policy_validation_passed', False):
handle_password_test_fail(result)
res = _get_user_for_update_password(key, old_password)
if res.get('message'):
frappe.local.response.http_status_code = 410
return res['message']
else:
user = res['user']
logout_all_sessions = cint(logout_all_sessions) or frappe.db.get_single_value("System Settings", "logout_on_password_reset")
_update_password(user, new_password, logout_all_sessions=cint(logout_all_sessions))
user_doc, redirect_url = reset_user_data(user)
# get redirect url from cache
redirect_to = frappe.cache().hget('redirect_after_login', user)
if redirect_to:
redirect_url = redirect_to
frappe.cache().hdel('redirect_after_login', user)
frappe.local.login_manager.login_as(user)
frappe.db.set_value("User", user, "last_password_reset_date", today())
frappe.db.set_value("User", user, "reset_password_key", "")
if user_doc.user_type == "System User":
return "/app"
else:
return redirect_url if redirect_url else "/"
@frappe.whitelist(allow_guest=True)
def test_password_strength(new_password, key=None, old_password=None, user_data=None):
from frappe.utils.password_strength import test_password_strength as _test_password_strength
password_policy = frappe.db.get_value("System Settings", None,
["enable_password_policy", "minimum_password_score"], as_dict=True) or {}
enable_password_policy = cint(password_policy.get("enable_password_policy", 0))
minimum_password_score = cint(password_policy.get("minimum_password_score", 0))
if not enable_password_policy:
return {}
if not user_data:
user_data = frappe.db.get_value('User', frappe.session.user,
['first_name', 'middle_name', 'last_name', 'email', 'birth_date'])
if new_password:
result = _test_password_strength(new_password, user_inputs=user_data)
password_policy_validation_passed = False
		# score must be non-zero and at least minimum_password_score
if result.get('score') and result.get('score') >= minimum_password_score:
password_policy_validation_passed = True
result['feedback']['password_policy_validation_passed'] = password_policy_validation_passed
return result
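# Illustrative shape of the dict returned above when the password policy is
# enabled (values are hypothetical; the structure follows zxcvbn-style
# feedback):
#
#   {
#       "score": 3,
#       "feedback": {
#           "warning": "",
#           "suggestions": [...],
#           "password_policy_validation_passed": True,
#       },
#   }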
#for login
@frappe.whitelist()
def has_email_account(email):
return frappe.get_list("Email Account", filters={"email_id": email})
@frappe.whitelist(allow_guest=False)
def get_email_awaiting(user):
waiting = frappe.get_all("User Email", fields=["email_account", "email_id"], filters={"awaiting_password": 1, "parent": user})
if waiting:
return waiting
else:
		user_email_table = DocType("User Email")
		# nothing awaiting: clear the awaiting_password flag for this user
		frappe.qb.update(user_email_table).set(user_email_table.awaiting_password, 0).where(user_email_table.parent == user).run()
return False
def ask_pass_update():
	# update the system defaults with the users whose emails await a password
	from frappe.utils import set_default
	password_list = frappe.get_all("User Email", filters={"awaiting_password": True}, pluck="parent", distinct=True)
	set_default("email_user_password", ','.join(password_list))
def _get_user_for_update_password(key, old_password):
# verify old password
result = frappe._dict()
if key:
result.user = frappe.db.get_value("User", {"reset_password_key": key})
if not result.user:
result.message = _("The Link specified has either been used before or Invalid")
elif old_password:
# verify old password
frappe.local.login_manager.check_password(frappe.session.user, old_password)
user = frappe.session.user
result.user = user
return result
def reset_user_data(user):
user_doc = frappe.get_doc("User", user)
redirect_url = user_doc.redirect_url
user_doc.reset_password_key = ''
user_doc.redirect_url = ''
user_doc.save(ignore_permissions=True)
return user_doc, redirect_url
@frappe.whitelist()
def verify_password(password):
frappe.local.login_manager.check_password(frappe.session.user, password)
@frappe.whitelist(allow_guest=True)
def sign_up(email, full_name, redirect_to):
if is_signup_disabled():
frappe.throw(_("Sign Up is disabled"), title=_("Not Allowed"))
user = frappe.db.get("User", {"email": email})
if user:
if user.enabled:
return 0, _("Already Registered")
else:
return 0, _("Registered but disabled")
else:
if frappe.db.get_creation_count('User', 60) > 300:
			frappe.respond_as_web_page(_('Temporarily Disabled'),
				_('Too many users signed up recently, so the registration is disabled. Please try again in an hour'),
				http_status_code=429)
			return
from frappe.utils import random_string
user = frappe.get_doc({
"doctype":"User",
"email": email,
"first_name": escape_html(full_name),
"enabled": 1,
"new_password": random_string(10),
"user_type": "Website User"
})
user.flags.ignore_permissions = True
user.flags.ignore_password_policy = True
user.insert()
# set default signup role as per Portal Settings
default_role = frappe.db.get_value("Portal Settings", None, "default_role")
if default_role:
user.add_roles(default_role)
if redirect_to:
frappe.cache().hset('redirect_after_login', user.name, redirect_to)
if user.flags.email_sent:
return 1, _("Please check your email for verification")
else:
return 2, _("Please ask your administrator to verify your sign-up")
@frappe.whitelist(allow_guest=True)
@rate_limit(limit=get_password_reset_limit, seconds = 24*60*60, methods=['POST'])
def reset_password(user):
if user=="Administrator":
return 'not allowed'
try:
user = frappe.get_doc("User", user)
if not user.enabled:
return 'disabled'
user.validate_reset_password()
user.reset_password(send_email=True)
return frappe.msgprint(
msg=_("Password reset instructions have been sent to your email"),
title=_("Password Email Sent")
)
except frappe.DoesNotExistError:
frappe.local.response['http_status_code'] = 400
frappe.clear_messages()
return 'not found'
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
def user_query(doctype, txt, searchfield, start, page_len, filters):
from frappe.desk.reportview import get_match_cond, get_filters_cond
conditions=[]
user_type_condition = "and user_type != 'Website User'"
if filters and filters.get('ignore_user_type'):
user_type_condition = ''
filters.pop('ignore_user_type')
txt = "%{}%".format(txt)
return frappe.db.sql("""SELECT `name`, CONCAT_WS(' ', first_name, middle_name, last_name)
FROM `tabUser`
WHERE `enabled`=1
{user_type_condition}
AND `docstatus` < 2
AND `name` NOT IN ({standard_users})
AND ({key} LIKE %(txt)s
OR CONCAT_WS(' ', first_name, middle_name, last_name) LIKE %(txt)s)
{fcond} {mcond}
ORDER BY
CASE WHEN `name` LIKE %(txt)s THEN 0 ELSE 1 END,
CASE WHEN concat_ws(' ', first_name, middle_name, last_name) LIKE %(txt)s
THEN 0 ELSE 1 END,
NAME asc
LIMIT %(page_len)s OFFSET %(start)s
""".format(
user_type_condition = user_type_condition,
standard_users=", ".join(frappe.db.escape(u) for u in STANDARD_USERS),
key=searchfield,
fcond=get_filters_cond(doctype, filters, conditions),
mcond=get_match_cond(doctype)
),
dict(start=start, page_len=page_len, txt=txt)
)
def get_total_users():
"""Returns total no. of system users"""
return flt(frappe.db.sql('''SELECT SUM(`simultaneous_sessions`)
FROM `tabUser`
WHERE `enabled` = 1
AND `user_type` = 'System User'
AND `name` NOT IN ({})'''.format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0])
def get_system_users(exclude_users=None, limit=None):
if not exclude_users:
exclude_users = []
elif not isinstance(exclude_users, (list, tuple)):
exclude_users = [exclude_users]
limit_cond = ''
if limit:
limit_cond = 'limit {0}'.format(limit)
exclude_users += list(STANDARD_USERS)
system_users = frappe.db.sql_list("""select name from `tabUser`
where enabled=1 and user_type != 'Website User'
and name not in ({}) {}""".format(", ".join(["%s"]*len(exclude_users)), limit_cond),
exclude_users)
return system_users
def get_active_users():
"""Returns No. of system users who logged in, in the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type != 'Website User'
and name not in ({})
and hour(timediff(now(), last_active)) < 72""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0]
def get_website_users():
"""Returns total no. of website users"""
return frappe.db.count("User", filters={"enabled": True, "user_type": "Website User"})
def get_active_website_users():
"""Returns No. of website users who logged in, in the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type = 'Website User'
and hour(timediff(now(), last_active)) < 72""")[0][0]
def get_permission_query_conditions(user):
if user=="Administrator":
return ""
else:
return """(`tabUser`.name not in ({standard_users}))""".format(
standard_users = ", ".join(frappe.db.escape(user) for user in STANDARD_USERS))
def has_permission(doc, user):
if (user != "Administrator") and (doc.name in STANDARD_USERS):
# dont allow non Administrator user to view / edit Administrator user
return False
def notify_admin_access_to_system_manager(login_manager=None):
if (login_manager
and login_manager.user == "Administrator"
and frappe.local.conf.notify_admin_access_to_system_manager):
site = '<a href="{0}" target="_blank">{0}</a>'.format(frappe.local.request.host_url)
date_and_time = '<b>{0}</b>'.format(format_datetime(now_datetime(), format_string="medium"))
ip_address = frappe.local.request_ip
access_message = _('Administrator accessed {0} on {1} via IP Address {2}.').format(
site, date_and_time, ip_address)
frappe.sendmail(
recipients=get_system_managers(),
subject=_("Administrator Logged In"),
template="administrator_logged_in",
args={'access_message': access_message},
header=['Access Notification', 'orange']
)
def extract_mentions(txt):
"""Find all instances of @mentions in the html."""
soup = BeautifulSoup(txt, 'html.parser')
emails = []
for mention in soup.find_all(class_='mention'):
if mention.get('data-is-group') == 'true':
try:
user_group = frappe.get_cached_doc('User Group', mention['data-id'])
emails += [d.user for d in user_group.user_group_members]
except frappe.DoesNotExistError:
pass
continue
email = mention['data-id']
emails.append(email)
return emails
def handle_password_test_fail(result):
suggestions = result['feedback']['suggestions'][0] if result['feedback']['suggestions'] else ''
warning = result['feedback']['warning'] if 'warning' in result['feedback'] else ''
suggestions += "<br>" + _("Hint: Include symbols, numbers and capital letters in the password") + '<br>'
frappe.throw(' '.join([_('Invalid Password:'), warning, suggestions]))
def update_gravatar(name):
gravatar = has_gravatar(name)
if gravatar:
frappe.db.set_value('User', name, 'user_image', gravatar)
def throttle_user_creation():
if frappe.flags.in_import:
return
if frappe.db.get_creation_count('User', 60) > frappe.local.conf.get("throttle_user_limit", 60):
frappe.throw(_('Throttled'))
@frappe.whitelist()
def get_role_profile(role_profile):
roles = frappe.get_doc('Role Profile', {'role_profile': role_profile})
return roles.roles
@frappe.whitelist()
def get_module_profile(module_profile):
module_profile = frappe.get_doc('Module Profile', {'module_profile_name': module_profile})
return module_profile.get('block_modules')
def create_contact(user, ignore_links=False, ignore_mandatory=False):
from frappe.contacts.doctype.contact.contact import get_contact_name
if user.name in ["Administrator", "Guest"]: return
contact_name = get_contact_name(user.email)
if not contact_name:
contact = frappe.get_doc({
"doctype": "Contact",
"first_name": user.first_name,
"last_name": user.last_name,
"user": user.name,
"gender": user.gender,
})
if user.email:
contact.add_email(user.email, is_primary=True)
if user.phone:
contact.add_phone(user.phone, is_primary_phone=True)
if user.mobile_no:
contact.add_phone(user.mobile_no, is_primary_mobile_no=True)
contact.insert(ignore_permissions=True, ignore_links=ignore_links, ignore_mandatory=ignore_mandatory)
else:
contact = frappe.get_doc("Contact", contact_name)
contact.first_name = user.first_name
contact.last_name = user.last_name
contact.gender = user.gender
		# Add phone number if it does not already exist in the contact
if user.phone and not any(new_contact.phone == user.phone for new_contact in contact.phone_nos):
# Set primary phone if there is no primary phone number
contact.add_phone(
user.phone,
is_primary_phone=not any(
new_contact.is_primary_phone == 1 for new_contact in contact.phone_nos
)
)
		# Add mobile number if it does not already exist in the contact
if user.mobile_no and not any(new_contact.phone == user.mobile_no for new_contact in contact.phone_nos):
# Set primary mobile if there is no primary mobile number
contact.add_phone(
user.mobile_no,
is_primary_mobile_no=not any(
new_contact.is_primary_mobile_no == 1 for new_contact in contact.phone_nos
)
)
contact.save(ignore_permissions=True)
@frappe.whitelist()
def generate_keys(user):
"""
generate api key and api secret
:param user: str
"""
frappe.only_for("System Manager")
user_details = frappe.get_doc("User", user)
api_secret = frappe.generate_hash(length=15)
# if api key is not set generate api key
if not user_details.api_key:
api_key = frappe.generate_hash(length=15)
user_details.api_key = api_key
user_details.api_secret = api_secret
user_details.save()
return {"api_secret": api_secret}
@frappe.whitelist()
def switch_theme(theme):
if theme in ["Dark", "Light", "Automatic"]:
frappe.db.set_value("User", frappe.session.user, "desk_theme", theme)
def get_enabled_users():
def _get_enabled_users():
enabled_users = frappe.get_all("User", filters={"enabled": "1"}, pluck="name")
return enabled_users
return frappe.cache().get_value("enabled_users", _get_enabled_users)
import pylab as pl
import numpy as np
import matplotlib.pyplot as plt
from scipy import ndimage as ndi
from scipy.ndimage import measurements
from scipy import optimize
import EqnLine as line
from skimage import io
from skimage import measure, color
from skimage.morphology import watershed
from skimage.feature import peak_local_max
from skimage.filters import threshold_otsu, sobel  # 'skimage.filter' was renamed to 'skimage.filters'
from skimage.restoration import denoise_tv_chambolle  # moved out of the old 'skimage.filter'
from skimage.util import img_as_ubyte
from scipy.ndimage.filters import median_filter, gaussian_filter
from peak_detect import *
import pickle
def save_data(filename, data):
    print("Saving data")
    # pickle requires a binary-mode file handle
    with open(filename, 'wb') as f:
        pickle.dump(data, f)
def crop_box_3D(image, touch_pt, centres, size = 30):
"""
Crop a region around the touch point
and perform Siemens star resolution
analysis
"""
crops = []
    for i in range(len(touch_pt)):
        # centres[i] is available if crops ever need to be oriented relative
        # to the sphere centres; only the touch point is used at the moment
        crop = image[int(touch_pt[i][0]) - size:int(touch_pt[i][0]) + size,
                     int(touch_pt[i][1]) - size:int(touch_pt[i][1]) + size,
                     int(touch_pt[i][2]) - size:int(touch_pt[i][2]) + size]
# pl.imshow(crop[:,:,30])
# pl.gray()
# pl.show()
crops.append(crop)
return crops
def watershed_3d(sphere):
"""
Markers should be int8
Image should be uint8
"""
sphere = median_filter(sphere, 3)
thresh = threshold_otsu(sphere)
sphere = (sphere >= thresh) * 1
sphere = sobel(sphere)
size = (sphere.shape[0], sphere.shape[1], sphere.shape[2])
marker = np.zeros(size, dtype=np.int16)
pl.imshow(sphere[:,:,50])
pl.show()
# mark everything outside as background
marker[5, :, :] = -1
marker[size[0] - 5, :, :] = -1
marker[:, :, 5] = -1
marker[:, :, size[2] - 5] = -1
marker[:, 5, :] = -1
marker[:, size[1] - 5, :] = -1
marker[:,0,0] = -1
# mark everything inside as a sphere
    marker[size[0] // 2, size[1] // 2, size[2] // 2] = 5
result = measurements.watershed_ift(sphere.astype(dtype=np.uint16), marker)
pl.imshow(result[:,:,50])
pl.show()
return result
def watershed_segmentation(image):
# #threshold
# image = median_filter(image, 5)
#
# filter = threshold_otsu(image)
# image = (image > filter) * 1
# Now we want to separate the two objects in image
# Generate the markers as local maxima of the distance to the background
distance = ndi.distance_transform_edt(image)
local_maxi = peak_local_max(distance, indices=False, footprint=np.ones((3, 3)),
labels=image)
markers = ndi.label(local_maxi)[0]
labels = watershed(-distance, markers, mask=image)
# fig, axes = plt.subplots(ncols=3, figsize=(8, 2.7))
# ax0, ax1, ax2 = axes
#
# ax0.imshow(image, cmap=plt.cm.gray, interpolation='nearest')
# ax0.set_title('Overlapping objects')
# ax1.imshow(-distance, cmap=plt.cm.jet, interpolation='nearest')
# ax1.set_title('Distances')
# ax2.imshow(labels, cmap=plt.cm.spectral, interpolation='nearest')
# ax2.set_title('Separated objects')
#
# for ax in axes:
# ax.axis('off')
#
# fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
# right=1)
# plt.show()
return labels
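# Minimal usage sketch for the function above, on a synthetic binary image
# of two overlapping squares (shapes and values are illustrative only):
#
#   img = np.zeros((80, 80), dtype=int)
#   img[10:40, 10:40] = 1
#   img[30:60, 30:60] = 1
#   labels = watershed_segmentation(img)
#   print(np.unique(labels))   # background label plus one label per object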
def centres_of_mass_2D(image):
"""
Calculates centres of mass
for all the labels
"""
centroids = []
bords = []
areas = []
radius = []
    # regionprops properties are attributes in the current skimage API
    for info in measure.regionprops(image):
        centre = info.centroid
        minr, minc, maxr, maxc = info.bbox
        D = info.equivalent_diameter
margin = 0
radius.append((D / 2.0))
bords.append((minr-margin, minc-margin, maxr+margin, maxc+margin))
areas.append(image[minr-margin:maxr+margin,minc-margin:maxc+margin].copy())
centroids.append(centre)
return centroids, areas, bords, radius
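# Usage sketch: label an image first, then pull per-region geometry:
#
#   labels = watershed_segmentation(img)
#   centroids, areas, bords, radius = centres_of_mass_2D(labels)
#   # centroids[k] is the (row, col) centre of region k; radius[k] is half
#   # its equivalent diameter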
def watershed_slicing(image):
"""
Does the watershed algorithm slice by slice.
Then use the labeled image to calculate the centres of
mass for each slice.
"""
image = median_filter(image, 3)
thresh = threshold_otsu(image)
image = (image > thresh) * 1
    N = image.shape[2]  # number of slices along the third axis (matches the slicing below)
slice_centroids = []
slice_radius = []
for i in range(N):
        img_slice = image[:, :, i]  # avoid shadowing the builtin 'slice'
        labels_slice = watershed_segmentation(img_slice)
        centroids, areas, bords, radius = centres_of_mass_2D(labels_slice)
slice_centroids.append(centroids)
slice_radius.append(radius)
# if i > 49:
# print centroids
# pl.imshow(labels_slice)
# pl.show()
return slice_centroids, slice_radius
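# Usage sketch with the synthetic two-sphere volume defined below:
#
#   vol = draw_sphere()
#   slice_centroids, slice_radius = watershed_slicing(vol)
#   # one list of centroids and one list of radii per slice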
################# DRAW TEST DATA ######################################
def draw_sphere():
    sphere = np.zeros((100, 100, 100))
N = 100
radius1 = 20
radius2 = 20
centre1 = (30, 30, 50)
centre2 = (30, 69, 50)
Xc1 = centre1[0]
Yc1 = centre1[1]
Zc1 = centre1[2]
Xc2 = centre2[0]
Yc2 = centre2[1]
Zc2 = centre2[2]
Y, X, Z = np.meshgrid(np.arange(N), np.arange(N), np.arange(N))
mask1 = (((X - Xc1)**2 + (Y - Yc1)**2 + (Z - Zc1)**2) < radius1**2)
mask2 = (((X - Xc2)**2 + (Y - Yc2)**2 + (Z - Zc2)**2) < radius2**2)
sphere[mask1] = 1
sphere[mask2] = 1
return sphere
def add_noise(np_image, amount):
    noise = np.random.randn(np_image.shape[0], np_image.shape[1], np_image.shape[2])
norm_noise = noise/np.max(noise)
np_image = np_image + norm_noise*np.max(np_image)*amount
return np_image
#############################################################################
from test_analysis import test_analyse
sphere = draw_sphere()
sphere = add_noise(sphere, 0.3)
#sphere = gaussian_filter(sphere, 3)
centroids, radii = watershed_slicing(sphere)
rad, cent = test_analyse.analyse(radii, centroids)
touch_pt, centres = line.find_contact_3D(cent, rad, tol = 2)
#crop_img, slope = crop_box_3D(sphere, touch_pt, centres, size = 30)
pt1 = cent[0]
pt2 = cent[1]
line.touch_lines_3D(pt1, pt2, sphere)
# image = io.imread("test_slice.tif")
# sphere = np.load('sphere1.npy')
# centroids, radii = watershed_slicing(sphere)
# save_data("test_analysis/centroids.dat", centroids)
# save_data("test_analysis/radii.dat", radii)
# labels = watershed_segmentation(image)
#
# centroids, areas, bords, radius, radius2 = centres_of_mass_2D(labels)
#
# # leastsq_circle_fit(areas, centroids, bords, radius)
# # leastsq_whole(image, centroids)
# touch, centres = find_contact(centroids, radius2)
#
# crop_img, slopes = crop_box(image, touch, centres)
#
# line.eqn_line(crop_img[0], slopes[0])
# -*- coding: utf-8 -*-
"""
"""
from __future__ import absolute_import
import sys
import operator
import numpy as np
from . import markup
from .registry import unit_registry
from .decorators import memoize
def assert_isinstance(obj, types):
try:
assert isinstance(obj, types)
except AssertionError:
raise TypeError(
"arg %r must be of type %r, got %r" % (obj, types, type(obj))
)
class Dimensionality(dict):
"""
"""
@property
def ndims(self):
return sum(abs(i) for i in self.simplified.values())
@property
def simplified(self):
if len(self):
rq = 1*unit_registry['dimensionless']
for u, d in self.items():
rq = rq * u.simplified**d
return rq.dimensionality
else:
return self
@property
def string(self):
return markup.format_units(self)
@property
def unicode(self):
return markup.format_units_unicode(self)
@property
def latex(self):
return markup.format_units_latex(self)
def __hash__(self):
res = hash(unit_registry['dimensionless'])
for key in sorted(self.keys(), key=operator.attrgetter('format_order')):
val = self[key]
if val < 0:
# can you believe that hash(-1)==hash(-2)?
val -= 1
res ^= hash((key, val))
return res
def __add__(self, other):
assert_isinstance(other, Dimensionality)
try:
assert self == other
except AssertionError:
raise ValueError(
                'cannot add units of %s and %s'\
%(str(self), str(other))
)
return self.copy()
__radd__ = __add__
def __iadd__(self, other):
assert_isinstance(other, Dimensionality)
try:
assert self == other
except AssertionError:
raise ValueError(
                'cannot add units of %s and %s'\
%(str(self), str(other))
)
return self
def __sub__(self, other):
assert_isinstance(other, Dimensionality)
try:
assert self == other
except AssertionError:
raise ValueError(
                'cannot subtract units of %s and %s'\
%(str(self), str(other))
)
return self.copy()
__rsub__ = __sub__
def __isub__(self, other):
assert_isinstance(other, Dimensionality)
try:
assert self == other
except AssertionError:
raise ValueError(
                'cannot subtract units of %s and %s'\
%(str(self), str(other))
)
return self
def __mul__(self, other):
assert_isinstance(other, Dimensionality)
new = Dimensionality(self)
for unit, power in other.items():
try:
new[unit] += power
if new[unit] == 0:
new.pop(unit)
except KeyError:
new[unit] = power
return new
def __imul__(self, other):
assert_isinstance(other, Dimensionality)
for unit, power in other.items():
try:
self[unit] += power
if self[unit] == 0:
self.pop(unit)
except KeyError:
self[unit] = power
return self
def __truediv__(self, other):
assert_isinstance(other, Dimensionality)
new = Dimensionality(self)
for unit, power in other.items():
try:
new[unit] -= power
if new[unit] == 0:
new.pop(unit)
except KeyError:
new[unit] = -power
return new
if sys.version_info[0] < 3:
def __div__(self, other):
assert_isinstance(other, Dimensionality)
return self.__truediv__(other)
def __itruediv__(self, other):
assert_isinstance(other, Dimensionality)
for unit, power in other.items():
try:
self[unit] -= power
if self[unit] == 0:
self.pop(unit)
except KeyError:
self[unit] = -power
return self
if sys.version_info[0] < 3:
def __idiv__(self, other):
assert_isinstance(other, Dimensionality)
return self.__itruediv__(other)
def __pow__(self, other):
try:
assert np.isscalar(other)
except AssertionError:
raise TypeError('exponent must be a scalar, got %r' % other)
if other == 0:
return Dimensionality()
new = Dimensionality(self)
for i in new:
new[i] *= other
return new
def __ipow__(self, other):
try:
assert np.isscalar(other)
except AssertionError:
raise TypeError('exponent must be a scalar, got %r' % other)
if other == 0:
self.clear()
return self
for i in self:
self[i] *= other
return self
def __repr__(self):
return 'Dimensionality({%s})' \
% ', '.join(['%s: %s'% (u.name, e) for u, e in self.items()])
def __str__(self):
if markup.config.use_unicode:
return self.unicode
else:
return self.string
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return hash(self) != hash(other)
__neq__ = __ne__
def __gt__(self, other):
return self.ndims > other.ndims
def __ge__(self, other):
return self.ndims >= other.ndims
def __lt__(self, other):
return self.ndims < other.ndims
def __le__(self, other):
return self.ndims <= other.ndims
def copy(self):
return Dimensionality(self)
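# Illustrative sketch of Dimensionality arithmetic (assumes 'm' and 's' are
# registered units, as in the standard registry):
#
#   m = unit_registry['m'].dimensionality
#   s = unit_registry['s'].dimensionality
#   velocity = m / s        # Dimensionality({m: 1, s: -1})
#   area = m * m            # Dimensionality({m: 2})
#   m + m                   # fine: identical dimensions
#   m + s                   # raises ValueError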
p_dict = {}
def _d_multiply(q1, q2, out=None):
try:
return q1._dimensionality * q2._dimensionality
except AttributeError:
try:
return q1.dimensionality
        except AttributeError:
return q2.dimensionality
p_dict[np.multiply] = _d_multiply
p_dict[np.cross] = _d_multiply
def _d_divide(q1, q2, out=None):
try:
return q1._dimensionality / q2._dimensionality
except AttributeError:
try:
return q1.dimensionality
        except AttributeError:
return q2.dimensionality**-1
p_dict[np.divide] = _d_divide
p_dict[np.true_divide] = _d_divide
def _d_check_uniform(q1, q2, out=None):
try:
assert q1._dimensionality == q2._dimensionality
return q1.dimensionality
except AssertionError:
raise ValueError(
'quantities must have identical units, got "%s" and "%s"' %
(q1.units, q2.units)
)
except AttributeError:
try:
if hasattr(q1, 'dimensionality'):
# q2 was not a quantity
if not q1._dimensionality or not np.asarray(q2).any():
return q1.dimensionality
else:
raise ValueError
elif hasattr(q2, 'dimensionality'):
# q1 was not a quantity
if not q2._dimensionality or not np.asarray(q1).any():
return q2.dimensionality
else:
raise ValueError
except ValueError:
raise ValueError(
'quantities must have identical units, got "%s" and "%s"' %
(q1.units, q2.units)
)
p_dict[np.add] = _d_check_uniform
p_dict[np.subtract] = _d_check_uniform
p_dict[np.mod] = _d_check_uniform
p_dict[np.fmod] = _d_check_uniform
p_dict[np.remainder] = _d_check_uniform
p_dict[np.floor_divide] = _d_check_uniform
p_dict[np.arctan2] = _d_check_uniform
p_dict[np.hypot] = _d_check_uniform
def _d_power(q1, q2, out=None):
if getattr(q2, 'dimensionality', None):
raise ValueError("exponent must be dimensionless")
try:
q2 = np.array(q2)
p = q2.min()
if p != q2.max():
raise ValueError('Quantities must be raised to a uniform power')
return q1._dimensionality**p
except AttributeError:
return Dimensionality()
p_dict[np.power] = _d_power
def _d_square(q1, out=None):
return q1._dimensionality**2
p_dict[np.square] = _d_square
def _d_reciprocal(q1, out=None):
return q1._dimensionality**-1
p_dict[np.reciprocal] = _d_reciprocal
def _d_copy(q1, out=None):
return q1.dimensionality
p_dict[np.absolute] = _d_copy
p_dict[np.conjugate] = _d_copy
p_dict[np.negative] = _d_copy
p_dict[np.ones_like] = _d_copy
p_dict[np.rint] = _d_copy
p_dict[np.floor] = _d_copy
p_dict[np.fix] = _d_copy
p_dict[np.ceil] = _d_copy
def _d_sqrt(q1, out=None):
return q1._dimensionality**0.5
p_dict[np.sqrt] = _d_sqrt
def _d_radians(q1, out=None):
try:
assert q1.units == unit_registry['degree']
except AssertionError:
raise ValueError(
'expected units of degrees, got "%s"' % q1._dimensionality
)
return unit_registry['radian'].dimensionality
p_dict[np.radians] = _d_radians
def _d_degrees(q1, out=None):
try:
assert q1.units == unit_registry['radian']
except AssertionError:
raise ValueError(
'expected units of radians, got "%s"' % q1._dimensionality
)
return unit_registry['degree'].dimensionality
p_dict[np.degrees] = _d_degrees
def _d_dimensionless(q1, out=None):
if getattr(q1, 'dimensionality', None):
raise ValueError("quantity must be dimensionless")
return Dimensionality()
p_dict[np.log] = _d_dimensionless
p_dict[np.log10] = _d_dimensionless
p_dict[np.log2] = _d_dimensionless
p_dict[np.log1p] = _d_dimensionless
p_dict[np.exp] = _d_dimensionless
p_dict[np.expm1] = _d_dimensionless
p_dict[np.logaddexp] = _d_dimensionless
p_dict[np.logaddexp2] = _d_dimensionless
def _d_trig(q1, out=None):
try:
assert q1.units == unit_registry['radian']
except AssertionError:
raise ValueError(
'expected units of radians, got "%s"' % q1._dimensionality
)
return Dimensionality()
p_dict[np.sin] = _d_trig
p_dict[np.sinh] = _d_trig
p_dict[np.cos] = _d_trig
p_dict[np.cosh] = _d_trig
p_dict[np.tan] = _d_trig
p_dict[np.tanh] = _d_trig
def _d_arctrig(q1, out=None):
if getattr(q1, 'dimensionality', None):
raise ValueError("quantity must be dimensionless")
return unit_registry['radian'].dimensionality
p_dict[np.arcsin] = _d_arctrig
p_dict[np.arcsinh] = _d_arctrig
p_dict[np.arccos] = _d_arctrig
p_dict[np.arccosh] = _d_arctrig
p_dict[np.arctan] = _d_arctrig
p_dict[np.arctanh] = _d_arctrig
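# How the p_dict table above is consumed (a sketch): when a ufunc is applied
# to Quantity arguments, the Quantity wrapper looks the ufunc up here to
# compute the output's dimensionality before delegating to numpy, roughly:
#
#   handler = p_dict[np.multiply]
#   out_dims = handler(q1, q2)   # Dimensionality of q1 * q2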
from __future__ import print_function
from builtins import str
from builtins import range
from builtins import object
import os, json, re
from PySide import QtGui, QtCore
from medley.helpers import load_json, save_json
from medley.playlist import Playlist
from medley.formats import M3U
from medley.content import Content
from shared import all_contents, configs, cli_items
from playlist_view import PlaylistModel, find_contents
class Node(object):
"""
Nodes are used to build tree structures
in this case they represent a hierarchy of medley.Playlists
These Node objects also have methods
to allow them to work in a Qt based system.
In this case, they are part of a TreeModel,
which handles interactions (moving nodes, etc) within the tree.
"""
def __init__( self, name, parent=None ):
"""
Instantiates a new tree item
"""
self._name = name
self._parent = parent
self.children = []
self.source = ''
self.content = Playlist()
if parent is not None:
parent.addChild(self)
def __str__(self):
return str(self._name)
def __repr__(self):
return str(self._name)
#return self.log()
def typeInfo(self):
return "NODE"
def addChild(self, child):
self.children.append(child)
child._parent = self
return True
def insertChild(self, position, child):
if position < 0 or position > len(self.children):
return False
self.children.insert(position, child)
child._parent = self
return True
def removeChild(self, position):
        if position < 0 or position >= len(self.children):
return False
child = self.children.pop(position)
child._parent = None
return True
def name(self):
return self._name
def setName(self, name):
self._name = name
def child(self, row):
if row < len(self.children):
return self.children[row]
else:
print("No child at row: %s" % row)
return None
def childCount(self):
return len(self.children)
def parent(self):
return self._parent
def row(self):
if self._parent is not None:
return self._parent.children.index(self)
def as_dict(self):
"""
return a simple dictionary representation of the node
(useful for serializing)
"""
copy = {}
copy['name'] = self.name()
#assumes this is ok for JSON.dumps:
#if it refers to a Playlist object, it probably is not OK
#copy['content'] = self.content
#TODO:
#in that case, it's better to save the Playlist to self.source
copy['source'] = self.source
copy['children'] = []
for child in self.children:
copy['children'].append(child.as_dict())
return copy
def to_json(self):
"""
json representation of object
"""
return json.dumps(self.as_dict())
def save_all(self):
"""
recursively call save for all nodes
"""
self.save()
for child in self.children:
child.save_all()
def save(self):
"""
save the current playlist
different than to_json, which creates the whole structure,
including children
"""
if self.source:
#content is really a Playlist object here
#TODO:
#refactor this to be less confusing.
self.content.save(self.source)
#TODO:
#should check if a Playlist has any changes before saving
#also consider setting an option to automatically
#increment a date section of a filename if changes occur
#(for automated file versions)
    def from_json(self, data='', item=None):
"""
load a previously serialized Node structure
data assumed to be a json string
item assumed to be a simple object (non Node)
"""
if data:
simple = json.loads(data)
elif item:
simple = item
else:
raise ValueError("No data to load from_json: %s" % data)
#print simple
if 'name' in simple:
self.setName(simple['name'])
#ok to load it if it has it, but probably better to load source
if 'content' in simple:
self.content = simple['content']
if 'source' in simple:
self.source = simple['source']
if self.source:
#load self.source into self.content here
#playlist = load_playlist(self.source)
#print "loading playlist: %s" % self.source
#playlist = Playlist(debug=True)
playlist = Playlist()
playlist.load(self.source, all_contents)
self.content = playlist
else:
#no source specified
#create an empty Playlist here instead
#self.content = Playlist()
#self.content should have already been initialized
#with an empty/new Playlist()
pass
if 'children' in simple:
for item in simple['children']:
child = Node('child')
child.from_json(item=item)
self.addChild(child)
def log(self, tabLevel=-1):
output = ""
tabLevel += 1
for i in range(tabLevel):
output += "\t"
output += "|------" + self._name + "\n"
for child in self.children:
output += child.log(tabLevel)
output += "\n"
return output
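# Illustrative sketch of building and inspecting a small Node tree
# (log() indents children with tabs):
#
#   root = Node("root")
#   jazz = Node("jazz", parent=root)
#   Node("bebop", parent=jazz)
#   print(root.log())
#   # |------root
#   #     |------jazz
#   #         |------bebop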
#other potential base classes
#QtCore.QAbstractListModel
#QtCore.QAbstractTableModel
class TreeModel(QtCore.QAbstractItemModel):
def __init__(self, root, parent=None):
super(TreeModel, self).__init__(parent)
self.root = root
def rowCount(self, parent):
if not parent.isValid():
parentNode = self.root
else:
parentNode = parent.internalPointer()
return parentNode.childCount()
def columnCount(self, index):
"""
how many columns to show?
"""
#return 2
return 1
def data(self, index, role):
"""
this determines what is displayed in a View
for a given item at index 'index'
role determines which context the data is shown in
"""
if not index.isValid():
return None
node = index.internalPointer()
#print node.log()
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
## row = index.row()
## value = self.nodes[row]
## return value._name
if index.column() == 0:
return node.name()
## elif role == QtCore.Qt.DecorationRole:
## if index.column() == 0:
## typeInfo = node.typeInfo()
## if typeInfo == "LIGHT":
## return QtGui.QIcon(QtGui.QPixmap(":/Light.png"))
## #row = index.row()
## #value = self.nodes[row]
## pixmap = QtGui.QPixmap(26, 26)
## pixmap.fill('#000000')
## icon = QtGui.QIcon(pixmap)
## return icon
## elif role == QtCore.Qt.EditRole:
## #what is shown when we're editing an item:
## row = index.row()
## value = self.nodes[row]
## return value._name
elif role == QtCore.Qt.ToolTipRole:
return "Item details: %s" % node.name()
elif role == 0:
return index.internalPointer()
else:
return None
def headerData(self, section, orientation, role):
if role == QtCore.Qt.DisplayRole:
if orientation == QtCore.Qt.Horizontal:
return "Content"
else:
#must be a row name (running vertically):
return "Item %s" % (section)
def flags(self, index):
"""
Valid items are selectable, editable, and drag and drop enabled.
Invalid indices (open space in the view)
are also drop enabled, so you can drop items onto the top level.
"""
if not index.isValid():
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsDropEnabled
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsDropEnabled | \
QtCore.Qt.ItemIsDragEnabled | \
QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsEditable
def setData(self, index, value, role=QtCore.Qt.EditRole):
"""
this handles the new data once an edit is complete
"""
if index.isValid():
if role == QtCore.Qt.EditRole:
node = index.internalPointer()
node.setName(value)
return True
return False
## def setData(self, index, value, role = QtCore.Qt.EditRole):
## """
## this handles the new data once an edit is complete
## """
## if role == QtCore.Qt.EditRole:
## row = index.row()
## self.nodes[row]._name = value
## #for alerting other views that the data has changed:
## self.dataChanged.emit(index, index)
## return True
def insertRows(self, row, count, parentIndex=QtCore.QModelIndex()):
"""
Add a number of rows to the model at the given row and parent.
"""
#default parent to the root:
#parentIndex = QtCore.QModelIndex()
self.beginInsertRows( parentIndex, row, row+count-1 )
parent = self.getNode(parentIndex)
for i in range(count):
childCount = parent.childCount()
child = Node("untitled" + str(childCount))
success = parent.insertChild(row, child)
self.endInsertRows()
return success
def removeRows(self, row, count, parentIndex=QtCore.QModelIndex()):
"""
Remove a number of rows from the model at the given row and parent.
"""
self.beginRemoveRows( parentIndex, row, row+count-1 )
parent = self.getNode(parentIndex)
for i in range(count):
success = parent.removeChild(row)
#value = self.nodes[row]
#self.nodes.remove(value)
self.endRemoveRows()
return success
## def index(self, row, column, parent):
## return self.createIndex(row, column, self.nodes[row])
def index(self, row, column, parent):
"""
Needed for TreeViews specifically
"""
parentNode = self.getNode(parent)
childItem = parentNode.child(row)
if childItem:
return self.createIndex(row, column, childItem)
else:
return QtCore.QModelIndex()
def parent(self, index):
"""
Needed for TreeViews specifically
"""
node = index.internalPointer()
parentNode = node.parent()
if parentNode == self.root:
return QtCore.QModelIndex()
return self.createIndex(parentNode.row(), 0, parentNode)
def getNode(self, index):
"""
Returns the Node instance from a QModelIndex.
"""
if index.isValid():
node = index.internalPointer()
if node:
return node
return self.root
def supportedDropActions(self):
"""
Items can be moved and copied
"""
#return QtCore.Qt.CopyAction | QtCore.Qt.MoveAction
return QtCore.Qt.MoveAction | QtCore.Qt.CopyAction
def mimeTypes(self):
#return ['text/json']
return ['json/content', 'text/json']
def mimeData(self, indices):
mimedata = QtCore.QMimeData()
item = self.getNode( indices[0] )
#mimedata.setData('text/xml', item)
#print json.dumps(item)
mimedata.setData('text/json', item.to_json())
return mimedata
def dropMimeData(self, mimedata, action, row, column, parent):
#print dir(mimedata)
#print mimedata.data.keys()
print('dropMimeData %s %s %s %s' % (mimedata.data('text/json'), action, row, parent))
formats = mimedata.formats()
print("formats: %s" % formats)
if 'text/json' in formats:
item = Node('')
item.from_json( str(mimedata.data('text/json')) )
dropParent = self.getNode( parent )
dropParent.addChild( item )
#self.insertRows( dropParent.numChildren()-1, 1, parentIndex )
elif 'json/content' in formats:
print("Adding content to playlist")
dropParent = self.getNode( parent )
#self.cur_node = self.cur_index.internalPointer()
print("using content: %s" % dropParent.content)
subtree = PlaylistModel(dropParent.content)
subtree.dropMimeData(mimedata, action, row, column, parent)
self.dataChanged.emit( parent, parent )
return True
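# Minimal sketch of wiring the model to a view (PlaylistsTreeView below does
# this with extra drag/drop configuration; names here are illustrative only):
#
#   root = Node("root")
#   model = TreeModel(root)
#   view = QtGui.QTreeView()
#   view.setModel(model)
#   view.setDragDropMode(QtGui.QAbstractItemView.DragDrop)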
class PlaylistsTreeView(QtGui.QTreeView):
"""
customizing TreeView to handle selections appropriately:
http://stackoverflow.com/questions/4160111/pyqt-qtreeview-trying-to-connect-to-the-selectionchanged-signal
"""
def __init__(self, parent=None):
"""
"""
super(PlaylistsTreeView, self).__init__(parent)
self.model = None
self.cur_item = None
self.cur_index = QtCore.QModelIndex()
self.cur_node = None
self.last_folder = None
root = Node("root")
## for i in range(4):
## node = Node('node%s'%i)
## node.addChild(Node('node%s%s'%(i, i)))
## root.addChild(node)
## #self.nodes = [Node('node0'), Node('node1'), Node('node2')]
self.playlists = TreeModel(root)
#print root.log()
previous = False
previous_path = configs.get('previously')
if previous_path and os.path.exists(previous_path):
#try:
self.load_lists(previous_path)
previous = True
#except:
# print "Error loading previous configuration: %s" % previous_path
## #old way, when loading configs locally here
## if self.configs.has_key('previously'):
## if self.configs['previously']:
## if os.path.exists(self.configs['previously']):
## #try:
## self.load_lists(self.configs['previously'])
## previous = True
## #except:
## # print "Error loading previous configuration: %s" % self.configs['previously']
if not previous:
print("Could not find a valid previous setup... starting blank")
#self.playlists.root.from_json(item={})
self.load_lists("blank.json")
if cli_items:
print("Items passed in from cli: %s" % cli_items)
#print root.children
for fname in cli_items:
contents = find_contents(fname)
for item in contents:
#default to the first list...
#will be Clean Slate if new
#could make it the last one... or something else
#TBD
root.children[0].content.add_if_new(item)
#not sure that this step is even necessary here...
#just try adding it to the Playlist
## contents = []
## contents = find_contents(fname)
## print "CONTENTS DURING LOAD: %s" % contents
## result = self.add_contents(contents)
## print result
## print
#initialize data here:
self.setModel(self.playlists)
#we don't need the header here
#self.tree_view.setHeaderHidden(True)
self.setHeaderHidden(True)
#this only allows moving... no copying:
#self.tree_view.setDragDropMode(QtGui.QAbstractItemView.InternalMove)
#self.tree_view.setDragDropMode(QtGui.QAbstractItemView.DragDrop)
self.setDragDropMode(QtGui.QAbstractItemView.DragDrop)
#this sets the default drag mode from Copy (default) to Move
#move seems more consistent with other interface behavior
#self.tree_view.setDefaultDropAction(QtCore.Qt.MoveAction)
self.setDefaultDropAction(QtCore.Qt.MoveAction)
#self.tree_view.setDragEnabled(True)
#has no effect on trees, but might work in tables
#self.tree_view.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
def load_lists(self, fname):
"""
for internal calls to load
"""
items = load_json(fname)
self.playlists.root.from_json(item=items)
#for item in items:
# print type(item)
def setModel(self, model):
super(PlaylistsTreeView, self).setModel(model)
self.model = model
#checking to see if the shortcut (following line) works instead of this:
## self.connect(self.selectionModel(),
## QtCore.SIGNAL("selectionChanged(QItemSelection, QItemSelection)"),
## self.change_selection)
#this seems to perform the same function:
#keeping both versions around, since legacy code uses SIGNAL
#this is a good reminder/guide on how to convert them
#
#also [2013.09.10 10:55:31]
#both versions cause a segfault on linux with the following:
#Python 2.7.4, Pyside 1.1.2, Qt 4.8.4
#
#this happens if nothing was loaded from a previous session
#and will cause a "Segmentation fault (core dumped)"
#
#this one happens sooner since it is called immediately
#
#self.selectionModel().selectionChanged.connect(self.change_selection)
sm = self.selectionModel()
#this is needed to avoid SegFaults on Linux:
        #http://srinikom.github.io/pyside-bz-archive/1041.html
#sm.setParent(None)
sm.selectionChanged.connect(self.change_selection)
def change_selection(self, newSelection, oldSelection):
#print "changed"
#this is *not* a QItemSelectionModel:
#http://srinikom.github.io/pyside-docs/PySide/QtGui/QItemSelectionModel.html#PySide.QtGui.QItemSelectionModel
#print newSelection.model()
#this is a QItemSelection:
#http://srinikom.github.io/pyside-docs/PySide/QtGui/QItemSelection.html#PySide.QtGui.QItemSelection
#print newSelection.length()
#print newSelection.indexes()[0].model().name()
#there is only one TreeModel object:
## self.cur_item = newSelection.indexes()[0].model()
## print dir(self.cur_item)
## print type(self.cur_item)
## print "cur_item"
# this is true!
## assert self.cur_item == self.model
## print ""
#make sure we have something before trying to change:
if newSelection and len(newSelection.indexes()):
#these are equivalent:
#print newSelection.indexes()[0].data()
#and
self.cur_index = newSelection.indexes()[0]
#print self.cur_item.data(self.cur_index, QtCore.Qt.DisplayRole)
#print dir(self.cur_index)
#print type(self.cur_index)
#print "cur_index"
#print ""
self.cur_node = self.cur_index.internalPointer()
self.parent().change_selection(self.cur_node)
def update_location(self, destination):
print("UPDATE LOCATION CALLED: %s" % destination)
self.cur_node.source = destination
self.cur_node.save()
def open_list(self):
"""
only open an individual list and append it to the current tree
"""
#print "OOOOPEN!!!"
if self.last_folder:
fname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open Playlist',
self.last_folder)
else:
fname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open Playlist')
playlist = Playlist()
        json_check = re.compile(r'.*\.json$')
        m3u_check = re.compile(r'.*\.m3u$')
#check if fname is a .json playlist
if json_check.search(fname):
#open fname here and assign Playlist object as child.content
#playlist = load_playlist(fname)
playlist.load(fname, all_contents)
#or a .m3u playlist
elif m3u_check.search(fname):
#open m3u:
m3u = M3U()
#TODO: consider 'look_for_meta'... what to do if content exists?
m3u.load(fname)
playlist.extend(m3u[:])
            #start by converting to a standard .json list
#will need to make sure each referenced media
#has a corresponding Content object
#playlist_view.find_contents(media_path) should take care of that
#then create a new fname with a json destination
#for automatic saving of updates in the future
m3u_name_only = os.path.basename(fname)
parts = m3u_name_only.split('.')
parts[-1] = 'json'
json_name = '.'.join(parts)
fname = os.path.join(os.path.dirname(fname), json_name)
print(fname)
else:
print("UNKNOWN PLAYLIST FORMAT: %s" % fname)
#def insertRows(self, row, count, parentIndex=QtCore.QModelIndex()):
parent = self.model.getNode(self.cur_index)
#how many to insert:
count = 1
child_count = parent.childCount()
self.model.beginInsertRows( self.cur_index, child_count, child_count+count-1 )
for i in range(count):
name_only = os.path.basename(fname)
child = Node(name_only)
child.source = fname
child.content = playlist
#add Node to tree of playlists:
success = parent.insertChild(child_count, child)
self.model.endInsertRows()
return success
def add(self):
"""
add a new child node to the currently selected node
this is for adding a new playlist
"""
#print "ADDDDDD!!!"
if self.cur_index:
#parent = self.cur_index.parent()
#row = self.cur_index.row()
#self.model.insertRows(row, 1, parent)
self.model.insertRows(0, 1, self.cur_index)
else:
self.model.insertRows(0, 1)
#if self.cur_index:
# new_node = Node("New Node")
# self.cur_node.addChild(new_node)
#print self.model.root.log()
def remove(self):
"""
        remove the currently selected node
(and all children?)
"""
#print "REMOOOOOOVE!!!"
parent = self.cur_index.parent()
row = self.cur_index.row()
self.model.removeRows(row, 1, parent)
#this works, but something gets out of sync
#parent = self.cur_node.parent()
#parent.children.remove(self.cur_node)
#these do not work:
#parent.removeChild(self.cur_node)
#del self.cur_node
#print self.model.root.as_dict()
#print self.model.root.log()
def save_configs(self):
"""
save self.configs to local 'configs.json' file
"""
#save_json(self.config_source, self.configs)
configs.save_configs()
def import_lists(self):
#using self.last_folder to remember previously opened location
if self.last_folder:
fname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file',
self.last_folder)
else:
fname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file')
print("Children pre:")
for child in self.playlists.root.children:
            print(child.name())  # name() is a method, not an attribute
items = load_json(fname)
#make a new node based on items root:
child = Node('new_root')
#child.from_json(item=item)
self.playlists.root.addChild(child)
child.from_json(item=items)
self.setModel(self.playlists)
print("Children post:")
for child in self.playlists.root.children:
            print(child.name())  # name() is a method, not an attribute
def open_lists(self):
"""
generate a TreeModel based on a previously saved structure
"""
#using self.last_folder to remember previously opened location
if self.last_folder:
fname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file',
self.last_folder)
else:
fname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file')
#print "OPEN LISTS CALLED: %s" % fname
if fname:
self.load_lists(fname)
def save_lists(self):
"""
convert the current TreeModel to a json file for later retrieval
"""
#On OS X these both only flash the Dialog window
#but never complete the process (until the application exits)
#fname = QtGui.QFileDialog.getSaveFileName(self, 'Save file', '/')
#fname, _ = QtGui.QFileDialog.getSaveFileName(self, 'Save file', '/')
#this at least throws an error (but still doesn't work)
## options = QtGui.QFileDialog.Options()
## if not self.native.isChecked():
## options |= QtGui.QFileDialog.DontUseNativeDialog
## fileName, filtr = QtGui.QFileDialog.getSaveFileName(self,
## "QFileDialog.getSaveFileName()",
## self.saveFileNameLabel.text(),
## "All Files (*);;Text Files (*.txt)", options)
## if fileName:
## self.saveFileNameLabel.setText(fileName)
#this thread mentioned a similar issue,
#https://code.google.com/p/marave/issues/detail?id=91
#http://srinikom.github.io/pyside-docs/PySide/QtGui/QFileDialog.html#PySide.QtGui.PySide.QtGui.QFileDialog.getOpenFileName
#which they solved by manually instantiating a QFileDialog
dlg = QtGui.QFileDialog(self.parent(), "Save as", self.last_folder)
dlg.setAcceptMode(QtGui.QFileDialog.AcceptSave)
dlg.setFileMode(QtGui.QFileDialog.AnyFile)
dlg.exec_()
fname = dlg.selectedFiles()[0]
if fname:
print("SAVE LISTS CALLED: %s" % fname)
self.last_folder = os.path.dirname(fname)
tree = self.playlists.root.as_dict()
save_json(fname, tree)
self.playlists.root.save_all()
#self.configs['previously'] = fname
configs.configs['previously'] = fname
self.save_configs()
#print tree
#!/usr/bin/env python
#
# test_command.py - test cases for the generic ReadWriteCommand class
#
# January 2015, Glenn F. Matthews
# Copyright (c) 2015-2017 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Test cases for COT.commands.Command class and generic subclasses."""
import os.path
import mock
from COT.commands.tests.command_testcase import CommandTestCase
from COT.commands import Command, ReadCommand, ReadWriteCommand
from COT.data_validation import InvalidInputError
from COT.vm_description import VMInitError
# pylint: disable=missing-param-doc,missing-type-doc,protected-access
class TestCommand(CommandTestCase):
"""Test cases for Command base class."""
command_class = Command
def test_disk_space_required_zero_if_no_vm(self):
"""Corner case - no VM yet, working_dir_disk_space_required == 0."""
self.assertEqual(self.command.working_dir_disk_space_required(), 0)
def test_check_disk_space_sufficient(self):
"""Positive test for check_disk_space API."""
self.assertTrue(self.command.check_disk_space(1, self.temp_dir))
self.assertTrue(self.command.check_disk_space(
1, self.temp_dir,
label="Hello", context="Contextual detail", die=True))
@mock.patch("COT.commands.command.available_bytes_at_path", return_value=0)
def test_check_disk_space_insufficient(self, mock_available):
"""Negative test for check_disk_space API."""
# If user declines, return False or die
self.command.ui.default_confirm_response = False
self.assertFalse(self.command.check_disk_space(100, self.temp_dir))
mock_available.assert_called_once()
mock_available.reset_mock()
self.command._cached_disk_requirements.clear()
self.assertRaises(SystemExit, self.command.check_disk_space,
100, self.temp_dir, die=True)
mock_available.assert_called_once()
mock_available.reset_mock()
self.command._cached_disk_requirements.clear()
# If user accepts, return True anyways
self.command.ui.default_confirm_response = True
self.assertTrue(self.command.check_disk_space(100, self.temp_dir))
mock_available.assert_called_once()
mock_available.reset_mock()
self.command._cached_disk_requirements.clear()
self.assertTrue(self.command.check_disk_space(100, self.temp_dir,
die=True))
mock_available.assert_called_once()
@mock.patch("COT.commands.command.available_bytes_at_path")
def test_check_disk_space_caching(self, mock_available):
"""Confirm disk space checks are invoked and cached appropriately."""
mock_available.return_value = 50000
val = self.command.check_disk_space(100, __file__)
self.assertTrue(val)
mock_available.assert_called_once_with(os.path.dirname(__file__))
mock_available.reset_mock()
# Checking same path again with different, lower size - no re-check
val = self.command.check_disk_space(50, __file__)
self.assertTrue(val)
mock_available.assert_not_called()
# Checking the same path again with the same size - no re-check
val = self.command.check_disk_space(100, __file__)
self.assertTrue(val)
mock_available.assert_not_called()
# As caching is by directory not by file,
# checking the same directory again with the same size - no re-check
val = self.command.check_disk_space(100, os.path.dirname(__file__))
self.assertTrue(val)
mock_available.assert_not_called()
# Checking same path with increased size - re-check
val = self.command.check_disk_space(200, os.path.dirname(__file__))
self.assertTrue(val)
mock_available.assert_called_once_with(os.path.dirname(__file__))
mock_available.reset_mock()
# Checking different path - re-check
val = self.command.check_disk_space(100, self.input_ovf)
self.assertTrue(val)
mock_available.assert_called_once_with(os.path.dirname(self.input_ovf))
mock_available.reset_mock()
        # Explicitly forcing re-check
val = self.command.check_disk_space(100, self.input_ovf,
force_check=True)
self.assertTrue(val)
mock_available.assert_called_once_with(os.path.dirname(self.input_ovf))
mock_available.reset_mock()
class TestReadCommand(TestCommand):
"""Test cases for ReadCommand class."""
command_class = ReadCommand
def test_set_package_nonexistent(self):
"""Package setter raises InvalidInputError for nonexistent file."""
with self.assertRaises(InvalidInputError):
self.command.package = "/foo/bar/baz"
def test_not_ready_if_insufficient_working_space(self):
"""Verify ready_to_run() fails if working disk space is lacking."""
self.command.package = self.input_ovf
self.command.ui.default_confirm_response = False
with mock.patch.object(self.command,
'working_dir_disk_space_required',
return_value=(1 << 60)):
ready, reason = self.command.ready_to_run()
self.assertFalse(ready)
self.assertRegex(reason, "Insufficient disk space available for"
" temporary file storage")
# User can opt to continue anyway
self.command.ui.default_confirm_response = True
self.command._cached_disk_requirements.clear()
with mock.patch.object(self.command,
'working_dir_disk_space_required',
return_value=(1 << 60)):
ready, reason = self.command.ready_to_run()
self.assertTrue(ready)
class TestReadWriteCommand(TestReadCommand):
"""Test cases for ReadWriteCommand class."""
command_class = ReadWriteCommand
def test_vmfactory_fail(self):
"""If package/output are unsupported, expect a VMInitError."""
self.command.output = "foo.vmx"
with self.assertRaises(VMInitError):
self.command.package = self.input_ovf
def test_create_subparser_noop(self):
"""The generic class doesn't create a subparser."""
self.command.create_subparser()
def test_not_ready_if_insufficient_output_space(self):
"""Ensure that ready_to_run() fails if output disk space is lacking."""
self.command.package = self.input_ovf
self.command.ui.default_confirm_response = False
# Make working directory requirements negligible but output huge
with mock.patch.object(self.command,
"working_dir_disk_space_required",
return_value=0), \
mock.patch.object(self.command.vm,
'predicted_output_size',
return_value=(1 << 60)):
ready, reason = self.command.ready_to_run()
self.assertFalse(ready)
self.assertRegex(reason, "Insufficient disk space available"
" to guarantee successful output")
# User can opt to continue anyway
self.command.ui.default_confirm_response = True
self.command._cached_disk_requirements.clear()
with mock.patch.object(self.command,
"working_dir_disk_space_required",
return_value=0), \
mock.patch.object(self.command.vm,
'predicted_output_size',
return_value=(1 << 60)):
ready, reason = self.command.ready_to_run()
self.assertTrue(ready)
def test_set_output_invalid(self):
"""Check various failure cases for output setter."""
# Nonexistent output location, regardless of package
with self.assertRaises(InvalidInputError):
self.command.output = "/foo/bar/baz"
self.command.package = self.input_ovf
# Nonexistent output location with package set
with self.assertRaises(InvalidInputError):
self.command.output = "/foo/bar/baz.ova"
# Output to directory instead of file (currently unsupported)
with self.assertRaises(InvalidInputError):
self.command.output = self.temp_dir
# Output to "directory" under a file
with self.assertRaises(InvalidInputError):
self.command.output = os.path.join(self.input_ovf, "foo.ova")
def test_set_output_implicitly(self):
"""If 'output' is not specifically set, run() sets it to 'package'."""
self.command.output = ""
self.command.package = self.input_ovf
self.assertEqual(self.command.output, "")
self.command.run()
self.assertEqual(self.command.output, self.input_ovf)
def test_finished_no_vm(self):
"""Verify that finished() can be successful if no VM was set."""
self.command.finished()
# --- next source file ---
from PyQt5 import QtCore, QtWidgets
import numpy as np
import pyqtgraph as pg
from .ATEMWidget import ATEMWidget
from .GridWorker import GridWorker
from .colormaps import jetCM
class GridWidget(ATEMWidget):
"""docstring for GridWidget"""
def __init__(self, parent):
super(GridWidget, self).__init__(parent)
self.parent = parent
self.gridStore = {}
self.init_grids()
self.init_ui()
if not self.parent.data.has_pred:
self.predPlotWidget.setVisible(False)
self.current_tInd = self.parent.selectedTimeInd
self.absMinValue = 1e-20
self.absMaxValue = 1e20
self.show()
def init_ui(self):
""" Docstring """
# Make the background white
palette = self.palette()
palette.setColor(self.backgroundRole(), QtCore.Qt.white)
self.setPalette(palette)
self.obsPlotWidget = pg.PlotWidget(enableMenu=False)
self.obsPlotWidget.setLabel('left', 'Easting', units='m')
self.obsPlotWidget.setLabel('bottom', 'Northing', units='m')
self.obsPlotWidget.showGrid(x=True, y=True)
self.obsPlotWidget.getViewBox().setAspectLocked()
self.obsPlotWidget.setTitle('Observed')
self.predPlotWidget = pg.PlotWidget(enableMenu=False)
self.predPlotWidget.setLabel('left', 'Easting', units='m')
self.predPlotWidget.setLabel('bottom', 'Northing', units='m')
self.predPlotWidget.showGrid(x=True, y=True)
self.predPlotWidget.getViewBox().setAspectLocked()
self.predPlotWidget.setTitle('Predicted')
self.colorbarWidget = pg.PlotWidget(enableMenu=False)
self.colorbarWidget.setMaximumWidth(40)
self.colorbarWidget.getViewBox().setMouseEnabled(False, False)
self.colorbarWidget.setXRange(0, 20, padding=0)
self.colorbarWidget.setYRange(0, 256, padding=0)
self.colorbarWidget.getAxis('bottom').setPen(None)
self.colorbarWidget.getAxis('left').setPen(None)
self.colorbarWidget.getAxis('left').setWidth(0)
self.colorbarWidget.getAxis('right').setWidth(0)
self.colorbarWidget.getAxis('top').setWidth(0)
self.colorbarWidget.getAxis('bottom').setWidth(0)
self.cbMinLabel = QtWidgets.QLabel()
self.cbMinLabel.setText('')
self.cbMaxLabel = QtWidgets.QLabel()
self.cbMaxLabel.setText('')
self.colorbar = pg.ImageItem()
cbData = np.arange(0, 256)[:, np.newaxis].repeat(20, axis=1).T
self.colorbar.setImage(jetCM[cbData])
self.colorbarWidget.addItem(self.colorbar)
self.locLabel = QtWidgets.QLabel('')
self.locLabel.setAlignment(QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter)
self.obsImage = pg.ImageItem()
self.obsPlotWidget.addItem(self.obsImage)
self.predImage = pg.ImageItem()
self.predPlotWidget.addItem(self.predImage)
locLinePen = {'color': 'k', 'width': 2, 'style': QtCore.Qt.DotLine}
self.selectedLocVlineObs = pg.InfiniteLine(angle=90, movable=False, pen=locLinePen)
self.selectedLocVlinePred = pg.InfiniteLine(angle=90, movable=False, pen=locLinePen)
self.obsPlotWidget.addItem(self.selectedLocVlineObs, ignoreBounds=True)
self.predPlotWidget.addItem(self.selectedLocVlinePred, ignoreBounds=True)
self.selectedLocHlineObs = pg.InfiniteLine(angle=0, movable=False, pen=locLinePen)
self.selectedLocHlinePred = pg.InfiniteLine(angle=0, movable=False, pen=locLinePen)
self.obsPlotWidget.addItem(self.selectedLocHlineObs, ignoreBounds=True)
self.predPlotWidget.addItem(self.selectedLocHlinePred, ignoreBounds=True)
self.obsPlotWidget.scene().sigMouseClicked.connect(self.clickObsEvent)
self.predPlotWidget.scene().sigMouseClicked.connect(self.clickPredEvent)
self.obsPlotWidget.setXLink(self.predPlotWidget)
self.obsPlotWidget.setYLink(self.predPlotWidget)
self.highSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.highSlider.setMaximum(100)
self.highSlider.setValue(100)
self.highSlider.valueChanged.connect(self.setClim)
self.lowSlider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self.lowSlider.setMaximum(100)
self.lowSlider.setValue(0)
self.lowSlider.valueChanged.connect(self.setClim)
lh = QtWidgets.QHBoxLayout()
lh.addWidget(self.obsPlotWidget)
lh.addWidget(self.predPlotWidget)
cbvLayout = QtWidgets.QVBoxLayout()
cbvLayout.addWidget(self.cbMaxLabel)
cbvLayout.addWidget(self.colorbarWidget)
cbvLayout.addWidget(self.cbMinLabel)
lh.addLayout(cbvLayout)
mainLayout = QtWidgets.QVBoxLayout(self)
mainLayout.addLayout(lh)
mainLayout.addWidget(self.highSlider)
mainLayout.addWidget(self.lowSlider)
mainLayout.addWidget(self.locLabel)
self.mouseMoveProxyObs = pg.SignalProxy(self.obsPlotWidget.scene().sigMouseMoved,
rateLimit=30, slot=self.mouseMovedEvent)
self.mouseMoveProxyPred = pg.SignalProxy(self.predPlotWidget.scene().sigMouseMoved,
rateLimit=30, slot=self.mouseMovedEvent)
def init_grids(self):
self.gridWorker_Obs = GridWorker(self.parent.data, 'dBdt_Z')
self.gridWorker_Obs.grdOpts['number_cells'] = 256
self.gridWorker_Obs.finishedGrid.connect(self.storeGrid)
self.gridWorker_Obs.start()
if self.parent.data.has_pred:
self.gridWorker_Pred = GridWorker(self.parent.data, 'dBdt_Z_pred')
self.gridWorker_Pred.grdOpts['number_cells'] = 256
self.gridWorker_Pred.finishedGrid.connect(self.storeGrid)
self.gridWorker_Pred.start()
@QtCore.pyqtSlot(dict)
def storeGrid(self, event):
if event['ch'] not in self.gridStore:
self.gridStore[event['ch']] = {}
self.gridStore[event['ch']][event['tInd']] = event
if event['tInd'] == self.current_tInd:
if event['ch'] == 'dBdt_Z':
self.absMinValue = np.nanmin(event['grid'])
self.absMaxValue = np.nanmax(event['grid'])
self.drawObs()
try:
self.drawPred()
except Exception:
# The predicted grid for this time index may not exist yet
pass
self.obsPlotWidget.autoRange()
if event['ch'] == 'dBdt_Z_pred':
self.drawPred()
try:
self.drawObs()
except Exception:
# The observed grid for this time index may not exist yet
pass
self.predPlotWidget.autoRange()
def clickObsEvent(self, event):
if self.obsPlotWidget.sceneBoundingRect().contains(event.scenePos()):
mousePoint = self.obsPlotWidget.getViewBox().mapSceneToView(event.scenePos())
signal = {'name': 'closestLoc',
'x': mousePoint.x(),
'y': mousePoint.y()}
self.ChangeSelectionSignal.emit(signal)
def clickPredEvent(self, event):
if self.predPlotWidget.sceneBoundingRect().contains(event.scenePos()):
mousePoint = self.predPlotWidget.getViewBox().mapSceneToView(event.scenePos())
signal = {'name': 'closestLoc',
'x': mousePoint.x(),
'y': mousePoint.y()}
self.ChangeSelectionSignal.emit(signal)
def mouseMovedEvent(self, pos):
pos = pos[0]
if self.obsPlotWidget.sceneBoundingRect().contains(pos):
mousePoint = self.obsPlotWidget.getViewBox().mapSceneToView(pos)
string = "<span style='font-size: 12pt'>x={:.0f}, y={:.0f}</span>"
self.locLabel.setText(string.format(mousePoint.x(), mousePoint.y()))
elif self.predPlotWidget.sceneBoundingRect().contains(pos):
mousePoint = self.predPlotWidget.getViewBox().mapSceneToView(pos)
string = "<span style='font-size: 12pt'>x={:.0f}, y={:.0f}</span>"
self.locLabel.setText(string.format(mousePoint.x(), mousePoint.y()))
# self.chvLine.setPos(mousePoint.x())
# self.chhLine.setPos(mousePoint.y())
def setClim(self):
lsVal = self.lowSlider.value()
hsVal = self.highSlider.value()
if lsVal >= hsVal:
self.lowSlider.setValue(hsVal-1)
lsVal = self.lowSlider.value()
self.drawObs()
if 'dBdt_Z_pred' in self.gridStore:
self.drawPred()
def getClim(self):
lsVal = self.lowSlider.value()
hsVal = self.highSlider.value()
dv = self.absMaxValue-self.absMinValue
clMin = self.absMinValue+dv*lsVal/100.
clMax = self.absMinValue+dv*hsVal/100.
return clMin, clMax
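# Illustrative mapping (not executed): with absMinValue=0, absMaxValue=100,
# lowSlider at 10, and highSlider at 90, getClim() returns (10.0, 90.0).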
def setLocation(self, loc):
""" Docstring """
xl = loc.iloc[0].x
yl = loc.iloc[0].y
self.selectedLocVlineObs.setPos(xl)
self.selectedLocHlineObs.setPos(yl)
self.selectedLocVlinePred.setPos(xl)
self.selectedLocHlinePred.setPos(yl)
def setTime(self, data_times):
self.current_tInd = data_times.iloc[0].tInd
if 'dBdt_Z' in self.gridStore:
grid_obs = self.gridStore['dBdt_Z'][self.current_tInd]['grid']
self.absMinValue = np.nanmin(grid_obs)
self.absMaxValue = np.nanmax(grid_obs)
self.drawObs()
if 'dBdt_Z_pred' in self.gridStore:
self.drawPred()
def drawObs(self):
grid_obs = self.gridStore['dBdt_Z'][self.current_tInd]['grid'].T
x_vector = self.gridStore['dBdt_Z'][self.current_tInd]['x_vector']
y_vector = self.gridStore['dBdt_Z'][self.current_tInd]['y_vector']
clMin, clMax = self.getClim()
bins = np.linspace(clMin, clMax, 255)
cInd_obs = np.digitize(grid_obs, bins)
colors_obs = jetCM[cInd_obs]
colors_obs[np.isnan(grid_obs)] = (0, 0, 0, 0)
self.obsImage.setImage(colors_obs)
self.obsImage.setPos(x_vector.min(), y_vector.min())
self.obsImage.setScale(x_vector[1]-x_vector[0])
self.cbMaxLabel.setText('{:.2e}'.format(clMax))
self.cbMinLabel.setText('{:.2e}'.format(clMin))
def drawPred(self):
grid_pred = self.gridStore['dBdt_Z_pred'][self.current_tInd]['grid'].T
x_vector = self.gridStore['dBdt_Z_pred'][self.current_tInd]['x_vector']
y_vector = self.gridStore['dBdt_Z_pred'][self.current_tInd]['y_vector']
if np.any(grid_pred):
clMin, clMax = self.getClim()
bins = np.linspace(clMin, clMax, 255)
cInd_pred = np.digitize(grid_pred, bins)
colors_pred = jetCM[cInd_pred]
colors_pred[np.isnan(grid_pred)] = (0, 0, 0, 0)
self.predImage.setImage(colors_pred)
self.predImage.setPos(x_vector.min(), y_vector.min())
self.predImage.setScale(x_vector[1]-x_vector[0])
else:
self.predImage.setImage(None)
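# A minimal standalone sketch of the value-to-color mapping that drawObs()
# and drawPred() perform above: values are binned into 255 levels between
# the color limits with np.digitize, the bin indices are used to look up
# RGBA colors in the jet colormap, and NaNs are made fully transparent.
# Purely illustrative; `values` is a made-up input array.
def _sketch_colorize(values, cl_min, cl_max):
    bins = np.linspace(cl_min, cl_max, 255)
    indices = np.digitize(values, bins)      # bin index 0..255 per value
    colors = jetCM[indices]                  # RGBA lookup in the colormap
    colors[np.isnan(values)] = (0, 0, 0, 0)  # hide missing data
    return colors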
# --- next source file ---
# See http://zulip.readthedocs.io/en/latest/events-system.html for
# high-level documentation on how this system works.
from __future__ import absolute_import
from typing import cast, AbstractSet, Any, Callable, Dict, List, \
Mapping, MutableMapping, Optional, Iterable, Sequence, Set, Text, Union
from django.utils.translation import ugettext as _
from django.conf import settings
from django.utils.timezone import now as timezone_now
from collections import deque
import datetime
import os
import time
import socket
import logging
import ujson
import requests
import atexit
import sys
import signal
import tornado.autoreload
import tornado.ioloop
import random
import traceback
from zerver.models import UserProfile, Client
from zerver.decorator import RespondAsynchronously
from zerver.tornado.handlers import clear_handler_by_id, get_handler_by_id, \
finish_handler, handler_stats_string
from zerver.lib.utils import statsd
from zerver.middleware import async_request_restart
from zerver.lib.narrow import build_narrow_filter
from zerver.lib.queue import queue_json_publish
from zerver.lib.request import JsonableError
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.tornado.descriptors import clear_descriptor_by_handler_id, set_descriptor_by_handler_id
import copy
import six
requests_client = requests.Session()
for host in ['127.0.0.1', 'localhost']:
if settings.TORNADO_SERVER and host in settings.TORNADO_SERVER:
# This seems like the only working solution to ignore proxy in
# requests library.
requests_client.trust_env = False
# The idle timeout used to be a week, but we found that in that
# situation, queues from dead browser sessions would grow quite large
# due to the accumulation of message data in those queues.
IDLE_EVENT_QUEUE_TIMEOUT_SECS = 60 * 10
EVENT_QUEUE_GC_FREQ_MSECS = 1000 * 60 * 5
# Capped limit for how long a client can request an event queue
# to live
MAX_QUEUE_TIMEOUT_SECS = 7 * 24 * 60 * 60
# The heartbeats effectively act as a server-side timeout for
# get_events(). The actual timeout value is randomized for each
# client connection based on the below value. We ensure that the
# maximum timeout value is 55 seconds, to deal with crappy home
# wireless routers that kill "inactive" http connections.
HEARTBEAT_MIN_FREQ_SECS = 45
class ClientDescriptor(object):
def __init__(self, user_profile_id, user_profile_email, realm_id, event_queue,
event_types, client_type_name, apply_markdown=True,
all_public_streams=False, lifespan_secs=0, narrow=[]):
# type: (int, Text, int, EventQueue, Optional[Sequence[str]], Text, bool, bool, int, Iterable[Sequence[Text]]) -> None
# These objects are serialized on shutdown and restored on restart.
# If fields are added or semantics are changed, temporary code must be
# added to load_event_queues() to update the restored objects.
# Additionally, the to_dict and from_dict methods must be updated
self.user_profile_id = user_profile_id
self.user_profile_email = user_profile_email
self.realm_id = realm_id
self.current_handler_id = None # type: Optional[int]
self.current_client_name = None # type: Optional[Text]
self.event_queue = event_queue
self.queue_timeout = lifespan_secs
self.event_types = event_types
self.last_connection_time = time.time()
self.apply_markdown = apply_markdown
self.all_public_streams = all_public_streams
self.client_type_name = client_type_name
self._timeout_handle = None # type: Any # TODO: should be return type of ioloop.add_timeout
self.narrow = narrow
self.narrow_filter = build_narrow_filter(narrow)
# Clamp queue_timeout to between minimum and maximum timeouts
self.queue_timeout = max(IDLE_EVENT_QUEUE_TIMEOUT_SECS, min(self.queue_timeout, MAX_QUEUE_TIMEOUT_SECS))
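# For example, the default lifespan_secs=0 is clamped up to
# IDLE_EVENT_QUEUE_TIMEOUT_SECS (10 minutes), and a requested lifespan
# longer than MAX_QUEUE_TIMEOUT_SECS (7 days) is clamped down to it.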
def to_dict(self):
# type: () -> Dict[str, Any]
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
return dict(user_profile_id=self.user_profile_id,
user_profile_email=self.user_profile_email,
realm_id=self.realm_id,
event_queue=self.event_queue.to_dict(),
queue_timeout=self.queue_timeout,
event_types=self.event_types,
last_connection_time=self.last_connection_time,
apply_markdown=self.apply_markdown,
all_public_streams=self.all_public_streams,
narrow=self.narrow,
client_type_name=self.client_type_name)
def __repr__(self):
# type: () -> str
return "ClientDescriptor<%s>" % (self.event_queue.id,)
@classmethod
def from_dict(cls, d):
# type: (MutableMapping[str, Any]) -> ClientDescriptor
if 'user_profile_email' not in d:
# Temporary migration for the addition of the new user_profile_email field
from zerver.models import get_user_profile_by_id
d['user_profile_email'] = get_user_profile_by_id(d['user_profile_id']).email
if 'client_type' in d:
# Temporary migration for the rename of client_type to client_type_name
d['client_type_name'] = d['client_type']
ret = cls(d['user_profile_id'], d['user_profile_email'], d['realm_id'],
EventQueue.from_dict(d['event_queue']), d['event_types'],
d['client_type_name'], d['apply_markdown'], d['all_public_streams'],
d['queue_timeout'], d.get('narrow', []))
ret.last_connection_time = d['last_connection_time']
return ret
def prepare_for_pickling(self):
# type: () -> None
self.current_handler_id = None
self._timeout_handle = None
def add_event(self, event):
# type: (Dict[str, Any]) -> None
if self.current_handler_id is not None:
handler = get_handler_by_id(self.current_handler_id)
async_request_restart(handler._request)
self.event_queue.push(event)
self.finish_current_handler()
def finish_current_handler(self):
# type: () -> bool
if self.current_handler_id is not None:
err_msg = "Got error finishing handler for queue %s" % (self.event_queue.id,)
try:
finish_handler(self.current_handler_id, self.event_queue.id,
self.event_queue.contents(), self.apply_markdown)
except Exception:
logging.exception(err_msg)
finally:
self.disconnect_handler()
return True
return False
def accepts_event(self, event):
# type: (Mapping[str, Any]) -> bool
if self.event_types is not None and event["type"] not in self.event_types:
return False
if event["type"] == "message":
return self.narrow_filter(event)
return True
# TODO: Refactor so we don't need this function
def accepts_messages(self):
# type: () -> bool
return self.event_types is None or "message" in self.event_types
def idle(self, now):
# type: (float) -> bool
if not hasattr(self, 'queue_timeout'):
self.queue_timeout = IDLE_EVENT_QUEUE_TIMEOUT_SECS
return (self.current_handler_id is None and
now - self.last_connection_time >= self.queue_timeout)
def connect_handler(self, handler_id, client_name):
# type: (int, Text) -> None
self.current_handler_id = handler_id
self.current_client_name = client_name
set_descriptor_by_handler_id(handler_id, self)
self.last_connection_time = time.time()
def timeout_callback():
# type: () -> None
self._timeout_handle = None
# All clients get heartbeat events
self.add_event(dict(type='heartbeat'))
ioloop = tornado.ioloop.IOLoop.instance()
heartbeat_time = time.time() + HEARTBEAT_MIN_FREQ_SECS + random.randint(0, 10)
if self.client_type_name != 'API: heartbeat test':
self._timeout_handle = ioloop.add_timeout(heartbeat_time, timeout_callback)
def disconnect_handler(self, client_closed=False):
# type: (bool) -> None
if self.current_handler_id:
clear_descriptor_by_handler_id(self.current_handler_id, None)
clear_handler_by_id(self.current_handler_id)
if client_closed:
logging.info("Client disconnected for queue %s (%s via %s)" %
(self.event_queue.id, self.user_profile_email,
self.current_client_name))
self.current_handler_id = None
self.current_client_name = None
if self._timeout_handle is not None:
ioloop = tornado.ioloop.IOLoop.instance()
ioloop.remove_timeout(self._timeout_handle)
self._timeout_handle = None
def cleanup(self):
# type: () -> None
# Before we can GC the event queue, we need to disconnect the
# handler and notify the client (or connection server) so that
# they can cleanup their own state related to the GC'd event
# queue. Finishing the handler before we GC ensures the
# invariant that event queues are idle when passed to
# `do_gc_event_queues` is preserved.
self.finish_current_handler()
do_gc_event_queues({self.event_queue.id}, {self.user_profile_id},
{self.realm_id})
def compute_full_event_type(event):
# type: (Mapping[str, Any]) -> str
if event["type"] == "update_message_flags":
if event["all"]:
# Put the "all" case in its own category
return "all_flags/%s/%s" % (event["flag"], event["operation"])
return "flags/%s/%s" % (event["operation"], event["flag"])
return event["type"]
class EventQueue(object):
def __init__(self, id):
# type: (str) -> None
self.queue = deque() # type: deque[Dict[str, Any]]
self.next_event_id = 0 # type: int
self.id = id # type: str
self.virtual_events = {} # type: Dict[str, Dict[str, Any]]
def to_dict(self):
# type: () -> Dict[str, Any]
# If you add a new key to this dict, make sure you add appropriate
# migration code in from_dict or load_event_queues to account for
# loading event queues that lack that key.
return dict(id=self.id,
next_event_id=self.next_event_id,
queue=list(self.queue),
virtual_events=self.virtual_events)
@classmethod
def from_dict(cls, d):
# type: (Dict[str, Any]) -> EventQueue
ret = cls(d['id'])
ret.next_event_id = d['next_event_id']
ret.queue = deque(d['queue'])
ret.virtual_events = d.get("virtual_events", {})
return ret
def push(self, event):
# type: (Dict[str, Any]) -> None
event['id'] = self.next_event_id
self.next_event_id += 1
full_event_type = compute_full_event_type(event)
if (full_event_type in ["pointer", "restart"] or
full_event_type.startswith("flags/")):
if full_event_type not in self.virtual_events:
self.virtual_events[full_event_type] = copy.deepcopy(event)
return
# Update the virtual event with the values from the event
virtual_event = self.virtual_events[full_event_type]
virtual_event["id"] = event["id"]
if "timestamp" in event:
virtual_event["timestamp"] = event["timestamp"]
if full_event_type == "pointer":
virtual_event["pointer"] = event["pointer"]
elif full_event_type == "restart":
virtual_event["server_generation"] = event["server_generation"]
elif full_event_type.startswith("flags/"):
virtual_event["messages"] += event["messages"]
else:
self.queue.append(event)
# Note that pop ignores virtual events. This is fine in our
# current usage since virtual events should always be resolved to
# a real event before being given to users.
def pop(self):
# type: () -> Dict[str, Any]
return self.queue.popleft()
def empty(self):
# type: () -> bool
return len(self.queue) == 0 and len(self.virtual_events) == 0
# See the comment on pop; that applies here as well
def prune(self, through_id):
# type: (int) -> None
while len(self.queue) != 0 and self.queue[0]['id'] <= through_id:
self.pop()
def contents(self):
# type: () -> List[Dict[str, Any]]
contents = [] # type: List[Dict[str, Any]]
virtual_id_map = {} # type: Dict[str, Dict[str, Any]]
for event_type in self.virtual_events:
virtual_id_map[self.virtual_events[event_type]["id"]] = self.virtual_events[event_type]
virtual_ids = sorted(list(virtual_id_map.keys()))
# Merge the virtual events into their final place in the queue
index = 0
length = len(virtual_ids)
for event in self.queue:
while index < length and virtual_ids[index] < event["id"]:
contents.append(virtual_id_map[virtual_ids[index]])
index += 1
contents.append(event)
while index < length:
contents.append(virtual_id_map[virtual_ids[index]])
index += 1
self.virtual_events = {}
self.queue = deque(contents)
return contents
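# Illustrative (not executed) walk-through of the virtual-event collapsing
# above:
#   q = EventQueue('42')
#   q.push(dict(type='update_message_flags', all=False,
#               operation='add', flag='read', messages=[1]))
#   q.push(dict(type='update_message_flags', all=False,
#               operation='add', flag='read', messages=[2]))
# Both pushes share the full event type "flags/add/read", so the second is
# merged into the stored virtual event; q.contents() then yields a single
# event whose "messages" list is [1, 2] and whose "id" comes from the
# later push.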
# maps queue ids to client descriptors
clients = {} # type: Dict[str, ClientDescriptor]
# maps user id to list of client descriptors
user_clients = {} # type: Dict[int, List[ClientDescriptor]]
# maps realm id to list of client descriptors with all_public_streams=True
realm_clients_all_streams = {} # type: Dict[int, List[ClientDescriptor]]
# list of registered gc hooks.
# each one will be called with a user profile id, queue, and bool
# last_for_client that is true if this is the last queue pertaining
# to this user_profile_id
# that is about to be deleted
gc_hooks = [] # type: List[Callable[[int, ClientDescriptor, bool], None]]
next_queue_id = 0
def add_client_gc_hook(hook):
# type: (Callable[[int, ClientDescriptor, bool], None]) -> None
gc_hooks.append(hook)
def get_client_descriptor(queue_id):
# type: (str) -> ClientDescriptor
return clients.get(queue_id)
def get_client_descriptors_for_user(user_profile_id):
# type: (int) -> List[ClientDescriptor]
return user_clients.get(user_profile_id, [])
def get_client_descriptors_for_realm_all_streams(realm_id):
# type: (int) -> List[ClientDescriptor]
return realm_clients_all_streams.get(realm_id, [])
def add_to_client_dicts(client):
# type: (ClientDescriptor) -> None
user_clients.setdefault(client.user_profile_id, []).append(client)
if client.all_public_streams or client.narrow != []:
realm_clients_all_streams.setdefault(client.realm_id, []).append(client)
def allocate_client_descriptor(new_queue_data):
# type: (MutableMapping[str, Any]) -> ClientDescriptor
global next_queue_id
queue_id = str(settings.SERVER_GENERATION) + ':' + str(next_queue_id)
next_queue_id += 1
new_queue_data["event_queue"] = EventQueue(queue_id).to_dict()
client = ClientDescriptor.from_dict(new_queue_data)
clients[queue_id] = client
add_to_client_dicts(client)
return client
def do_gc_event_queues(to_remove, affected_users, affected_realms):
# type: (AbstractSet[str], AbstractSet[int], AbstractSet[int]) -> None
def filter_client_dict(client_dict, key):
# type: (MutableMapping[int, List[ClientDescriptor]], int) -> None
if key not in client_dict:
return
new_client_list = [c for c in client_dict[key] if c.event_queue.id not in to_remove]
if len(new_client_list) == 0:
del client_dict[key]
else:
client_dict[key] = new_client_list
for user_id in affected_users:
filter_client_dict(user_clients, user_id)
for realm_id in affected_realms:
filter_client_dict(realm_clients_all_streams, realm_id)
for id in to_remove:
for cb in gc_hooks:
cb(clients[id].user_profile_id, clients[id], clients[id].user_profile_id not in user_clients)
del clients[id]
def gc_event_queues():
# type: () -> None
start = time.time()
to_remove = set() # type: Set[str]
affected_users = set() # type: Set[int]
affected_realms = set() # type: Set[int]
for (id, client) in six.iteritems(clients):
if client.idle(start):
to_remove.add(id)
affected_users.add(client.user_profile_id)
affected_realms.add(client.realm_id)
# We don't need to call e.g. finish_current_handler on the clients
# being removed because they are guaranteed to be idle and thus
# not have a current handler.
do_gc_event_queues(to_remove, affected_users, affected_realms)
logging.info(('Tornado removed %d idle event queues owned by %d users in %.3fs.' +
' Now %d active queues, %s')
% (len(to_remove), len(affected_users), time.time() - start,
len(clients), handler_stats_string()))
statsd.gauge('tornado.active_queues', len(clients))
statsd.gauge('tornado.active_users', len(user_clients))
def dump_event_queues():
# type: () -> None
start = time.time()
with open(settings.JSON_PERSISTENT_QUEUE_FILENAME, "w") as stored_queues:
ujson.dump([(qid, client.to_dict()) for (qid, client) in six.iteritems(clients)],
stored_queues)
logging.info('Tornado dumped %d event queues in %.3fs'
% (len(clients), time.time() - start))
def load_event_queues():
# type: () -> None
global clients
start = time.time()
# ujson chokes on bad input pretty easily. We separate out the actual
# file reading from the loading so that we don't silently fail if we get
# bad input.
try:
with open(settings.JSON_PERSISTENT_QUEUE_FILENAME, "r") as stored_queues:
json_data = stored_queues.read()
try:
clients = dict((qid, ClientDescriptor.from_dict(client))
for (qid, client) in ujson.loads(json_data))
except Exception:
logging.exception("Could not deserialize event queues")
except (IOError, EOFError):
pass
for client in six.itervalues(clients):
# Put code for migrations due to event queue data format changes here
add_to_client_dicts(client)
logging.info('Tornado loaded %d event queues in %.3fs'
% (len(clients), time.time() - start))
def send_restart_events(immediate=False):
# type: (bool) -> None
event = dict(type='restart', server_generation=settings.SERVER_GENERATION) # type: Dict[str, Any]
if immediate:
event['immediate'] = True
for client in six.itervalues(clients):
if client.accepts_event(event):
client.add_event(event.copy())
def setup_event_queue():
# type: () -> None
if not settings.TEST_SUITE:
load_event_queues()
atexit.register(dump_event_queues)
# Make sure we dump event queues even if we exit via signal
signal.signal(signal.SIGTERM, lambda signum, stack: sys.exit(1)) # type: ignore # https://github.com/python/mypy/issues/2955
tornado.autoreload.add_reload_hook(dump_event_queues) # type: ignore # TODO: Fix missing tornado.autoreload stub
try:
os.rename(settings.JSON_PERSISTENT_QUEUE_FILENAME, "/var/tmp/event_queues.json.last")
except OSError:
pass
# Set up event queue garbage collection
ioloop = tornado.ioloop.IOLoop.instance()
pc = tornado.ioloop.PeriodicCallback(gc_event_queues,
EVENT_QUEUE_GC_FREQ_MSECS, ioloop)
pc.start()
send_restart_events(immediate=settings.DEVELOPMENT)
def fetch_events(query):
# type: (Mapping[str, Any]) -> Dict[str, Any]
queue_id = query["queue_id"] # type: str
dont_block = query["dont_block"] # type: bool
last_event_id = query["last_event_id"] # type: int
user_profile_id = query["user_profile_id"] # type: int
new_queue_data = query.get("new_queue_data") # type: Optional[MutableMapping[str, Any]]
user_profile_email = query["user_profile_email"] # type: Text
client_type_name = query["client_type_name"] # type: Text
handler_id = query["handler_id"] # type: int
try:
was_connected = False
orig_queue_id = queue_id
extra_log_data = ""
if queue_id is None:
if dont_block:
client = allocate_client_descriptor(new_queue_data)
queue_id = client.event_queue.id
else:
raise JsonableError(_("Missing 'queue_id' argument"))
else:
if last_event_id is None:
raise JsonableError(_("Missing 'last_event_id' argument"))
client = get_client_descriptor(queue_id)
if client is None:
raise JsonableError(_("Bad event queue id: %s") % (queue_id,))
if user_profile_id != client.user_profile_id:
raise JsonableError(_("You are not authorized to get events from this queue"))
client.event_queue.prune(last_event_id)
was_connected = client.finish_current_handler()
if not client.event_queue.empty() or dont_block:
response = dict(events=client.event_queue.contents(),
handler_id=handler_id) # type: Dict[str, Any]
if orig_queue_id is None:
response['queue_id'] = queue_id
if len(response["events"]) == 1:
extra_log_data = "[%s/%s/%s]" % (queue_id, len(response["events"]),
response["events"][0]["type"])
else:
extra_log_data = "[%s/%s]" % (queue_id, len(response["events"]))
if was_connected:
extra_log_data += " [was connected]"
return dict(type="response", response=response, extra_log_data=extra_log_data)
# After this point, dont_block=False, the queue is empty, and we
# have a pre-existing queue, so we wait for new events.
if was_connected:
logging.info("Disconnected handler for queue %s (%s/%s)" % (queue_id, user_profile_email,
client_type_name))
except JsonableError as e:
if hasattr(e, 'to_json_error_msg') and callable(e.to_json_error_msg):
return dict(type="error", handler_id=handler_id,
message=e.to_json_error_msg())
raise e
client.connect_handler(handler_id, client_type_name)
return dict(type="async")
# The following functions are called from Django
# Workaround to support the Python-requests 1.0 transition of .json
# from a property to a function
requests_json_is_function = callable(requests.Response.json)
def extract_json_response(resp):
# type: (requests.Response) -> Dict[str, Any]
if requests_json_is_function:
return resp.json()
else:
return resp.json # type: ignore # mypy trusts the stub, not the runtime type checking of this fn
def request_event_queue(user_profile, user_client, apply_markdown,
queue_lifespan_secs, event_types=None, all_public_streams=False,
narrow=[]):
# type: (UserProfile, Client, bool, int, Optional[Iterable[str]], bool, Iterable[Sequence[Text]]) -> Optional[str]
if settings.TORNADO_SERVER:
req = {'dont_block': 'true',
'apply_markdown': ujson.dumps(apply_markdown),
'all_public_streams': ujson.dumps(all_public_streams),
'client': 'internal',
'user_client': user_client.name,
'narrow': ujson.dumps(narrow),
'lifespan_secs': queue_lifespan_secs}
if event_types is not None:
req['event_types'] = ujson.dumps(event_types)
try:
resp = requests_client.get(settings.TORNADO_SERVER + '/api/v1/events',
auth=requests.auth.HTTPBasicAuth(
user_profile.email, user_profile.api_key),
params=req)
except requests.adapters.ConnectionError:
logging.error('Tornado server does not seem to be running, check %s '
'and %s for more information.' %
(settings.ERROR_FILE_LOG_PATH, "tornado.log"))
raise requests.adapters.ConnectionError(
"Django cannot connect to Tornado server (%s); try restarting" %
(settings.TORNADO_SERVER))
resp.raise_for_status()
return extract_json_response(resp)['queue_id']
return None
def get_user_events(user_profile, queue_id, last_event_id):
# type: (UserProfile, str, int) -> List[Dict]
if settings.TORNADO_SERVER:
resp = requests_client.get(settings.TORNADO_SERVER + '/api/v1/events',
auth=requests.auth.HTTPBasicAuth(
user_profile.email, user_profile.api_key),
params={'queue_id': queue_id,
'last_event_id': last_event_id,
'dont_block': 'true',
'client': 'internal'})
resp.raise_for_status()
return extract_json_response(resp)['events']
return []
# Send email notifications to idle users
# after they are idle for 1 hour
NOTIFY_AFTER_IDLE_HOURS = 1
def build_offline_notification(user_profile_id, message_id):
# type: (int, int) -> Dict[str, Any]
return {"user_profile_id": user_profile_id,
"message_id": message_id,
"timestamp": time.time()}
def missedmessage_hook(user_profile_id, queue, last_for_client):
# type: (int, ClientDescriptor, bool) -> None
# Only process missedmessage hook when the last queue for a
# client has been garbage collected
if not last_for_client:
return
message_ids_to_notify = [] # type: List[Dict[str, Any]]
for event in queue.event_queue.contents():
if event['type'] != 'message' or not event['flags']:
continue
if 'mentioned' in event['flags'] and 'read' not in event['flags']:
notify_info = dict(message_id=event['message']['id'])
if not event.get('push_notified', False):
notify_info['send_push'] = True
if not event.get('email_notified', False):
notify_info['send_email'] = True
message_ids_to_notify.append(notify_info)
for notify_info in message_ids_to_notify:
msg_id = notify_info['message_id']
notice = build_offline_notification(user_profile_id, msg_id)
if notify_info.get('send_push', False):
queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
if notify_info.get('send_email', False):
queue_json_publish("missedmessage_emails", notice, lambda notice: None)
def receiver_is_idle(user_profile_id, realm_presences):
# type: (int, Optional[Dict[int, Dict[Text, Dict[str, Any]]]]) -> bool
# If a user has no message-receiving event queues, they have no open
# Zulip session, so we notify them
all_client_descriptors = get_client_descriptors_for_user(user_profile_id)
message_event_queues = [client for client in all_client_descriptors if client.accepts_messages()]
off_zulip = len(message_event_queues) == 0
# It's possible a recipient is not in the realm of a sender. We don't have
# presence information in this case (and it's hard to get without an additional
# db query) so we simply don't try to guess if this cross-realm recipient
# has been idle for too long
if realm_presences is None or user_profile_id not in realm_presences:
return off_zulip
# We want to find the newest "active" presence entity and compare that to the
# activity expiry threshold.
user_presence = realm_presences[user_profile_id]
latest_active_timestamp = None
idle = False
for client, status in six.iteritems(user_presence):
if (latest_active_timestamp is None or status['timestamp'] > latest_active_timestamp) and \
status['status'] == 'active':
latest_active_timestamp = status['timestamp']
if latest_active_timestamp is None:
idle = True
else:
active_datetime = timestamp_to_datetime(latest_active_timestamp)
# 140 seconds is consistent with presence.js:OFFLINE_THRESHOLD_SECS
idle = timezone_now() - active_datetime > datetime.timedelta(seconds=140)
return off_zulip or idle
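# Illustrative (not executed): a user whose newest 'active' presence entry
# is 200 seconds old exceeds the 140-second threshold above and is treated
# as idle, even if a message-receiving event queue is still open for them.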
def process_message_event(event_template, users):
# type: (Mapping[str, Any], Iterable[Mapping[str, Any]]) -> None
realm_presences = {int(k): v for k, v in event_template['presences'].items()} # type: Dict[int, Dict[Text, Dict[str, Any]]]
sender_queue_id = event_template.get('sender_queue_id', None) # type: Optional[str]
message_dict_markdown = event_template['message_dict_markdown'] # type: Dict[str, Any]
message_dict_no_markdown = event_template['message_dict_no_markdown'] # type: Dict[str, Any]
sender_id = message_dict_markdown['sender_id'] # type: int
message_id = message_dict_markdown['id'] # type: int
message_type = message_dict_markdown['type'] # type: str
sending_client = message_dict_markdown['client'] # type: Text
# To remove duplicate clients: Maps queue ID to {'client': Client, 'flags': flags}
send_to_clients = {} # type: Dict[str, Dict[str, Any]]
# Extra user-specific data to include
extra_user_data = {} # type: Dict[int, Any]
if 'stream_name' in event_template and not event_template.get("invite_only"):
for client in get_client_descriptors_for_realm_all_streams(event_template['realm_id']):
send_to_clients[client.event_queue.id] = {'client': client, 'flags': None}
if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
send_to_clients[client.event_queue.id]['is_sender'] = True
for user_data in users:
user_profile_id = user_data['id'] # type: int
flags = user_data.get('flags', []) # type: Iterable[str]
for client in get_client_descriptors_for_user(user_profile_id):
send_to_clients[client.event_queue.id] = {'client': client, 'flags': flags}
if sender_queue_id is not None and client.event_queue.id == sender_queue_id:
send_to_clients[client.event_queue.id]['is_sender'] = True
# If the recipient was offline and the message was a private message
# to them, or they were @-mentioned, potentially notify more immediately
received_pm = message_type == "private" and user_profile_id != sender_id
mentioned = 'mentioned' in flags
idle = receiver_is_idle(user_profile_id, realm_presences)
always_push_notify = user_data.get('always_push_notify', False)
if (received_pm or mentioned) and (idle or always_push_notify):
notice = build_offline_notification(user_profile_id, message_id)
queue_json_publish("missedmessage_mobile_notifications", notice, lambda notice: None)
notified = dict(push_notified=True) # type: Dict[str, bool]
# Don't send missed message emails if always_push_notify is True
if idle:
# We require RabbitMQ to do this, as we can't call the email handler
# from the Tornado process. So if there's no rabbitmq support do nothing
queue_json_publish("missedmessage_emails", notice, lambda notice: None)
notified['email_notified'] = True
extra_user_data[user_profile_id] = notified
for client_data in six.itervalues(send_to_clients):
client = client_data['client']
flags = client_data['flags']
is_sender = client_data.get('is_sender', False) # type: bool
extra_data = extra_user_data.get(client.user_profile_id, None) # type: Optional[Mapping[str, bool]]
if not client.accepts_messages():
# The actual check is the accepts_event() check below;
# this line is just an optimization to avoid copying
# message data unnecessarily
continue
if client.apply_markdown:
message_dict = message_dict_markdown
else:
message_dict = message_dict_no_markdown
# Make sure Zephyr mirroring bots know whether stream is invite-only
if "mirror" in client.client_type_name and event_template.get("invite_only"):
message_dict = message_dict.copy()
message_dict["invite_only_stream"] = True
if flags is not None:
message_dict['is_mentioned'] = 'mentioned' in flags
user_event = dict(type='message', message=message_dict, flags=flags) # type: Dict[str, Any]
if extra_data is not None:
user_event.update(extra_data)
if is_sender:
local_message_id = event_template.get('local_id', None)
if local_message_id is not None:
user_event["local_message_id"] = local_message_id
if not client.accepts_event(user_event):
continue
# The below prevents (Zephyr) mirroring loops.
if ('mirror' in sending_client and
sending_client.lower() == client.client_type_name.lower()):
continue
client.add_event(user_event)
def process_event(event, users):
# type: (Mapping[str, Any], Iterable[int]) -> None
for user_profile_id in users:
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(event):
client.add_event(dict(event))
def process_userdata_event(event_template, users):
# type: (Mapping[str, Any], Iterable[Mapping[str, Any]]) -> None
for user_data in users:
user_profile_id = user_data['id']
user_event = dict(event_template) # shallow copy, but deep enough for our needs
for key in user_data.keys():
if key != "id":
user_event[key] = user_data[key]
for client in get_client_descriptors_for_user(user_profile_id):
if client.accepts_event(user_event):
client.add_event(user_event)
def process_notification(notice):
# type: (Mapping[str, Any]) -> None
event = notice['event'] # type: Mapping[str, Any]
users = notice['users'] # type: Union[Iterable[int], Iterable[Mapping[str, Any]]]
if event['type'] in ["update_message"]:
process_userdata_event(event, cast(Iterable[Mapping[str, Any]], users))
elif event['type'] == "message":
process_message_event(event, cast(Iterable[Mapping[str, Any]], users))
else:
process_event(event, cast(Iterable[int], users))
# Runs in the Django process to send a notification to Tornado.
#
# We use JSON rather than bare form parameters, so that we can represent
# different types and for compatibility with non-HTTP transports.
def send_notification_http(data):
# type: (Mapping[str, Any]) -> None
if settings.TORNADO_SERVER and not settings.RUNNING_INSIDE_TORNADO:
requests_client.post(settings.TORNADO_SERVER + '/notify_tornado', data=dict(
data=ujson.dumps(data),
secret=settings.SHARED_SECRET))
else:
process_notification(data)
def send_notification(data):
# type: (Mapping[str, Any]) -> None
queue_json_publish("notify_tornado", data, send_notification_http)
def send_event(event, users):
# type: (Mapping[str, Any], Union[Iterable[int], Iterable[Mapping[str, Any]]]) -> None
"""`users` is a list of user IDs, or in the case of `message` type
events, a list of dicts describing the users and metadata about
the user/message pair."""
queue_json_publish("notify_tornado",
dict(event=event, users=users),
send_notification_http)
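# Illustrative (not executed) call from Django code, with a hypothetical
# user id:
#   send_event(dict(type='pointer', pointer=5), [user_profile_id])
# publishes to the "notify_tornado" queue, and Tornado fans the event out
# to that user's event queues via process_notification().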
# --- next source file ---
from common_fixtures import * # NOQA
from cattle import ApiError
@pytest.fixture(scope='module')
def nsp(super_client, sim_context):
nsp = create_agent_instance_nsp(super_client, sim_context)
create_and_activate(super_client, 'networkService',
networkServiceProviderId=nsp.id,
networkId=nsp.networkId)
return nsp
def random_str():
return 'random{0}'.format(random_num())
def create_env_and_svc(super_client, admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service = super_client.wait_success(service)
assert service.state == "inactive"
return service, env
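# Hypothetical usage from a test body (sim_context is the module fixture):
#   service, env = create_env_and_svc(super_client, admin_client,
#                                     sim_context, nsp)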
def test_activate_single_service(super_client, admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
host = sim_context['host']
container1 = admin_client.create_container(imageUuid=image_uuid,
startOnCreate=False)
container1 = admin_client.wait_success(container1)
container2 = admin_client.create_container(imageUuid=image_uuid,
startOnCreate=False)
container2 = admin_client.wait_success(container2)
caps = ["SYS_MODULE"]
restart_policy = {"maximumRetryCount": 2, "name": "on-failure"}
dns = ['8.8.8.8', '1.2.3.4']
launch_config = {"imageUuid": image_uuid}
consumed_service = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
consumed_service = super_client.wait_success(consumed_service)
reg_cred = _create_registry_credential(admin_client)
launch_config = {"imageUuid": image_uuid,
"command": ['sleep', '42'],
"environment": {'TEST_FILE': "/etc/testpath.conf"},
"ports": ['8081', '8082/tcp'],
"dataVolumes": ['/foo'],
"dataVolumesFrom": [container1.id],
"capAdd": caps,
"capDrop": caps,
"dnsSearch": dns,
"dns": dns,
"privileged": True,
"domainName": "rancher.io",
"memory": 8000000,
"stdinOpen": True,
"tty": True,
"entryPoint": ["/bin/sh", "-c"],
"cpuShares": 400,
"cpuSet": "2",
"restartPolicy": restart_policy,
"directory": "/",
"hostname": "test",
"user": "test",
"instanceLinks": {
'container2_link':
container2.id},
"registryCredentialId": reg_cred.id,
"requestedHostId": host.id}
service = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service = super_client.wait_success(service)
# validate that parameters were set for service
assert service.state == "inactive"
assert service.launchConfig.imageUuid == image_uuid
assert service.launchConfig.command == ['sleep', '42']
assert len(service.launchConfig.environment) == 1
assert len(service.launchConfig.ports) == 2
assert len(service.launchConfig.dataVolumes) == 1
# assert set(service.launchConfig.dataVolumesFrom) == set([container1.id])
assert service.launchConfig.capAdd == caps
assert service.launchConfig.capDrop == caps
assert service.launchConfig.dns == dns
assert service.launchConfig.dnsSearch == dns
assert service.launchConfig.privileged is True
assert service.launchConfig.domainName == "rancher.io"
assert service.launchConfig.memory == 8000000
assert service.launchConfig.stdinOpen is True
assert service.launchConfig.tty is True
assert service.launchConfig.entryPoint == ["/bin/sh", "-c"]
assert service.launchConfig.cpuShares == 400
assert service.launchConfig.restartPolicy == restart_policy
assert service.launchConfig.directory == "/"
assert service.launchConfig.hostname == "test"
assert service.launchConfig.user == "test"
assert len(service.launchConfig.instanceLinks) == 1
assert service.kind == "service"
# assert service.launchConfig.registryCredentialId == reg_cred.id
# activate the service and validate that parameters were set for instance
service = wait_success(super_client, service.activate(), 120)
assert service.state == "active"
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
super_client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
instances = super_client. \
list_container(name=env.name + "_" + service.name + "_" + "1")
assert len(instances) == 1
container = instances[0]
assert container.imageUuid == image_uuid
assert container.command == ['sleep', '42']
assert len(container.instanceLinks()) == 1
assert len(container.environment) == 1
assert len(container.ports()) == 2
assert len(container.dataVolumes) == 1
assert set(container.dataVolumesFrom) == set([container1.id])
assert container.capAdd == caps
assert container.capDrop == caps
assert container.dns == dns
assert container.dnsSearch == dns
assert container.privileged is True
assert container.domainName == "rancher.io"
assert container.memory == 8000000
assert container.stdinOpen is True
assert container.tty is True
assert container.entryPoint == ["/bin/sh", "-c"]
assert container.cpuShares == 400
assert container.restartPolicy == restart_policy
assert container.directory == "/"
assert container.hostname == "test"
assert container.user == "test"
assert container.state == "running"
assert container.registryCredentialId == reg_cred.id
assert container.cpuSet == "2"
assert container.requestedHostId == host.id
def test_activate_services(super_client, admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service1 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service1 = super_client.wait_success(service1)
assert service1.state == "inactive"
service2 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service2 = super_client.wait_success(service2)
assert service2.state == "inactive"
env = env.activateservices()
service1 = super_client.wait_success(service1, 120)
service2 = super_client.wait_success(service2, 120)
assert service1.state == "active"
assert service2.state == "active"
def _validate_instance_stopped(service, super_client, env):
instances = super_client. \
list_container(name=env.name + "_" + service.name + "_" + "1")
assert len(instances) == 1
instance = instances[0]
wait_for_condition(
super_client, instance, _resource_is_stopped,
lambda x: 'State is: ' + x.state)
def _validate_compose_instance_removed(super_client, service, env, number="1"):
instances = super_client. \
list_container(name=env.name + "_" + service.name + "_" + number)
assert len(instances) == 1
instance = instances[0]
wait_for_condition(
super_client, instance, _resource_is_removed,
lambda x: 'State is: ' + x.state)
def _validate_instance_removed(super_client, service, name):
instances = super_client. \
list_container(name=name)
assert len(instances) == 1
instance = instances[0]
wait_for_condition(
super_client, instance, _resource_is_removed,
lambda x: 'State is: ' + x.state)
def test_deactivate_remove_service(super_client, admin_client,
sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service = super_client.wait_success(service)
assert service.state == "inactive"
service = wait_success(super_client, service.activate(), 120)
assert service.state == "active"
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
super_client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
_validate_compose_instance_start(super_client, service, env, "1")
# deactivate service
service = wait_success(super_client, service.deactivate())
assert service.state == "inactive"
_validate_instance_stopped(service, super_client, env)
# remove service
service = wait_success(super_client, service.remove())
_validate_compose_instance_removed(super_client, service, env)
def test_env_deactivate_services(super_client, admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service1 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service1 = super_client.wait_success(service1)
assert service1.state == "inactive"
service2 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service2 = super_client.wait_success(service2)
assert service2.state == "inactive"
# activate services
env = env.activateservices()
service1 = super_client.wait_success(service1, 120)
service2 = super_client.wait_success(service2, 120)
assert service1.state == "active"
assert service2.state == "active"
# deactivate services
env.deactivateservices()
service1 = super_client.wait_success(service1)
service2 = super_client.wait_success(service2)
assert service1.state == "inactive"
assert service2.state == "inactive"
_validate_instance_stopped(service1, super_client, env)
_validate_instance_stopped(service2, super_client, env)
def test_remove_inactive_service(super_client, admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service = super_client.wait_success(service)
assert service.state == "inactive"
# activate service
service = wait_success(super_client, service.activate(), 120)
assert service.state == "active"
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
super_client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
_validate_compose_instance_start(super_client, service, env, "1")
# deactivate service
service = wait_success(super_client, service.deactivate())
assert service.state == "inactive"
# remove service
service = wait_success(super_client, service.remove())
assert service.state == "removed"
_validate_compose_instance_removed(super_client, service, env)
def test_remove_environment(super_client, admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service = super_client.wait_success(service)
assert service.state == "inactive"
# activate services
env = env.activateservices()
service = super_client.wait_success(service, 120)
assert service.state == "active"
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
super_client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
_validate_compose_instance_start(super_client, service, env, "1")
# deactivate services
env = env.deactivateservices()
service = super_client.wait_success(service)
assert service.state == "inactive"
# remove environment
env = wait_success(admin_client, env.remove())
assert env.state == "removed"
wait_for_condition(
super_client, service, _resource_is_removed,
lambda x: 'State is: ' + x.state)
def test_create_duplicated_services(super_client, admin_client,
sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service_name = random_str()
service1 = super_client.create_service(name=service_name,
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
super_client.wait_success(service1)
with pytest.raises(ApiError) as e:
super_client.create_service(name=service_name,
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
assert e.value.error.fieldName == 'name'
def test_service_add_remove_service_link(super_client, admin_client,
sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service1 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service1 = super_client.wait_success(service1)
service2 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service2 = super_client.wait_success(service2)
# link service2 to service1
service1 = service1.addservicelink(serviceId=service2.id)
_validate_add_service_link(service1, service2, super_client)
# remove service link
service1 = service1.removeservicelink(serviceId=service2.id)
_validate_remove_service_link(service1, service2, super_client)
def test_link_service_twice(super_client, admin_client,
sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service1 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service1 = super_client.wait_success(service1)
service2 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service2 = super_client.wait_success(service2)
# link service2 to service1
service1 = service1.addservicelink(serviceId=service2.id)
_validate_add_service_link(service1, service2, super_client)
# try to link again
with pytest.raises(ApiError) as e:
service1.addservicelink(serviceId=service2.id)
assert e.value.error.status == 422
assert e.value.error.code == 'NotUnique'
assert e.value.error.fieldName == 'serviceId'
def test_links_after_service_remove(super_client, admin_client,
sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service1 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service1 = super_client.wait_success(service1)
service2 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service2 = super_client.wait_success(service2)
    # link service2 to service1
service1 = service1.addservicelink(serviceId=service2.id)
_validate_add_service_link(service1, service2, super_client)
# link service1 to service2
service2 = service2.addservicelink(serviceId=service1.id)
_validate_add_service_link(service2, service1, super_client)
# remove service1
service1 = wait_success(super_client, service1.remove())
_validate_remove_service_link(service1, service2, super_client)
_validate_remove_service_link(service2, service1, super_client)
def test_link_volumes(super_client, admin_client,
sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid,
"labels": {'io.rancher.service.sidekick': "random"}}
service1 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service1 = super_client.wait_success(service1)
service1 = wait_success(super_client, service1.activate(), 120)
container1 = _validate_compose_instance_start(super_client,
service1, env, "1")
external_container = super_client.create_container(
imageUuid=image_uuid,
requestedHostId=container1.hosts()[0].id)
external_container = super_client.wait_success(external_container)
launch_config = {"imageUuid": image_uuid,
"dataVolumesFrom": [external_container.id],
"labels": {'io.rancher.service.sidekick': "random"}}
service2 = super_client. \
create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
dataVolumesFromService=[service1.id])
service2 = super_client.wait_success(service2)
service2 = wait_success(super_client, service2.activate(), 120)
container2 = _validate_compose_instance_start(super_client,
service2, env, "1")
# verify that the instance started in service2,
# got volume of instance of service1
assert len(container2.dataVolumesFrom) == 2
assert set(container2.dataVolumesFrom) == set([external_container.id,
container1.id])
def test_volumes_service_links_scale_one(super_client, admin_client,
sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid,
"labels": {'io.rancher.service.sidekick': "random"}}
service1 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service1 = super_client.wait_success(service1)
launch_config = {"imageUuid": image_uuid,
"labels": {'io.rancher.service.sidekick': "random"}}
service2 = super_client. \
create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
dataVolumesFromService=[service1.id])
service2 = super_client.wait_success(service2)
service3 = super_client. \
create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
dataVolumesFromService=[service1.id, service2.id])
service3 = super_client.wait_success(service3)
service1 = wait_success(super_client, service1.activate(), 120)
service2 = super_client.wait_success(service2, 120)
service3 = super_client.wait_success(service3, 120)
assert service1.state == "active"
assert service3.state == "active"
assert service2.state == "active"
# 2. validate instances
s1_container = _validate_compose_instance_start(super_client,
service1, env, "1")
s2_container = _validate_compose_instance_start(super_client,
service2, env, "1")
s3_container = _validate_compose_instance_start(super_client,
service3, env, "1")
assert len(s2_container.dataVolumesFrom) == 1
assert set(s2_container.dataVolumesFrom) == set([s1_container.id])
assert len(s3_container.dataVolumesFrom) == 2
assert set(s3_container.dataVolumesFrom) == set([s1_container.id,
s2_container.id])
def test_volumes_service_links_scale_two(super_client, admin_client,
sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid,
"labels": {'io.rancher.service.sidekick': "random"}}
service1 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
scale=2)
service1 = super_client.wait_success(service1)
launch_config = {"imageUuid": image_uuid,
"labels": {'io.rancher.service.sidekick': "random"}}
service2 = super_client. \
create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
dataVolumesFromService=[service1.id],
scale=2)
service2 = super_client.wait_success(service2)
service1 = wait_success(super_client, service1.activate(), 120)
service2 = super_client.wait_success(service2, 120)
assert service1.state == "active"
assert service2.state == "active"
# 2. validate instances
_validate_compose_instance_start(super_client,
service1, env, "1")
_validate_compose_instance_start(super_client,
service1, env, "2")
s21_container = _validate_compose_instance_start(super_client,
service2, env, "1")
s22_container = _validate_compose_instance_start(super_client,
service2, env, "2")
assert len(s22_container.dataVolumesFrom) == 1
assert len(s21_container.dataVolumesFrom) == 1
def test_remove_active_service(super_client, admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service = super_client.wait_success(service)
assert service.state == "inactive"
# activate service
service = wait_success(super_client, service.activate(), 120)
assert service.state == "active"
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
super_client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
_validate_compose_instance_start(super_client, service, env, "1")
# remove service
service = wait_success(super_client, service.remove(), 120)
assert service.state == "removed"
_validate_compose_instance_removed(super_client, service, env)
def _wait_until_active_map_count(service, count, super_client, timeout=30):
# need this function because agent state changes
# active->deactivating->removed
start = time.time()
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id, state="active")
while len(instance_service_map) != count:
time.sleep(.5)
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id, state="active")
if time.time() - start > timeout:
            assert False, 'Timeout waiting for map to be removed.'
def test_remove_environment_w_active_svcs(super_client,
admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service = super_client.wait_success(service)
assert service.state == "inactive"
# activate services
env = env.activateservices()
service = super_client.wait_success(service, 120)
assert service.state == "active"
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id)
assert len(instance_service_map) == 1
wait_for_condition(
super_client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
_validate_compose_instance_start(super_client, service, env, "1")
# remove environment
env = wait_success(admin_client, env.remove())
assert env.state == "removed"
service = super_client.wait_success(service)
_validate_compose_instance_removed(super_client, service, env)
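# Compose-created containers are named '<env>_<service>_<index>'; the helpers
# below rely on that naming when looking instances up to verify service state.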
def _validate_compose_instance_start(super_client, service, env, number):
instances = super_client. \
list_container(name=env.name + "_" + service.name + "_" + number,
state="running")
assert len(instances) == 1
return instances[0]
def _validate_instance_start(service, super_client, name):
instances = super_client. \
list_container(name=name)
assert len(instances) == 1
return instances[0]
def test_validate_service_scaleup_scaledown(super_client,
admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
scale=2)
service = super_client.wait_success(service)
assert service.state == "inactive"
# scale up the inactive service
service = super_client.update(service, scale=3, name=service.name)
service = super_client.wait_success(service, 120)
assert service.state == "inactive"
assert service.scale == 3
# activate services
env.activateservices()
service = super_client.wait_success(service, 120)
assert service.state == "active"
_validate_compose_instance_start(super_client, service, env, "1")
instance2 = _validate_compose_instance_start(super_client, service,
env, "2")
instance3 = _validate_compose_instance_start(super_client, service,
env, "3")
# stop the instance2
instance2 = wait_success(super_client, instance2)
instance2 = wait_success(super_client, instance2.stop())
assert instance2.state == 'stopped'
# rename the instance 3
instance3 = super_client.update(instance3, name='newName')
# scale up the service
# instance 2 should get started; env_service_3 name should be utilized
service = super_client.update(service, scale=4, name=service.name)
service = super_client.wait_success(service, 120)
assert service.state == "active"
assert service.scale == 4
_validate_compose_instance_start(super_client, service, env, "1")
_validate_compose_instance_start(super_client, service, env, "2")
_validate_compose_instance_start(super_client, service, env, "3")
_validate_instance_start(service, super_client, instance3.name)
# scale down the service
service = super_client.update(service, scale=2, name=service.name)
service = super_client.wait_success(service, 120)
assert service.state == "active"
# validate that only 2 service instance mappings exist
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id, state="active")
assert len(instance_service_map) == 2
def test_link_services_from_diff_env(super_client, admin_client,
sim_context, nsp):
env1 = admin_client.create_environment(name=random_str())
env1 = admin_client.wait_success(env1)
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service1 = super_client.create_service(name=random_str(),
environmentId=env1.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service1 = super_client.wait_success(service1)
env2 = admin_client.create_environment(name=random_str())
env2 = admin_client.wait_success(env2)
service2 = super_client.create_service(name=random_str(),
environmentId=env2.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service2 = super_client.wait_success(service2)
# try to link
with pytest.raises(ApiError) as e:
service1.addservicelink(serviceId=service2.id)
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidReference'
assert e.value.error.fieldName == 'serviceId'
def test_set_service_links(super_client, admin_client,
sim_context, nsp):
env1 = admin_client.create_environment(name=random_str())
env1 = admin_client.wait_success(env1)
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service1 = super_client.create_service(name=random_str(),
environmentId=env1.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service1 = super_client.wait_success(service1)
service2 = super_client.create_service(name=random_str(),
environmentId=env1.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service2 = super_client.wait_success(service2)
service3 = super_client.create_service(name=random_str(),
environmentId=env1.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service3 = super_client.wait_success(service3)
# set service2, service3 links for service1
service1 = service1.setservicelinks(serviceIds=[service2.id, service3.id])
_validate_add_service_link(service1, service2, super_client)
_validate_add_service_link(service1, service3, super_client)
# set service2 links for service1
service1 = service1.setservicelinks(serviceIds=[service2.id])
_validate_add_service_link(service1, service2, super_client)
_validate_remove_service_link(service1, service3, super_client)
# set empty service link set
service1 = service1.setservicelinks(serviceIds=[])
_validate_remove_service_link(service1, service2, super_client)
_validate_remove_service_link(service1, service3, super_client)
# try to link to the service from diff environment
env2 = admin_client.create_environment(name=random_str())
env2 = admin_client.wait_success(env2)
service4 = super_client.create_service(name=random_str(),
environmentId=env2.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service4 = super_client.wait_success(service4)
with pytest.raises(ApiError) as e:
service1.setservicelinks(serviceIds=[service4.id])
assert e.value.error.status == 422
assert e.value.error.code == 'InvalidReference'
assert e.value.error.fieldName == 'serviceId'
def _instance_remove(instance, super_client):
instance = wait_success(super_client, instance)
instance = wait_success(super_client, instance.stop())
assert instance.state == 'stopped'
instance = wait_success(super_client, instance.remove())
assert instance.state == 'removed'
return instance
def test_destroy_service_instance(super_client,
admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
scale=3)
service = super_client.wait_success(service)
assert service.state == "inactive"
# activate service
service.activate()
service = super_client.wait_success(service, 120)
assert service.state == "active"
instance1 = _validate_compose_instance_start(super_client, service,
env, "1")
instance2 = _validate_compose_instance_start(super_client, service,
env, "2")
instance3 = _validate_compose_instance_start(super_client, service,
env, "3")
# 1. stop and remove the instance2. Validate the mapping still exist
instance2 = _instance_remove(instance2, super_client)
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id, instanceId=instance2.id)
assert len(instance_service_map) == 1
wait_for_condition(
super_client, instance_service_map[0], _resource_is_active,
lambda x: 'State is: ' + x.state)
# 2. deactivate the service
service.deactivate()
service = super_client.wait_success(service, 120)
assert service.state == "inactive"
# 3. activate the service. The map should be gone
service.activate()
service = super_client.wait_success(service, 120)
assert service.state == "active"
# 4. destroy instance3 and update the service's scale.
# Validate that instance3 map is gone
instance3 = _instance_remove(instance3, super_client)
service = super_client.update(service, scale=4, name=service.name)
service = super_client.wait_success(service, 120)
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id, instanceId=instance3.id)
assert len(instance_service_map) == 1
wait_for_condition(
super_client, instance_service_map[0], _resource_is_removed,
lambda x: 'State is: ' + x.state)
# purge the instance1 w/o changing the service
# and validate instance1-service map is gone
instance1 = _instance_remove(instance1, super_client)
instance1 = wait_success(super_client, instance1.purge())
assert instance1.state == 'purged'
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id, instanceId=instance1.id)
assert len(instance_service_map) == 1
wait_for_condition(
super_client, instance_service_map[0], _resource_is_removed,
lambda x: 'State is: ' + x.state)
def test_service_rename(super_client, admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
scale=2)
service = super_client.wait_success(service)
# activate service
service.activate()
service = super_client.wait_success(service, 120)
assert service.state == "active"
_validate_compose_instance_start(super_client, service, env, "1")
_validate_compose_instance_start(super_client, service, env, "2")
# update name and validate that the service name got
# updated as well as its instances
new_name = "newname"
service = super_client.update(service, scale=3, name=new_name)
service = super_client.wait_success(service)
assert service.name == new_name
_validate_compose_instance_start(super_client, service, env, "1")
_validate_compose_instance_start(super_client, service, env, "2")
def test_env_rename(super_client, admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service_1 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
scale=2)
service_1 = super_client.wait_success(service_1)
service_2 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
scale=1)
service_2 = super_client.wait_success(service_2)
# activate services
env = env.activateservices()
service_1 = super_client.wait_success(service_1, 120)
service_2 = super_client.wait_success(service_2, 120)
assert service_1.state == "active"
assert service_2.state == "active"
_validate_compose_instance_start(super_client, service_1, env, "1")
_validate_compose_instance_start(super_client, service_1, env, "2")
_validate_compose_instance_start(super_client, service_2, env, "1")
# update env name and validate that the
# env name got updated as well as all instances
new_name = "newname"
env = admin_client.update(env, name=new_name)
env = admin_client.wait_success(env)
assert env.name == new_name
_validate_compose_instance_start(super_client, service_1, env, "1")
_validate_compose_instance_start(super_client, service_1, env, "2")
_validate_compose_instance_start(super_client, service_2, env, "1")
def test_validate_scale_down_restore_state(super_client,
admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid}
service = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
scale=3)
service = super_client.wait_success(service)
assert service.state == "inactive"
# activate services
env.activateservices()
service = super_client.wait_success(service, 120)
assert service.state == "active"
instance1 = _validate_compose_instance_start(super_client, service,
env, "1")
instance2 = _validate_compose_instance_start(super_client, service,
env, "2")
instance3 = _validate_compose_instance_start(super_client, service,
env, "3")
# stop the instances 1, 2 and destroy instance 3
instance1 = wait_success(super_client, instance1.stop())
assert instance1.state == 'stopped'
instance2 = wait_success(super_client, instance2.stop())
assert instance2.state == 'stopped'
instance3 = _instance_remove(instance3, super_client)
assert instance3.state == 'removed'
# scale down the service and validate that:
# first instance is running
# second instance is removed
# third instance is removed
service = super_client.update(service, scale=1, name=service.name)
super_client.wait_success(service)
# validate that only one service instance mapping exists
instance_service_map = super_client. \
list_serviceExposeMap(serviceId=service.id, state="active")
assert len(instance_service_map) == 1
def test_validate_labels(super_client, admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
# create service1 with labels defined
service_name1 = random_str()
initial_labels1 = {'affinity': "container==B", '!affinity': "container==C"}
image_uuid = sim_context['imageUuid']
launch_config1 = {"imageUuid": image_uuid, "labels": initial_labels1}
service1 = super_client.create_service(name=service_name1,
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config1)
service1 = super_client.wait_success(service1)
assert service1.state == "inactive"
assert service1.launchConfig.labels == initial_labels1
# create service2 w/o labels defined
service_name2 = random_str()
image_uuid = sim_context['imageUuid']
launch_config2 = {"imageUuid": image_uuid}
service2 = super_client.create_service(name=service_name2,
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config2)
service2 = super_client.wait_success(service2)
assert service2.state == "inactive"
assert "labels" not in service2.launchConfig
# activate services
env.activateservices()
service1 = super_client.wait_success(service1, 120)
assert service1.state == "active"
service2 = super_client.wait_success(service2, 120)
assert service2.state == "active"
# check that labels defined in launch config + the internal label, are set
result_labels_1 = {'affinity': 'container==B', '!affinity': "container==C",
'io.rancher.service.name': service_name1,
'io.rancher.environment.name': env.name}
instance1 = _validate_compose_instance_start(super_client, service1,
env, "1")
assert all(item in instance1.labels for item in result_labels_1) is True
# check that only one internal label is set
result_labels_2 = {'io.rancher.service.name': service_name2,
'io.rancher.environment.name': env.name}
instance2 = _validate_compose_instance_start(super_client, service2,
env, "1")
assert all(item in instance2.labels for item in result_labels_2) is True
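# The sidekick tests below exercise the 'io.rancher.service.sidekick' label:
# services sharing the same label value activate and scale together, while
# services with a different value, or no label at all, are left untouched.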
def test_sidekick_services_activate(super_client,
admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
# create service1/service2 with the same sidekick label defined
# service3 with a diff sidekick label, and service4 with no label
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid,
"labels": {'io.rancher.service.sidekick': "random"}}
service1 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service1 = super_client.wait_success(service1)
service2 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config)
service2 = super_client.wait_success(service2)
launch_config1 = {"imageUuid": image_uuid}
service3 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config1)
service3 = super_client.wait_success(service3)
launch_config2 = {"imageUuid": image_uuid,
"labels": {'io.rancher.service.sidekick': "random123"}}
service4 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config2)
service4 = super_client.wait_success(service4)
# activate service1, service 2 should be activated too
service1 = wait_success(super_client, service1.activate(), 120)
assert service1.state == "active"
service2 = super_client.wait_success(service2, 120)
assert service2.state == "active"
# service 3 and 4 should be inactive
service3 = super_client.wait_success(service3)
assert service3.state == "inactive"
service4 = super_client.wait_success(service4)
assert service4.state == "inactive"
def test_sidekick_restart_instances(super_client,
admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
# create service1/service2 with the same sidekick label defined
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid,
"labels": {'io.rancher.service.sidekick': "random"}}
service1 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
scale=2)
service1 = super_client.wait_success(service1)
service2 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config, scale=2)
service2 = super_client.wait_success(service2)
# activate service1, service 2 should be activated too
service1 = wait_success(super_client, service1.activate(), 120)
assert service1.state == "active"
service2 = super_client.wait_success(service2, 120)
assert service2.state == "active"
instance11 = _validate_compose_instance_start(super_client,
service1, env, "1")
_validate_compose_instance_start(super_client, service1, env, "2")
_validate_compose_instance_start(super_client, service2, env, "1")
instance22 = _validate_compose_instance_start(super_client,
service2, env, "2")
instance_service_map1 = super_client. \
list_serviceExposeMap(serviceId=service1.id, state="active")
assert len(instance_service_map1) == 2
instance_service_map2 = super_client. \
list_serviceExposeMap(serviceId=service2.id, state="active")
assert len(instance_service_map2) == 2
    # stop instance11, destroy instance22 and call update on service1
# scale should be restored
wait_success(super_client, instance11.stop())
_instance_remove(instance22, super_client)
service1 = super_client.update(service1, scale=2, name=service1.name)
service1 = super_client.wait_success(service1, 120)
_validate_compose_instance_start(super_client, service1, env, "1")
_validate_compose_instance_start(super_client, service1, env, "2")
_validate_compose_instance_start(super_client, service2, env, "1")
_validate_compose_instance_start(super_client, service2, env, "2")
instance_service_map1 = super_client. \
list_serviceExposeMap(serviceId=service1.id, state="active")
assert len(instance_service_map1) == 2
instance_service_map2 = super_client. \
list_serviceExposeMap(serviceId=service2.id, state="active")
assert len(instance_service_map2) == 2
def test_sidekick_scaleup(super_client, admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
# create service1/service2 with the same sidekick label defined
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid,
"labels": {'io.rancher.service.sidekick': "random"}}
service1 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
scale=1)
service1 = super_client.wait_success(service1)
service2 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config, scale=1)
service2 = super_client.wait_success(service2)
# activate service1, service 2 should be activated too
service1 = wait_success(super_client, service1.activate(), 120)
assert service1.state == "active"
service2 = super_client.wait_success(service2, 120)
assert service2.state == "active"
_validate_compose_instance_start(super_client, service1, env, "1")
_validate_compose_instance_start(super_client, service2, env, "1")
# scale up service1, verify that the service 2 was scaled up and updated
service1 = super_client.update(service1, scale=2, name=service1.name)
_wait_compose_instance_start(super_client, service1, env, "1")
_wait_compose_instance_start(super_client, service1, env, "2")
_wait_compose_instance_start(super_client, service2, env, "1")
_wait_compose_instance_start(super_client, service2, env, "2")
service1 = super_client.wait_success(service1, 120)
assert service1.state == "active"
assert service1.scale == 2
service2 = super_client.wait_success(service2, 120)
assert service2.state == "active"
assert service2.scale == 2
instance_service_map1 = super_client. \
list_serviceExposeMap(serviceId=service1.id, state="active")
assert len(instance_service_map1) == 2
instance_service_map2 = super_client. \
list_serviceExposeMap(serviceId=service2.id, state="active")
assert len(instance_service_map2) == 2
def test_sidekick_diff_scale(super_client, admin_client, sim_context, nsp):
env = admin_client.create_environment(name=random_str())
env = admin_client.wait_success(env)
assert env.state == "active"
# create service1/service2 with the same sidekick label defined,
# but diff scale - should fail
image_uuid = sim_context['imageUuid']
launch_config = {"imageUuid": image_uuid,
"labels": {'io.rancher.service.sidekick': "random"}}
service1 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
scale=2)
service1 = super_client.wait_success(service1)
assert service1.scale == 2
service2 = super_client.create_service(name=random_str(),
environmentId=env.id,
networkId=nsp.networkId,
launchConfig=launch_config,
scale=3)
service2 = super_client.wait_success(service2)
assert service2.scale == 2
def _wait_compose_instance_start(super_client, service,
env, number, timeout=30):
start = time.time()
instances = super_client. \
list_container(name=env.name + "_" + service.name + "_" + number,
state="running")
while len(instances) != 1:
time.sleep(.5)
instances = super_client. \
list_container(name=env.name + "_" + service.name + "_" + number,
state="running")
if time.time() - start > timeout:
            assert False, 'Timeout waiting for instance to become running.'
def _create_registry_credential(admin_client):
registry = _create_registry(admin_client)
reg_cred = admin_client.create_registry_credential(
registryId=registry.id,
email='test@rancher.com',
publicValue='wizardofmath+whisper',
secretValue='W0IUYDBM2VORHM4DTTEHSMKLXGCG3KD3IT081QWWTZA11R9DZS2DDPP72'
'48NUTT6')
assert reg_cred is not None
assert reg_cred.email == 'test@rancher.com'
assert reg_cred.kind == 'registryCredential'
assert reg_cred.registryId == registry.id
assert reg_cred.publicValue == 'wizardofmath+whisper'
assert 'secretValue' not in reg_cred
return reg_cred
def _create_registry(admin_client):
registry = admin_client.create_registry(serverAddress='quay.io',
name='Quay')
assert registry.serverAddress == 'quay.io'
assert registry.name == 'Quay'
return registry
def _resource_is_stopped(resource):
return resource.state == 'stopped'
def _resource_is_running(resource):
return resource.state == 'running'
def _validate_add_service_link(service, consumedService, super_client):
service_maps = super_client. \
list_serviceConsumeMap(serviceId=service.id,
consumedServiceId=consumedService.id)
assert len(service_maps) == 1
service_map = service_maps[0]
wait_for_condition(
super_client, service_map, _resource_is_active,
lambda x: 'State is: ' + x.state)
def _validate_remove_service_link(service, consumedService, super_client):
service_maps = super_client. \
list_serviceConsumeMap(serviceId=service.id,
consumedServiceId=consumedService.id)
assert len(service_maps) == 1
service_map = service_maps[0]
wait_for_condition(
super_client, service_map, _resource_is_removed,
lambda x: 'State is: ' + x.state)
def _resource_is_active(resource):
return resource.state == 'active'
def _resource_is_removed(resource):
return resource.state == 'removed'
import gzip
import os
import shutil
import subprocess
import tarfile
import time
import urllib.request
from contextlib import closing
from typing import List
from data_refinery_common.logging import get_and_configure_logger
from data_refinery_common.models import (
DownloaderJob,
DownloaderJobOriginalFileAssociation,
Experiment,
ExperimentAnnotation,
ExperimentSampleAssociation,
OriginalFile,
OriginalFileSampleAssociation,
Sample,
)
from data_refinery_common.utils import get_env_variable
from data_refinery_workers.downloaders import utils
logger = get_and_configure_logger(__name__)
LOCAL_ROOT_DIR = get_env_variable("LOCAL_ROOT_DIR", "/home/user/data_store")
# chunk_size is in bytes
CHUNK_SIZE = 1024 * 256
def _download_file(download_url: str, file_path: str, job: DownloaderJob, force_ftp=False) -> bool:
    """Download a file from GEO via Aspera unless `force_ftp` is True."""
# Ensure directory exists
os.makedirs(file_path.rsplit('/', 1)[0], exist_ok=True)
if not force_ftp:
return _download_file_aspera(download_url=download_url, downloader_job=job, target_file_path=file_path)
else:
try:
logger.debug("Downloading file from %s to %s.",
download_url,
file_path,
downloader_job=job.id)
            # Ancient unresolved bug. WTF python: https://bugs.python.org/issue27973
            urllib.request.urlcleanup()
            # Context managers close the file even if the download raises,
            # which avoids a NameError in a finally clause when open() fails.
            with open(file_path, "wb") as target_file:
                with closing(urllib.request.urlopen(download_url)) as request:
                    shutil.copyfileobj(request, target_file, CHUNK_SIZE)
            urllib.request.urlcleanup()
        except Exception:
            logger.exception("Exception caught while downloading file.",
                             downloader_job=job.id)
            job.failure_reason = "Exception caught while downloading file"
            raise
        return True
def _download_file_aspera(download_url: str,
downloader_job: DownloaderJob,
target_file_path: str,
attempt=0) -> bool:
""" Download a file to a location using Aspera by shelling out to the `ascp` client. """
try:
logger.debug("Downloading file from %s to %s via Aspera.",
download_url,
target_file_path,
downloader_job=downloader_job.id)
ascp = ".aspera/cli/bin/ascp"
key = ".aspera/cli/etc/asperaweb_id_dsa.openssh"
url = download_url
user = "anonftp"
ftp = "ftp-trace.ncbi.nlm.nih.gov"
if url.startswith("ftp://"):
url = url.replace("ftp://", "")
url = url.replace(ftp, "").replace('ftp.ncbi.nlm.nih.gov', '')
            # -k1: resume level 1; -T: disable in-transit encryption for speed
            formatted_command = "{} -i {} -k1 -T {}@{}:{} {}".format(
                ascp, key, user, ftp, url, target_file_path)
completed_command = subprocess.run(formatted_command.split(),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Something went wrong! Else, just fall through to returning True.
if completed_command.returncode != 0:
stderr = completed_command.stderr.decode().strip()
logger.debug("Shell call of `%s` to ascp failed with error message: %s",
formatted_command,
stderr,
downloader_job=downloader_job.id)
# Sometimes, GEO fails mysteriously.
# Wait a few minutes and try again.
if attempt >= 5:
downloader_job.failure_reason = stderr
logger.error("All attempts to download accession via ascp failed: %s\nCommand was: %s",
stderr,
formatted_command,
downloader_job=downloader_job.id)
return False
else:
time.sleep(30)
return _download_file_aspera(download_url,
downloader_job,
target_file_path,
attempt + 1
)
except Exception:
logger.exception("Exception caught while downloading file from the URL via Aspera: %s",
download_url,
downloader_job=downloader_job.id)
downloader_job.failure_reason = ("Exception caught while downloading "
"file from the URL via Aspera: {}").format(download_url)
return False
# If Aspera has given a zero-byte file for some reason, let's back off and retry.
if os.path.getsize(target_file_path) < 1:
os.remove(target_file_path)
if attempt > 5:
downloader_job.failure_reason = "Got zero byte file from aspera after 5 attempts."
return False
logger.error("Got zero byte ascp download for target, retrying.",
target_url=download_url,
downloader_job=downloader_job.id)
time.sleep(10)
return _download_file_aspera(download_url,
downloader_job,
target_file_path,
attempt + 1
)
return True
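# Illustrative only (the exact remote path is an assumption): the assembled
# ascp invocation resembles
#   .aspera/cli/bin/ascp -i .aspera/cli/etc/asperaweb_id_dsa.openssh -k1 -T \
#       anonftp@ftp-trace.ncbi.nlm.nih.gov:/geo/series/.../GSEnnn_RAW.tar /dest/path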
def _extract_tar(file_path: str, accession_code: str) -> List[str]:
"""Extract tar and return a list of the raw files.
"""
logger.debug("Extracting %s!", file_path, file_path=file_path)
try:
# This is technically an unsafe operation.
# However, we're trusting GEO as a data source.
        with tarfile.TarFile(file_path, "r") as zip_ref:
            abs_with_code_raw = LOCAL_ROOT_DIR + '/' + accession_code + '/raw/'
            zip_ref.extractall(abs_with_code_raw)
        # os.path.abspath doesn't do what I thought it does, hence this monstrosity.
files = [{'absolute_path': abs_with_code_raw + f, 'filename': f}
for f in os.listdir(abs_with_code_raw)]
except Exception as e:
logger.exception("While extracting %s caught exception %s",
file_path,
str(e),
accession_code=accession_code,
file_path=file_path)
raise
return files
def _extract_tgz(file_path: str, accession_code: str) -> List[str]:
"""Extract tgz and return a list of the raw files.
"""
logger.debug("Extracting %s!", file_path, file_path=file_path)
try:
extracted_filepath = file_path.replace('.tgz', '.tar')
with gzip.open(file_path, 'rb') as f_in:
with open(extracted_filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
        with tarfile.TarFile(extracted_filepath, "r") as zip_ref:
            abs_with_code_raw = LOCAL_ROOT_DIR + '/' + accession_code + '/raw/'
            zip_ref.extractall(abs_with_code_raw)
files = [{'absolute_path': abs_with_code_raw + f, 'filename': f}
for f in os.listdir(abs_with_code_raw)]
except Exception as e:
        reason = "Exception %s caught while extracting %s" % (str(e), file_path)
logger.exception(reason, accession_code=accession_code, file_path=file_path)
raise
return files
def _extract_gz(file_path: str, accession_code: str) -> List[str]:
"""Extract gz and return a list of the raw files.
"""
logger.debug("Extracting %s!", file_path, file_path=file_path)
try:
extracted_filepath = file_path.replace('.gz', '')
with gzip.open(file_path, 'rb') as f_in:
with open(extracted_filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
files = [{'absolute_path': extracted_filepath,
'filename': extracted_filepath.rsplit('/', 1)[1]
}]
except Exception as e:
logger.exception("While extracting %s caught exception %s",
file_path,
str(e),
accession_code=accession_code,
file_path=file_path)
raise
return files
def download_geo(job_id: int) -> bool:
    """The main function for the GEO Downloader.
    Downloads a single tar file containing the files representing
    samples relating to a single experiment stored in
    GEO.
    """
job = utils.start_job(job_id)
file_assocs = DownloaderJobOriginalFileAssociation.objects.filter(downloader_job=job)
original_file = file_assocs[0].original_file
url = original_file.source_url
accession_code = job.accession_code
sample_assocs = OriginalFileSampleAssociation.objects.filter(original_file=original_file)
related_samples = Sample.objects.filter(id__in=sample_assocs.values('sample_id'))
# First, download the sample archive URL.
# Then, unpack all the ones downloaded.
# Then create processor jobs!
# The files for all of the samples are
# contained within the same zip file. Therefore only
# download the one.
os.makedirs(LOCAL_ROOT_DIR + '/' + accession_code, exist_ok=True)
dl_file_path = LOCAL_ROOT_DIR + '/' + accession_code + '/' + url.split('/')[-1]
logger.debug("Starting to download: " + url, job_id=job_id, accession_code=accession_code)
_download_file(url, dl_file_path, job)
original_file.absolute_file_path = dl_file_path
original_file.is_downloaded = True
original_file.save()
has_raw = True
unpacked_sample_files = []
# These files are tarred, and also subsequently gzipped
if '.tar' in dl_file_path:
try:
extracted_files = _extract_tar(dl_file_path, accession_code)
        except Exception as e:
            job.failure_reason = str(e)
            logger.exception(
                "Error occurred while extracting tar file.", path=dl_file_path, exception=str(e))
utils.end_downloader_job(job, success=False)
return
for og_file in extracted_files:
filename = og_file['filename']
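            # GEO supplementary files are conventionally named
            # '<GSM accession>_<description>' or '<GSM accession>.<ext>', so
            # the sample accession is the token before the first '_' or '.'.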
if '_' in filename:
sample_id = filename.split('_')[0]
else:
sample_id = filename.split('.')[0]
try:
sample = Sample.objects.get(accession_code=sample_id)
except Exception as e:
# We don't have this sample, but it's not a total failure. This happens.
continue
try:
# Files from the GEO supplemental file are gzipped inside of the tarball. Great!
archive_file = OriginalFile.objects.get(source_filename__contains=sample_id)
archive_file.is_downloaded = True
archive_file.is_archive = True
archive_file.absolute_file_path = og_file['absolute_path']
archive_file.calculate_size()
archive_file.calculate_sha1()
archive_file.save()
if '.gz' in og_file['filename']:
extracted_subfile = _extract_gz(og_file['absolute_path'], accession_code)
else:
extracted_subfile = [og_file]
actual_file = OriginalFile()
actual_file.is_downloaded = True
actual_file.is_archive = False
actual_file.absolute_file_path = extracted_subfile[0]['absolute_path']
actual_file.filename = extracted_subfile[0]['filename']
actual_file.calculate_size()
actual_file.calculate_sha1()
actual_file.has_raw = True
actual_file.source_url = original_file.source_url
actual_file.source_filename = original_file.source_filename
actual_file.save()
original_file_sample_association = OriginalFileSampleAssociation()
original_file_sample_association.sample = sample
original_file_sample_association.original_file = actual_file
original_file_sample_association.save()
archive_file.delete_local_file()
archive_file.is_downloaded = False
archive_file.save()
unpacked_sample_files.append(actual_file)
except Exception as e:
# TODO - is this worth failing a job for?
logger.debug("Found a file we didn't have an OriginalFile for! Why did this happen?: "
+ og_file['filename'],
exc_info=1,
file=og_file['filename'],
sample_id=sample_id,
accession_code=accession_code)
# If we don't know why we have it, get rid of it.
os.remove(og_file["absolute_path"])
# This is a .tgz file.
elif '.tgz' in dl_file_path:
# If this is the MINiML file, it has been preprocessed
if '_family.xml.tgz' in dl_file_path:
has_raw = False
try:
extracted_files = _extract_tgz(dl_file_path, accession_code)
        except Exception as e:
            job.failure_reason = str(e)
            logger.exception("Error occurred while extracting tgz file.",
                             path=dl_file_path,
                             exception=str(e))
utils.end_downloader_job(job, success=False)
return
for og_file in extracted_files:
if '.txt' in og_file['filename']:
try:
gsm_id = og_file['filename'].split('-')[0]
sample = Sample.objects.get(accession_code=gsm_id)
except Exception as e:
os.remove(og_file["absolute_path"])
continue
actual_file = OriginalFile()
actual_file.is_downloaded = True
actual_file.is_archive = False
actual_file.absolute_file_path = og_file['absolute_path']
actual_file.filename = og_file['filename']
actual_file.calculate_size()
actual_file.calculate_sha1()
actual_file.has_raw = has_raw
actual_file.source_url = original_file.source_url
actual_file.source_filename = original_file.source_filename
actual_file.save()
original_file_sample_association = OriginalFileSampleAssociation()
original_file_sample_association.sample = sample
original_file_sample_association.original_file = actual_file
original_file_sample_association.save()
unpacked_sample_files.append(actual_file)
# These files are only gzipped.
# These are generally the _actually_ raw (rather than the non-raw data in a RAW file) data
elif '.gz' in dl_file_path:
try:
extracted_files = _extract_gz(dl_file_path, accession_code)
        except Exception as e:
            job.failure_reason = str(e)
            logger.exception("Error occurred while extracting gz file.",
                             path=dl_file_path,
                             exception=str(e))
utils.end_downloader_job(job, success=False)
return
for og_file in extracted_files:
filename = og_file['filename']
sample_id = filename.split('.')[0]
try:
# The archive we downloaded
archive_file = OriginalFile.objects.get(source_filename__contains=filename)
archive_file.is_downloaded = True
archive_file.is_archive = True
archive_file.absolute_file_path = dl_file_path
archive_file.calculate_size()
archive_file.calculate_sha1()
archive_file.save()
actual_file = OriginalFile()
actual_file.is_downloaded = True
actual_file.is_archive = False
actual_file.absolute_file_path = og_file['absolute_path']
actual_file.filename = og_file['filename']
actual_file.calculate_size()
actual_file.calculate_sha1()
actual_file.has_raw = True
actual_file.source_url = original_file.source_url
actual_file.source_filename = original_file.source_filename
actual_file.save()
for sample in related_samples:
new_association = OriginalFileSampleAssociation()
new_association.original_file = actual_file
new_association.sample = sample
new_association.save()
archive_file.delete_local_file()
archive_file.is_downloaded = False
archive_file.save()
unpacked_sample_files.append(actual_file)
except Exception as e:
logger.debug("Found a file we didn't have an OriginalFile for! Why did this happen?: "
+ og_file['filename'],
exc_info=1,
file=og_file['filename'],
sample_id=sample_id,
accession_code=accession_code)
os.remove(og_file["absolute_path"])
# This is probably just a .txt file
else:
filename = dl_file_path.split('/')[-1]
sample_id = filename.split('_')[0]
actual_file = OriginalFile()
actual_file.is_downloaded = True
actual_file.is_archive = False
actual_file.absolute_file_path = dl_file_path
actual_file.filename = filename
actual_file.calculate_size()
actual_file.calculate_sha1()
actual_file.has_raw = True
actual_file.source_url = original_file.source_url
actual_file.source_filename = original_file.source_filename
actual_file.save()
for sample in related_samples:
new_association = OriginalFileSampleAssociation()
new_association.original_file = actual_file
new_association.sample = sample
new_association.save()
unpacked_sample_files.append(actual_file)
if len(unpacked_sample_files) > 0:
success = True
logger.debug("File downloaded and extracted successfully.",
url=url,
dl_file_path=dl_file_path,
downloader_job=job_id)
else:
success = False
logger.info("Unable to extract any files.",
url=url,
dl_file_path=dl_file_path,
downloader_job=job_id)
job.failure_reason = "Failed to extract any downloaded files."
if success:
utils.create_processor_jobs_for_original_files(unpacked_sample_files, job)
if original_file.is_archive:
original_file.delete_local_file()
utils.end_downloader_job(job, success)
return success
from functools import partial
import gym
import numpy as np
from DeepRTS.Engine import Constants
from DeepRTS.python import util
from coding.util import LimitedDiscrete
class ScenarioData:
def __init__(self):
self.previous_statistic_gathered_gold = 0
def reset(self):
self.__init__()
class Scenario(gym.Env):
DEFAULTS = dict(
updates_per_action=1,
flatten=False
)
def __init__(self, config, game, *scenarios):
self.config = util.dict_update(Scenario.DEFAULTS.copy(), config)
self.game = game
        # Struct that holds all data that needs to be stored during an episode
self.data = ScenarioData()
# Binds scenario requirements to *this*
self.scenarios = [partial(scenario, self) for scenario in scenarios]
# Define the action space
self.action_space = LimitedDiscrete(Constants.action_min, Constants.action_max)
# Define the observation space, here we assume that max is 255 (image) # TODO
self.observation_space = gym.spaces.Box(0, 255, shape=self.get_state().shape, dtype=np.float32)
def evaluate(self):
success, reward = zip(*[scenario() for scenario in self.scenarios])
return all(success), sum(reward)
def _optimal_play_sequence(self):
raise NotImplementedError("The function '_optimal_play_sequence' must be implemented!")
def _optimal_play_gamestep(self, total_steps=0, total_reward=0):
for _ in range(self.config["updates_per_action"]):
self.game.update()
t, r = self.evaluate()
total_reward += r
total_steps += 1
return total_steps, total_reward, t
def calculate_optimal_play(self):
self.reset()
player = self.game.selected_player
total_steps, total_reward, terminal = self._optimal_play_gamestep()
initial_build_handled = False
        for action, unitID in self._optimal_play_sequence():
            unit = self.game.get_unit_by_name_id(unitID)
            if not unit:
                raise RuntimeError("Error in optimal_play_sequence. The unit with ID=%s was not found." % unitID)
            # Wait out the initial build phase before issuing the first order
            if not initial_build_handled:
                while unit.state.id == Constants.State.Building:
                    self._optimal_play_gamestep()
                initial_build_handled = True
player.set_targeted_unit_id(unit.id)
# Process game while unit is not idle
while unit.state.id != Constants.State.Idle: # TODO - convert from getter to property in binding
total_steps, total_reward, terminal = self._optimal_play_gamestep(total_steps, total_reward)
player.do_action(action)
terminal = False
while not terminal:
total_steps, total_reward, terminal = self._optimal_play_gamestep(total_steps, total_reward)
self.reset()
return total_steps, total_reward
@staticmethod
def _gold_collect(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.statistic_gathered_gold >= amount
return wrap
@staticmethod
def _gold_collect_increment(amount, reward_success=1, reward_fail=-0.01, player=0):
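        # Shaped reward: +reward_success on any step where gathered gold grew,
        # reward_fail otherwise; terminal once the total reaches `amount`.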
def wrap(self):
p = self.game.players[player]
diff = p.statistic_gathered_gold - self.data.previous_statistic_gathered_gold
self.data.previous_statistic_gathered_gold = p.statistic_gathered_gold
r = reward_success if diff > 0 else reward_fail
t = p.statistic_gathered_gold >= amount
return t, r
return wrap
@staticmethod
def _lumber_collect(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.statistic_gathered_lumber >= amount
return wrap
@staticmethod
def _oil_collect(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.statistic_gathered_oil >= amount
return wrap
@staticmethod
def _food_consumption(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.food_consumption >= amount
return wrap
@staticmethod
def _food_count(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.food >= amount
return wrap
@staticmethod
def _damage_done(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.statistic_damage_done >= amount
return wrap
@staticmethod
def _damage_taken(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.statistic_damage_taken >= amount
return wrap
@staticmethod
def _units_created(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.statistic_units_created >= amount
return wrap
@staticmethod
def _num_footman(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.num_footman >= amount
return wrap
@staticmethod
def _num_peasant(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.num_peasant >= amount
return wrap
@staticmethod
def _num_archer(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.num_archer >= amount
return wrap
@staticmethod
def _num_farm(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.num_farm >= amount
return wrap
@staticmethod
def _num_barracks(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.num_barrack >= amount
return wrap
@staticmethod
def _num_town_hall(amount, player=0):
def wrap(self):
p = self.game.players[player]
return p.num_town_hall >= amount
return wrap
def reset(self):
self.data.reset()
self.game.reset()
self.game.update()
self.game.render()
return self.get_state()
def get_state(self):
if self.config["flatten"]:
return self.game.get_state().flatten()
return self.game.get_state()
def step(self, action):
player = self.game.selected_player # py::return_value_policy::reference
player.do_action(action + 1)
for _ in range(self.config["updates_per_action"]):
self.game.update()
s1 = self.get_state()
t, r = self.evaluate()
return s1, r, t, {}
def render(self, mode='human'):
if mode == "human":
self.game.view()
GOLD_COLLECT = _gold_collect
GOLD_COLLECT_INCREMENT = _gold_collect_increment
    OIL_COLLECT = _oil_collect
    LUMBER_COLLECT = _lumber_collect
FOOD_CONSUMPTION = _food_consumption
FOOD_COUNT = _food_count
DAMAGE_DONE = _damage_done
DAMAGE_TAKEN = _damage_taken
UNITS_CREATED = _units_created
NUM_FOOTMAN = _num_footman
NUM_PEASANT = _num_peasant
NUM_ARCHER = _num_archer
NUM_FARM = _num_farm
NUM_BARRACKS = _num_barracks
NUM_TOWN_HALL = _num_town_hall
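# A minimal usage sketch (assumed, not part of this module): `game` would be a
# DeepRTS game instance constructed elsewhere.
#
#   env = Scenario({}, game,
#                  Scenario.GOLD_COLLECT_INCREMENT(500),
#                  Scenario.NUM_PEASANT(3))
#   state = env.reset()
#   state, reward, done, info = env.step(env.action_space.sample())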
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
A "DAV object" is anything we get from the caldav server or push into the caldav server, notably principal, calendars and calendar events.
"""
import vobject
import io
import uuid
import re
import datetime
from lxml import etree
from caldav.lib import error, vcal, url
from caldav.lib.url import URL
from caldav.elements import dav, cdav
from caldav.lib.python_utilities import to_unicode
class DAVObject(object):
"""
Base class for all DAV objects. Can be instantiated by a client
and an absolute or relative URL, or from the parent object.
"""
id = None
url = None
client = None
parent = None
name = None
etag = None
def __init__(self, client=None, url=None, parent=None, name=None, id=None, etag=None, **extra):
"""
Default constructor.
Parameters:
* client: A DAVClient instance
* url: The url for this object. May be a full URL or a relative URL.
* parent: The parent object - used when creating objects
* name: A displayname
        * id: The resource id (UID for an Event)
        * etag: The entity tag (ETag) of the object, if already known
"""
if client is None and parent is not None:
client = parent.client
self.client = client
self.parent = parent
self.name = name
self.id = id
self.etag = etag
self.extra_init_options = extra
## url may be a path relative to the caldav root
if client and url:
self.url = client.url.join(url)
else:
self.url = URL.objectify(url)
@property
def canonical_url(self):
return str(self.url.unauth())
def children(self, type=None):
"""
List children, using a propfind (resourcetype) on the parent object,
at depth = 1.
"""
c = []
depth = 1
properties = {}
props = [dav.ResourceType(), ]
response = self._query_properties(props, depth)
properties = self._handle_prop_response(response=response, props=props, type=type, what='tag')
for path in list(properties.keys()):
resource_type = properties[path][dav.ResourceType.tag]
if resource_type == type or type is None:
## TODO: investigate the RFCs thoroughly - why does a "get
## members of this collection"-request also return the collection URL itself?
## And why is the strip_trailing_slash-method needed? The collection URL
## should always end with a slash according to RFC 2518, section 5.2.
if self.url.strip_trailing_slash() != self.url.join(path).strip_trailing_slash():
c.append((self.url.join(path), resource_type))
return c
def _query_properties(self, props=[], depth=0):
"""
This is an internal method for doing a propfind query. It's a
result of code-refactoring work, attempting to consolidate
similar-looking code into a common method.
"""
root = None
# build the propfind request
if len(props) > 0:
prop = dav.Prop() + props
root = dav.Propfind() + prop
return self._query(root, depth)
def _query(self, root=None, depth=0, query_method='propfind', url=None, expected_return_value=None):
"""
This is an internal method for doing a query. It's a
result of code-refactoring work, attempting to consolidate
similar-looking code into a common method.
"""
if url is None:
url = self.url
body = ""
if root:
body = etree.tostring(root.xmlelement(), encoding="utf-8",
xml_declaration=True)
ret = getattr(self.client, query_method)(
url, body, depth)
if ret.status == 404:
raise error.NotFoundError(ret.raw)
if (
(expected_return_value is not None and ret.status != expected_return_value) or
ret.status >= 400):
raise error.exception_by_method[query_method](ret.raw)
return ret
def _handle_prop_response(self, response, props=[], type=None, what='text'):
"""
Internal method to massage an XML response into a dict. (This
method is a result of some code refactoring work, attempting
to consolidate similar-looking code)
"""
properties = {}
# All items should be in a <D:response> element
for r in response.tree.findall('.//' + dav.Response.tag):
status = r.find('.//' + dav.Status.tag)
            if '200 ' not in status.text and '404 ' not in status.text:
raise error.ReportError(response.raw) ## TODO: may be wrong error class
href = r.find('.//' + dav.Href.tag).text
properties[href] = {}
for p in props:
t = r.find(".//" + p.tag)
if t is None:
continue
if len(list(t)) > 0:
if type is not None:
val = t.findall(".//" + type)
else:
val = t.findall(".//*")
if len(val) > 0:
if hasattr(p, 'convert'):
val = p.convert(t)
else:
if len(val) == 1:
val = getattr(val[0], what)
else:
val = getattr(t, what)
else:
val = None
else:
val = t.text
properties[href][p.tag] = val
return properties
def get_properties(self, props=[], depth=0):
"""
Get properties (PROPFIND) for this object. Works only for
properties, that don't have complex types.
Parameters:
* props = [dav.ResourceType(), dav.DisplayName(), ...]
Returns:
* {proptag: value, ...}
"""
rc = None
response = self._query_properties(props, depth)
properties = self._handle_prop_response(response, props)
path = self.url.path
exchange_path = self.url.path + '/'
if path in list(properties.keys()):
rc = properties[path]
elif exchange_path in list(properties.keys()):
rc = properties[exchange_path]
        elif len(properties) == 1:
            ## a dict view isn't indexable, so materialize the single key
            rc = properties[list(properties)[0]]
else:
raise Exception("The CalDAV server you are using has "
"a problem with path handling.")
return rc
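    ## Usage sketch (some_dav_object is illustrative):
    ##   props = some_dav_object.get_properties([dav.DisplayName()])
    ##   display_name = props.get('{DAV:}displayname')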
def set_properties(self, props=[]):
"""
Set properties (PROPPATCH) for this object.
Parameters:
* props = [dav.DisplayName('name'), ...]
Returns:
* self
"""
prop = dav.Prop() + props
set = dav.Set() + prop
root = dav.PropertyUpdate() + set
r = self._query(root, query_method='proppatch')
statuses = r.tree.findall(".//" + dav.Status.tag)
for s in statuses:
            if '200 ' not in s.text:
raise error.PropsetError(r.raw)
return self
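    ## Usage sketch: rename the object on the server (illustrative).
    ##   some_dav_object.set_properties([dav.DisplayName('Work calendar')])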
def save(self):
"""
        Save the object. This is an abstract method that all classes
        derived from DAVObject must implement.
Returns:
* self
"""
raise NotImplementedError()
def delete(self):
"""
Delete the object.
"""
if self.url is not None:
r = self.client.delete(self.url)
#TODO: find out why we get 404
if r.status not in (200, 204, 404):
raise error.DeleteError(r.raw)
def __str__(self):
return str(self.url)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, self.url)
class CalendarSet(DAVObject):
def calendars(self):
"""
List all calendar collections in this set.
Returns:
* [Calendar(), ...]
"""
cals = []
data = self.children(cdav.Calendar.tag)
for c_url, c_type in data:
cals.append(Calendar(self.client, c_url, parent=self))
return cals
def make_calendar(self, name=None, cal_id=None, supported_calendar_component_set=None):
"""
Utility method for creating a new calendar.
Parameters:
* name: the name of the new calendar
* cal_id: the uuid of the new calendar
* supported_calendar_component_set: what kind of objects (EVENT, VTODO, VFREEBUSY, VJOURNAL) the calendar should handle. Should be set to ['VTODO'] when creating a task list in Zimbra - in most other cases the default will be OK.
Returns:
* Calendar(...)-object
"""
return Calendar(self.client, name=name, parent=self, id=cal_id, supported_calendar_component_set=supported_calendar_component_set).save()
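    ## Usage sketch (assumes a connected DAVClient instance `client`):
    ##   home = Principal(client).calendar_home_set
    ##   tasks = home.make_calendar(name="Errands",
    ##                              supported_calendar_component_set=['VTODO'])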
def calendar(self, name=None, cal_id=None):
"""
The calendar method will return a calendar object. It will not
initiate any communication with the server.
Parameters:
* name: return the calendar with this name
* cal_id: return the calendar with this calendar id
Returns:
* Calendar(...)-object
"""
return Calendar(self.client, name=name, parent = self,
url = self.url.join(cal_id), id=cal_id)
class Principal(DAVObject):
"""
This class represents a DAV Principal. It doesn't do much, except
keep track of the URLs for the calendar-home-set, etc.
"""
def __init__(self, client=None, url=None):
"""
Returns a Principal.
Parameters:
        * client: a DAVClient() object
        * url: Deprecated - for backwards compatibility purposes only.
        If url is not given, deduce the principal path as well as the calendar home set path through propfinds.
"""
self.client = client
self._calendar_home_set = None
## backwards compatibility.
if url is not None:
self.url = client.url.join(URL.objectify(url))
else:
self.url = self.client.url
cup = self.get_properties([dav.CurrentUserPrincipal()])
self.url = self.client.url.join(URL.objectify(cup['{DAV:}current-user-principal']))
def make_calendar(self, name=None, cal_id=None, supported_calendar_component_set=None):
"""
Convenience method, bypasses the self.calendar_home_set object.
See CalendarSet.make_calendar for details.
"""
return self.calendar_home_set.make_calendar(name, cal_id, supported_calendar_component_set=supported_calendar_component_set)
def calendar(self, name=None, cal_id=None):
"""
The calendar method will return a calendar object. It will not initiate any communication with the server.
"""
return self.calendar_home_set.calendar(name, cal_id)
@property
def calendar_home_set(self):
if not self._calendar_home_set:
chs = self.get_properties([cdav.CalendarHomeSet()])
self.calendar_home_set = chs['{urn:ietf:params:xml:ns:caldav}calendar-home-set']
return self._calendar_home_set
@calendar_home_set.setter
def calendar_home_set(self, url):
if isinstance(url, CalendarSet):
self._calendar_home_set = url
return
sanitized_url = URL.objectify(url)
if sanitized_url.hostname and sanitized_url.hostname != self.client.url.hostname:
## icloud (and others?) having a load balanced system, where each principal resides on one named host
self.client.url = sanitized_url
self._calendar_home_set = CalendarSet(self.client, self.client.url.join(sanitized_url))
def calendars(self):
"""
        Return the principal's calendars
"""
return self.calendar_home_set.calendars()
class Calendar(DAVObject):
"""
The `Calendar` object is used to represent a calendar collection.
Refer to the RFC for details: http://www.ietf.org/rfc/rfc4791.txt
"""
def _create(self, name, id=None, supported_calendar_component_set=None):
"""
Create a new calendar with display name `name` in `parent`.
"""
if id is None:
id = str(uuid.uuid1())
self.id = id
path = self.parent.url.join(id)
self.url = path
## TODO: mkcalendar seems to ignore the body on most servers?
## at least the name doesn't get set this way.
## zimbra gives 500 (!) if body is omitted ...
cal = cdav.CalendarCollection()
coll = dav.Collection() + cal
type = dav.ResourceType() + coll
prop = dav.Prop() + [type,]
if name:
display_name = dav.DisplayName(name)
prop += [display_name,]
if supported_calendar_component_set:
sccs = cdav.SupportedCalendarComponentSet()
for scc in supported_calendar_component_set:
sccs += cdav.Comp(scc)
prop += sccs
set = dav.Set() + prop
mkcol = cdav.Mkcalendar() + set
r = self._query(root=mkcol, query_method='mkcalendar', url=path, expected_return_value=201)
if name:
try:
self.set_properties([display_name])
except:
self.delete()
raise
## Special hack for Zimbra! The calendar we've made exists at
## the specified URL, and we can do operations like ls, even
## PUT an event to the calendar. Zimbra will enforce that the
## event uuid matches the event url, and return either 201 or
## 302 - but alas, try to do a GET towards the event and we
## get 404! But turn around and replace the calendar ID with
## the calendar name in the URL and hey ... it works!
## TODO: write test cases for calendars with non-trivial
## names and calendars with names already matching existing
## calendar urls and ensure they pass.
zimbra_url = self.parent.url.join(name)
try:
ret = self.client.request(zimbra_url)
if ret.status == 404:
raise error.NotFoundError
## insane server
self.url = zimbra_url
except error.NotFoundError:
## sane server
pass
def add_event(self, ical):
"""
Add a new event to the calendar, with the given ical.
Parameters:
* ical - ical object (text)
"""
return Event(self.client, data = ical, parent = self).save()
def add_todo(self, ical):
"""
Add a new task to the calendar, with the given ical.
Parameters:
* ical - ical object (text)
"""
return Todo(self.client, data=ical, parent=self).save()
def add_journal(self, ical):
"""
Add a new journal entry to the calendar, with the given ical.
Parameters:
* ical - ical object (text)
"""
return Journal(self.client, data=ical, parent=self).save()
def save(self):
"""
The save method for a calendar is only used to create it, for now.
We know we have to create it when we don't have a url.
Returns:
* self
"""
if self.url is None:
self._create(name=self.name, id=self.id, **self.extra_init_options)
if not self.url.endswith('/'):
self.url = URL.objectify(str(self.url) + '/')
return self
def date_search(self, start, end=None):
"""
Search events by date in the calendar. Recurring events are
        expanded if they occur during the specified time frame
and if an end timestamp is given.
Parameters:
* start = datetime.today().
* end = same as above.
Returns:
* [Event(), ...]
"""
matches = []
# build the request
## Some servers will raise an error if we send the expand flag
## but don't set any end-date - expand doesn't make much sense
## if we have one recurring event describing an indefinite
## series of events. Hence, if the end date is not set, we
## skip asking for expanded events.
if end:
data = cdav.CalendarData() + cdav.Expand(start, end)
else:
data = cdav.CalendarData()
prop = dav.Prop() + data + dav.Getetag()
range = cdav.TimeRange(start, end)
vevent = cdav.CompFilter("VEVENT") + range
vcalendar = cdav.CompFilter("VCALENDAR") + vevent
filter = cdav.Filter() + vcalendar
root = cdav.CalendarQuery() + [prop, filter]
response = self._query(root, 1, 'report')
        results = self._handle_prop_response(response=response, props=[cdav.CalendarData(), dav.Getetag()])
for r in results:
matches.append(
Event(self.client, url=self.url.join(r), data=results[r][cdav.CalendarData.tag], parent=self,
etag=results[r][dav.Getetag.tag]))
return matches
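    ## Usage sketch: events overlapping the coming week. Supplying an end
    ## timestamp also makes the server expand recurring events.
    ##   start = datetime.datetime.now()
    ##   events = calendar.date_search(start, start + datetime.timedelta(days=7))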
def freebusy_request(self, start, end):
"""
Search the calendar, but return only the free/busy information.
Parameters:
* start = datetime.today().
* end = same as above.
Returns:
* [FreeBusy(), ...]
"""
root = cdav.FreeBusyQuery() + [ cdav.TimeRange(start, end) ]
response = self._query(root, 1, 'report')
return FreeBusy(self, response.raw)
def todos(self, sort_key='due', include_completed=False):
"""
        Fetches a list of todo events (VTODOs).
        Parameters:
        * sort_key: use this field in the VTODO for sorting (lower-case string, e.g. 'priority').
* include_completed: boolean - by default, only pending tasks are listed
"""
## ref https://www.ietf.org/rfc/rfc4791.txt, section 7.8.9
matches = []
# build the request
data = cdav.CalendarData()
prop = dav.Prop() + data
if not include_completed:
vnotcompleted = cdav.TextMatch('COMPLETED', negate=True)
vnotcancelled = cdav.TextMatch('CANCELLED', negate=True)
vstatus = cdav.PropFilter('STATUS') + vnotcancelled + vnotcompleted
vnocompletedate = cdav.PropFilter('COMPLETED') + cdav.NotDefined()
vtodo = cdav.CompFilter("VTODO") + vnocompletedate + vstatus
else:
vtodo = cdav.CompFilter("VTODO")
vcalendar = cdav.CompFilter("VCALENDAR") + vtodo
filter = cdav.Filter() + vcalendar
root = cdav.CalendarQuery() + [prop, filter]
response = self._query(root, 1, 'report')
results = self._handle_prop_response(response=response, props=[cdav.CalendarData()])
for r in results:
matches.append(
Todo(self.client, url=self.url.join(r), data=results[r][cdav.CalendarData.tag], parent=self))
        def sort_key_func(x):
            val = getattr(x.instance.vtodo, sort_key, None)
            if not val:
                ## tasks missing the sort field go first; an empty string
                ## compares cleanly with the string keys below
                return ''
            val = val.value
            if hasattr(val, 'strftime'):
                return val.strftime('%F%H%M%S')
            return val
if sort_key:
matches.sort(key=sort_key_func)
return matches
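    ## Usage sketch (illustrative):
    ##   pending = calendar.todos(sort_key='priority')
    ##   everything = calendar.todos(include_completed=True)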
def _calendar_comp_class_by_data(self, data):
for line in data.split('\n'):
if line == 'BEGIN:VEVENT':
return Event
if line == 'BEGIN:VTODO':
return Todo
if line == 'BEGIN:VJOURNAL':
return Journal
if line == 'BEGIN:VFREEBUSY':
return FreeBusy
def event_by_url(self, href, data=None):
"""
Returns the event with the given URL
"""
return Event(url=href, data=data, parent=self).load()
def object_by_uid(self, uid, comp_filter=None):
"""
Get one event from the calendar.
Parameters:
* uid: the event uid
Returns:
* Event() or None
"""
data = cdav.CalendarData()
prop = dav.Prop() + data
query = cdav.TextMatch(uid)
query = cdav.PropFilter("UID") + query
if comp_filter:
query = comp_filter + query
vcalendar = cdav.CompFilter("VCALENDAR") + query
filter = cdav.Filter() + vcalendar
root = cdav.CalendarQuery() + [prop, filter]
response = self._query(root, 1, 'report')
if response.status == 404:
raise error.NotFoundError(response.raw)
elif response.status == 400:
raise error.ReportError(response.raw)
r = response.tree.find(".//" + dav.Response.tag)
if r is not None:
href = r.find(".//" + dav.Href.tag).text
data = r.find(".//" + cdav.CalendarData.tag).text
return self._calendar_comp_class_by_data(data)(self.client, url=URL.objectify(href), data=data, parent=self)
else:
raise error.NotFoundError(response.raw)
def event_by_uid(self, uid):
return self.object_by_uid(uid, comp_filter=cdav.CompFilter("VEVENT"))
## alias for backward compatibility
event = event_by_uid
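    ## Usage sketch: the same report can fetch a task instead of an event.
    ##   event = calendar.event_by_uid(some_uid)
    ##   task = calendar.object_by_uid(some_uid, comp_filter=cdav.CompFilter("VTODO"))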
def events_etag(self):
"""
List all events from the calendar with etag.
Returns:
* [Event(), ...]
"""
all = []
prop = dav.Prop() + dav.Getetag()
vevent = cdav.CompFilter("VEVENT")
vcalendar = cdav.CompFilter("VCALENDAR") + vevent
filter = cdav.Filter() + vcalendar
root = cdav.CalendarQuery() + [prop, filter]
response = self._query(root, 1, query_method='report')
results = self._handle_prop_response(response, props=[dav.Getetag()])
for r in results:
all.append(Event(self.client, url=self.url.join(r), parent=self, etag=results[r][dav.Getetag.tag]))
return all
def events_list(self, uids):
"""
        List events from the calendar matching the given list of hrefs/uids,
        using a calendar-multiget report.
Returns:
* [Event(), ...]
"""
all = []
data = cdav.CalendarData()
prop = dav.Prop() + data + dav.Getetag()
vevent = cdav.CompFilter("VEVENT")
vcalendar = cdav.CompFilter("VCALENDAR") + vevent
filter = cdav.Filter() + vcalendar
root = cdav.CalendarMultiget() + [prop, filter]
        root += [dav.Href(value=x) for x in uids]
response = self._query(root, 1, query_method='report')
results = self._handle_prop_response(response, props=[cdav.CalendarData(), dav.Getetag()])
for r in results:
all.append(Event(self.client, url=self.url.join(r), data=results[r][cdav.CalendarData.tag], parent=self,
etag=results[r][dav.Getetag.tag]))
return all
def events(self):
"""
List all events from the calendar.
Returns:
* [Event(), ...]
"""
all = []
data = cdav.CalendarData()
prop = dav.Prop() + data + dav.Getetag()
vevent = cdav.CompFilter("VEVENT")
vcalendar = cdav.CompFilter("VCALENDAR") + vevent
filter = cdav.Filter() + vcalendar
root = cdav.CalendarQuery() + [prop, filter]
response = self._query(root, 1, query_method='report')
results = self._handle_prop_response(response, props=[cdav.CalendarData(), dav.Getetag()])
for r in results:
all.append(Event(self.client, url=self.url.join(r), data=results[r][cdav.CalendarData.tag], parent=self,
etag=results[r][dav.Getetag.tag]))
return all
def journals(self):
"""
List all journals from the calendar.
Returns:
* [Journal(), ...]
"""
## TODO: this is basically a copy of events() - can we do more
## refactoring and consolidation here? Maybe it's wrong to do
## separate methods for journals, todos and events?
all = []
data = cdav.CalendarData()
prop = dav.Prop() + data
vevent = cdav.CompFilter("VJOURNAL")
vcalendar = cdav.CompFilter("VCALENDAR") + vevent
filter = cdav.Filter() + vcalendar
root = cdav.CalendarQuery() + [prop, filter]
response = self._query(root, 1, query_method='report')
results = self._handle_prop_response(response, props=[cdav.CalendarData()])
for r in results:
all.append(Journal(self.client, url=self.url.join(r), data=results[r][cdav.CalendarData.tag], parent=self))
return all
class CalendarObjectResource(DAVObject):
"""
Ref RFC 4791, section 4.1, a "Calendar Object Resource" can be an
event, a todo-item, a journal entry, a free/busy entry, etc.
"""
_instance = None
_data = None
def __init__(self, client=None, url=None, data=None, parent=None, id=None, etag=None):
"""
CalendarObjectResource has an additional parameter for its constructor:
* data = "...", vCal data for the event
"""
DAVObject.__init__(self, client=client, url=url, parent=parent, id=id, etag=etag)
if data is not None:
self.data = data
def load(self):
"""
Load the object from the caldav server.
"""
r = self.client.request(self.url)
if r.status == 404:
raise error.NotFoundError(r.raw)
self.data = vcal.fix(r.raw)
return self
def _create(self, data, id=None, path=None):
if id is None and path is not None and str(path).endswith('.ics'):
            id = re.search(r'(/|^)([^/]*)\.ics', str(path)).group(2)
elif id is None:
for obj in ('vevent', 'vtodo', 'vjournal', 'vfreebusy'):
if hasattr(self.instance, obj):
id = getattr(self.instance, obj).uid.value
break
if path is None:
path = id + ".ics"
path = self.parent.url.join(path)
r = self.client.put(path, data,
{"Content-Type": 'text/calendar; charset="utf-8"'})
if r.status == 302:
path = [x[1] for x in r.headers if x[0]=='location'][0]
elif not (r.status in (204, 201)):
raise error.PutError(r.raw)
self.url = URL.objectify(path)
self.id = id
def save(self):
"""
Save the object, can be used for creation and update.
Returns:
* self
"""
if self._instance is not None:
path = self.url.path if self.url else None
self._create(self._instance.serialize(), self.id, path)
return self
def __str__(self):
return "%s: %s" % (self.__class__.__name__, self.url)
def _set_data(self, data):
self._data = vcal.fix(data)
self._instance = vobject.readOne(self._data)
return self
def _get_data(self):
return self._data
data = property(_get_data, _set_data,
doc="vCal representation of the object")
def _set_instance(self, inst):
self._instance = inst
self._data = inst.serialize()
return self
def _get_instance(self):
return self._instance
instance = property(_get_instance, _set_instance,
doc="vobject instance of the object")
class Event(CalendarObjectResource):
"""
The `Event` object is used to represent an event (VEVENT).
"""
pass
class Journal(CalendarObjectResource):
"""
The `Journal` object is used to represent a journal entry (VJOURNAL).
"""
pass
class FreeBusy(CalendarObjectResource):
"""
The `FreeBusy` object is used to represent a freebusy response from the server.
"""
def __init__(self, parent, data):
"""
A freebusy response object has no URL or ID (TODO: reconsider the
        class hierarchy? Most of the inherited methods are moot and
        will fail). The raw response can be accessed through self.data,
instantiated vobject as self.instance.
"""
CalendarObjectResource.__init__(self, client=parent.client, url=None, data=data, parent=parent, id=None)
class Todo(CalendarObjectResource):
"""
The `Todo` object is used to represent a todo item (VTODO).
"""
def complete(self, completion_timestamp=None):
"""
Marks the task as completed.
Parameters:
* completion_timestamp - datetime object. Defaults to datetime.datetime.now().
"""
if not completion_timestamp:
completion_timestamp = datetime.datetime.now()
if not hasattr(self.instance.vtodo, 'status'):
self.instance.vtodo.add('status')
self.instance.vtodo.status.value = 'COMPLETED'
self.instance.vtodo.add('completed').value = completion_timestamp
self.save()
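## Usage sketch: mark the first pending task on a calendar as done
## (`calendar` is illustrative):
##   task = calendar.todos()[0]
##   task.complete()  # stamps STATUS:COMPLETED plus a COMPLETED timestamp, then saves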
| |
import os, sys
import unittest
from shutil import rmtree, copy
import itertools
import fnmatch
from ticdat.utils import dictish, containerish, verify
from ticdat import TicDatFactory, Model, Slicer, PanDatFactory
__codeFile = []
def _codeFile() :
if __codeFile:
return __codeFile[0]
import inspect
__codeFile[:]=[os.path.abspath(inspect.getsourcefile(_codeFile))]
return _codeFile()
def _codeDir():
return os.path.dirname(_codeFile())
def get_testing_file_path(base_name):
rtn = os.path.join(_codeDir(), base_name)
assert os.path.exists(rtn)
return rtn
am_on_windows = sys.platform in ['win32']
def configure_blank_accdb():
verify(os.path.isfile("blank.accdb"),
"You need a blank.accdb file in your current directory.")
mdb_dir = os.path.abspath(os.path.join(_codeDir(), ".."))
v_str = "Contact ticdat support at ticdat@opalytics.com"
verify(os.path.isdir(mdb_dir), "%s is strangely not a directory. %s"%(mdb_dir, v_str))
verify(os.path.isfile(os.path.join(mdb_dir, "mdb.py")), "mdb.py is missing. %s"%v_str)
copy("blank.accdb", mdb_dir)
def configure_oplrun_path():
if am_on_windows:
oplrun_name = os.path.abspath('oplrun.exe')
else:
oplrun_name = os.path.abspath('oplrun')
verify(os.path.isfile(oplrun_name), "You need to be in the directory containing oplrun")
opl_dir = os.path.abspath(os.path.join(_codeDir(), ".."))
v_str = "Contact ticdat support at ticdat@opalytics.com"
verify(os.path.isdir(opl_dir), "%s is strangely not a directory. %s"%(opl_dir, v_str))
verify(os.path.isfile(os.path.join(opl_dir,"opl.py")), "opl.py is missing. %s"%v_str)
oplrun_path = os.path.abspath(oplrun_name)
with open(os.path.join(opl_dir, "oplrun_path.txt"), "w") as f:
f.write(oplrun_path)
def configure_runlingo_path():
if am_on_windows:
runlingo_name = os.path.abspath('runlingo.exe')
else:
runlingo_name = os.path.abspath('runlingo')
verify(os.path.isfile(runlingo_name), "You need to be in the directory containing runlingo")
lingo_dir = os.path.abspath(os.path.join(_codeDir(), ".."))
v_str = "Contact ticdat support at ticdat@opalytics.com"
verify(os.path.isdir(lingo_dir), "%s is strangely not a directory. %s"%(lingo_dir, v_str))
    verify(os.path.isfile(os.path.join(lingo_dir,"lingo.py")), "lingo.py is missing. %s"%v_str)
runlingo_path = os.path.abspath(runlingo_name)
with open(os.path.join(lingo_dir, "runlingo_path.txt"), "w") as f:
f.write(runlingo_path)
_debug = []
def _asserting() :
_debug.append(())
return _debug
assert _asserting()
def DEBUG() :
return bool(_debug)
def firesException(f) :
try:
f()
except Exception as e:
return e
_memo = []
def memo(*args):
rtn = list(_memo)
_memo[:] = args
return rtn[0] if len(rtn) == 1 else rtn
def clean_denormalization_errors(chk):
return {k:{_k:set(_v) for _k,_v in v.items()} for k,v in chk.items()}
def fail_to_debugger(cls) :
"""
decorator to allow a unittest class to enter the debugger if a unit test fails
:param cls: a unittest class
:return: cls decorated so as to fail to the ipdb debugger
    CAVEAT: this side-effects the unittest module by redirecting its main function!
    This routine is intended for **temporary** decoration of a unittest class
for debugging/troubleshooting.
"""
def _falseTriggersDebugger(x) :
if not (x) :
import ipdb; ipdb.set_trace()
assert(x)
cls.assertTrue = lambda self, *args : _falseTriggersDebugger(args[0])
cls.assertFalse = lambda self, *args : _falseTriggersDebugger(not args[0])
cls.failToDebugger = True
unittest.main = lambda : _runSuite(cls)
return cls
def flagged_as_run_alone(f):
"""
a decorator to flag a unittest test function to be the sole test run for
a fail_to_debugger decorated class
:param f: a unittest test function
:return: the same function decorated for fail_to_debugger
"""
f.runAlone = True
return f
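# Usage sketch (temporary, while troubleshooting; MyTests is hypothetical):
# @fail_to_debugger
# class MyTests(unittest.TestCase):
#     @flagged_as_run_alone
#     def testJustThisOne(self):
#         self.assertTrue(firesException(lambda : {}[0]))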
def _runSuite(cls):
_rtn = [getattr(cls, x) for x in dir(cls)
if x.startswith("test")]
assert all(callable(x) for x in _rtn)
runalones = [x for x in _rtn if hasattr(x, "runAlone")]
assert len(runalones) <= 1, "you specified more than one to runAlone!"
if runalones:
_rtn = [runalones[0].__name__]
else:
_rtn = [x.__name__ for x in _rtn ]
suite = unittest.TestSuite()
for x in _rtn :
suite.addTest(cls(x))
if "failToDebugger" in dir(cls) and cls.failToDebugger :
print("!!! Debugging suite for " + str(cls) + " !!!\n")
suite.debug()
print("!!! Debugged suite for " + str(cls) + " !!!\n")
else :
unittest.TextTestRunner().run(suite)
def unixWildCardDir(dirPath, unixStyleStr, fullPath = True, collapseSingleton = True, allowLinks = False) :
assert os.path.isdir(dirPath)
rtn = [x for x in os.listdir(dirPath) if fnmatch.fnmatch(x, unixStyleStr)]
if (fullPath) :
rtn = [os.path.abspath(os.path.join(dirPath, x)) for x in rtn]
if (not allowLinks) :
rtn = [x for x in rtn if not os.path.islink(x) ]
if ( (len(rtn) == 1) and collapseSingleton):
return rtn[0]
return rtn
def findAllUnixWildCard(directory, unixStyleStr, fullPath = True, recursive=True, allowLinks = False, skippedDirs = None):
_skippedDirs = skippedDirs or [] # Py does scary stuff with mutable default arguments
assert(os.path.isdir(directory))
assert (not _skippedDirs) or (recursive), "skipping directories makes sense only for recursive"
rtn = unixWildCardDir(directory, unixStyleStr, fullPath = fullPath,
collapseSingleton = False, allowLinks = allowLinks)
dirList=os.listdir(directory)
for fname in dirList:
fname = os.path.join(directory, fname)
if ( (os.path.isdir(fname)) and recursive and fname not in _skippedDirs) :
rtn = rtn + findAllUnixWildCard(fname, unixStyleStr, recursive = True,
allowLinks = allowLinks, skippedDirs = _skippedDirs)
return rtn
def findAllFiles(path, extensions) :
assert os.path.isdir(path)
return list(itertools.chain(*[findAllUnixWildCard(path, "*" + x) for x in extensions]))
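# Usage sketch (paths are placeholders):
#   findAllFiles("/tmp/project", [".py", ".json"])   # recursive search by extension
#   unixWildCardDir("/tmp/project", "*.csv", collapseSingleton=False)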
def makeCleanDir(path) :
assert not os.path.exists(path) or os.path.isdir(path)
rmtree(path, ignore_errors = True)
os.mkdir(path)
return path
def makeCleanPath(path) :
if os.path.exists(path) :
if os.path.isdir(path) :
makeCleanDir(path)
else :
os.remove(path)
return path
#assert is magic. It can't be passed around directly. Lambda encapsulation lets us pass around a proxy. Yay lambda
def assertTrue(x) :
assert (x)
def assertFalse(x) :
assert (not x)
def spacesSchema() :
return {"a_table" : [("a Field",),("a Data 1", "a Data 2", "a Data 3") ],
"b_table" : [("b Field 1", "b Field 2", "b Field 3"), ["b Data"]],
"c_table" : [[],("c Data 1", "c Data 2", "c Data 3", "c Data 4")]}
def spacesData() :
return {
"a_table" : {1 : {"a Data 3":3, "a Data 2":2, "a Data 1":1},
"b" : ("b", "d", 12), 0.23 : (11, 12, "thirt")},
"b_table" : {(1, 2, 3) : 1, ("a", "b", "b") : 12},
"c_table" : ((1, 2, 3, 4),
{"c Data 4":"d", "c Data 2":"b", "c Data 3":"c", "c Data 1":"a"},
("a", "b", 12, 24) )
}
def assertTicDatTablesSame(t1, t2, _goodTicDatTable,
_assertTrue = assertTrue, _assertFalse = assertFalse) :
_assertTrue(set(t1) == set(t2))
_assertTrue(_goodTicDatTable(t1) and _goodTicDatTable(t2))
if not dictish(t1) and not dictish(t2) :
return
if dictish(t1) != dictish(t2) and dictish(t2) :
t1,t2 = t2,t1
if not dictish(t2) :
_assertTrue(all(containerish(x) and len(x) == 0 for x in t1.values()))
return
for k1,v1 in t1.items() :
v2 = t2[k1]
if dictish(v1) != dictish(v2) and dictish(v2) :
v2, v1 = v1, v2
if dictish(v1) and dictish(v2) :
_assertTrue(set(v1) == set(v2))
for _k1 in v1 :
_assertTrue(v1[_k1] == v2[_k1])
elif dictish(v1) and containerish(v2) :
_assertTrue(sorted(map(str, v1.values())) == sorted(map(str, v2)))
elif dictish(v1) :
            _assertTrue(len(v1) == 1 and list(v1.values())[0] == v2)
else :
if containerish(v1) != containerish(v2) and containerish(v2) :
v2, v1 = v1, v2
if containerish(v1) and containerish(v2) :
_assertTrue(len(v1) == len(v2))
_assertTrue(all(v1[x] == v2[x] for x in range(len(v1))))
elif containerish(v1) :
_assertTrue(len(v1) == 1 and v1[0] == v2)
else :
_assertTrue(v1 == v2)
def deepFlatten(x) :
    # does a FULL recursive flatten of nested lists/tuples
    # (pure python, so it behaves the same under python 2 and 3,
    # and works equally well with tuples as lists)
    if isinstance(x, (list, tuple)):
        return tuple(itertools.chain(*map(deepFlatten, x)))
    return (x,)
def shallowFlatten(x) :
return tuple(itertools.chain(*x))
# gurobi netflow problem - http://www.gurobi.com/documentation/6.0/example-tour/netflow_py
def netflowSchema():
return {
"commodities" : [["name"], []],
"nodes": [["name"],[]],
"arcs" : [("source", "destination"),["capacity"]],
"cost" : [("commodity", "source", "destination"),["cost"]],
"inflow" :[["commodity", "node"], ["quantity"]],
}
def netflowData() :
class _(object) :
pass
dat = _() # simplest object with a __dict__
# simplest possible copy
dat.commodities = ['Pencils', 'Pens']
dat.nodes = ['Detroit', 'Denver', 'Boston', 'New York', 'Seattle']
dat.arcs = {
('Detroit', 'Boston'): 100,
('Detroit', 'New York'): 80,
('Detroit', 'Seattle'): 120,
('Denver', 'Boston'): 120,
('Denver', 'New York'): 120,
('Denver', 'Seattle'): 120 }
dat.cost = {
('Pencils', 'Detroit', 'Boston'): 10,
('Pencils', 'Detroit', 'New York'): 20,
('Pencils', 'Detroit', 'Seattle'): 60,
('Pencils', 'Denver', 'Boston'): 40,
('Pencils', 'Denver', 'New York'): 40,
('Pencils', 'Denver', 'Seattle'): 30,
('Pens', 'Detroit', 'Boston'): 20,
('Pens', 'Detroit', 'New York'): 20,
('Pens', 'Detroit', 'Seattle'): 80,
('Pens', 'Denver', 'Boston'): 60,
('Pens', 'Denver', 'New York'): 70,
('Pens', 'Denver', 'Seattle'): 30 }
dat.inflow = {
('Pencils', 'Detroit'): 50,
('Pencils', 'Denver'): 60,
('Pencils', 'Boston'): -50,
('Pencils', 'New York'): -50,
('Pencils', 'Seattle'): -10,
('Pens', 'Detroit'): 60,
('Pens', 'Denver'): 40,
('Pens', 'Boston'): -40,
('Pens', 'New York'): -30,
('Pens', 'Seattle'): -30 }
return dat
def copy_to_pandas_with_reset(tdf, dat):
rtn = tdf.copy_to_pandas(dat, drop_pk_columns=False)
for t in tdf.all_tables:
getattr(rtn, t).reset_index(drop=True, inplace=True)
return rtn
def netflowPandasData():
tdf = TicDatFactory(**netflowSchema())
dat = tdf.copy_tic_dat(netflowData())
return copy_to_pandas_with_reset(tdf, dat)
def addNetflowForeignKeys(tdf) :
tdf.add_foreign_key("arcs", "nodes", [u'source', u'name'])
tdf.add_foreign_key("arcs", "nodes", (u'destination', u'name'))
tdf.add_foreign_key("cost", "nodes", (u'source', u'name'))
tdf.add_foreign_key("cost", "nodes", [u'destination', u'name'])
tdf.add_foreign_key("cost", "commodities", (u'commodity', u'name'))
tdf.add_foreign_key("inflow", "commodities", (u'commodity', u'name'))
tdf.add_foreign_key("inflow", "nodes", [u'node', u'name'])
def addNetflowDataTypes(tdf):
tdf.set_data_type("arcs", "capacity")
tdf.set_data_type("cost", "cost")
tdf.set_data_type("inflow", "quantity", min=-float("inf"),
inclusive_min=False)
# gurobi diet problem - http://www.gurobi.com/documentation/6.0/example-tour/diet_py
def dietSchema():
return {
"categories" : (("name",),["minNutrition", "maxNutrition"]),
"foods" :[["name"],("cost",)],
"nutritionQuantities" : (["food", "category"], ["qty"])
}
def dietSchemaWeirdCase():
return {
"cateGories" : (("name",),["miNnutrition", "maXnutrition"]),
"foodS" :[["name"],("COST",)],
"nutritionquantities" : (["food", "category"], ["qtY"])
}
def copyDataDietWeirdCase(dat):
tdf = TicDatFactory(**dietSchemaWeirdCase())
rtn = tdf.TicDat()
for c,r in dat.categories.items():
rtn.cateGories[c]["miNnutrition"] = r["minNutrition"]
rtn.cateGories[c]["maXnutrition"] = r["maxNutrition"]
for f,r in dat.foods.items():
rtn.foodS[f] = r["cost"]
for (f,c),r in dat.nutritionQuantities.items():
rtn.nutritionquantities[f,c] = r["qty"]
return rtn
def dietSchemaWeirdCase2():
rtn = dietSchemaWeirdCase()
rtn["nutrition_quantities"] = rtn["nutritionquantities"]
del(rtn["nutritionquantities"])
return rtn
def copyDataDietWeirdCase2(dat):
tdf = TicDatFactory(**dietSchemaWeirdCase2())
tmp = copyDataDietWeirdCase(dat)
rtn = tdf.TicDat(cateGories = tmp.cateGories, foodS = tmp.foodS)
for (f,c),r in tmp.nutritionquantities.items():
rtn.nutrition_quantities[f,c] = r
return rtn
def addDietForeignKeys(tdf) :
tdf.add_foreign_key("nutritionQuantities", 'categories',[u'category', u'name'])
tdf.add_foreign_key("nutritionQuantities", 'foods', (u'food', u'name'))
def addDietDataTypes(tdf):
for table, fields in tdf.data_fields.items():
for field in fields:
tdf.set_data_type(table, field)
# We override the default data type for maxNutrition which can accept infinity
tdf.set_data_type("categories", "maxNutrition", max=float("inf"),
inclusive_max=True)
def dietData():
# this is the gurobi diet data in ticDat format
class _(object) :
pass
dat = _() # simplest object with a __dict__
dat.categories = {
'calories': {"minNutrition": 1800, "maxNutrition" : 2200},
'protein': {"minNutrition": 91, "maxNutrition" : float("inf")},
'fat': {"minNutrition": 0, "maxNutrition" : 65},
'sodium': {"minNutrition": 0, "maxNutrition" : 1779}}
# deliberately goofing on it a little bit
dat.foods = {
'hamburger': {"cost": 2.49},
'chicken': {"cost": 2.89},
'hot dog': {"cost": 1.50},
'fries': {"cost": 1.89},
'macaroni': 2.09,
'pizza': {"cost": 1.99},
'salad': {"cost": 2.49},
'milk': (0.89,),
'ice cream': {"cost": 1.59}}
dat.nutritionQuantities = {
('hamburger', 'calories'): {"qty" : 410},
('hamburger', 'protein'): {"qty" : 24},
('hamburger', 'fat'): {"qty" : 26},
('hamburger', 'sodium'): {"qty" : 730},
('chicken', 'calories'): {"qty" : 420},
('chicken', 'protein'): {"qty" : 32},
('chicken', 'fat'): {"qty" : 10},
('chicken', 'sodium'): {"qty" : 1190},
('hot dog', 'calories'): {"qty" : 560},
('hot dog', 'protein'): {"qty" : 20},
('hot dog', 'fat'): {"qty" : 32},
('hot dog', 'sodium'): {"qty" : 1800},
('fries', 'calories'): {"qty" : 380},
('fries', 'protein'): {"qty" : 4},
('fries', 'fat'): {"qty" : 19},
('fries', 'sodium'): {"qty" : 270},
('macaroni', 'calories'): {"qty" : 320},
('macaroni', 'protein'): {"qty" : 12},
('macaroni', 'fat'): {"qty" : 10},
('macaroni', 'sodium'): {"qty" : 930},
('pizza', 'calories'): {"qty" : 320},
('pizza', 'protein'): {"qty" : 15},
('pizza', 'fat'): {"qty" : 12},
('pizza', 'sodium'): {"qty" : 820},
('salad', 'calories'): {"qty" : 320},
('salad', 'protein'): {"qty" : 31},
('salad', 'fat'): {"qty" : 12},
('salad', 'sodium'): {"qty" : 1230},
('milk', 'calories'): {"qty" : 100},
('milk', 'protein'): {"qty" : 8},
('milk', 'fat'): {"qty" : 2.5},
('milk', 'sodium'): {"qty" : 125},
('ice cream', 'calories'): {"qty" : 330},
('ice cream', 'protein'): {"qty" : 8},
('ice cream', 'fat'): {"qty" : 10},
('ice cream', 'sodium'): {"qty" : 180} }
return dat
def dietSolver(modelType):
tdf = TicDatFactory(**dietSchema())
addDietForeignKeys(tdf)
addDietDataTypes(tdf)
dat = tdf.copy_tic_dat(dietData())
assert not tdf.find_data_type_failures(dat) and not tdf.find_foreign_key_failures(dat)
mdl = Model(modelType, "diet")
nutrition = {}
for c,n in dat.categories.items() :
nutrition[c] = mdl.add_var(lb=n["minNutrition"], ub=n["maxNutrition"], name=c)
# Create decision variables for the foods to buy
buy = {}
for f in dat.foods:
buy[f] = mdl.add_var(name=f)
# Nutrition constraints
for c in dat.categories:
mdl.add_constraint(mdl.sum(dat.nutritionQuantities[f,c]["qty"] * buy[f]
for f in dat.foods)
== nutrition[c],
name = c)
mdl.set_objective(mdl.sum(buy[f] * c["cost"] for f,c in dat.foods.items()))
if mdl.optimize():
solutionFactory = TicDatFactory(
parameters = [[],["totalCost"]],
buyFood = [["food"],["qty"]],
consumeNutrition = [["category"],["qty"]])
sln = solutionFactory.TicDat()
for f,x in buy.items():
if mdl.get_solution_value(x) > 0.0001:
sln.buyFood[f] = mdl.get_solution_value(x)
for c,x in nutrition.items():
sln.consumeNutrition[c] = mdl.get_solution_value(x)
return sln, sum(dat.foods[f]["cost"] * r["qty"] for f,r in sln.buyFood.items())
def netflowSolver(modelType):
tdf = TicDatFactory(**netflowSchema())
addNetflowForeignKeys(tdf)
addNetflowDataTypes(tdf)
dat = tdf.copy_tic_dat(netflowData())
assert not tdf.find_data_type_failures(dat) and not tdf.find_foreign_key_failures(dat)
mdl = Model(modelType, "netflow")
flow = {}
for h, i, j in dat.cost:
if (i,j) in dat.arcs:
flow[h,i,j] = mdl.add_var(name='flow_%s_%s_%s' % (h, i, j))
flowslice = Slicer(flow)
for i_,j_ in dat.arcs:
mdl.add_constraint(mdl.sum(flow[h,i,j] for h,i,j in flowslice.slice('*',i_, j_))
<= dat.arcs[i_,j_]["capacity"],
name = 'cap_%s_%s' % (i_, j_))
for h_,j_ in set(k for k,v in dat.inflow.items() if abs(v["quantity"]) > 0).union(
{(h,i) for h,i,j in flow}, {(h,j) for h,i,j in flow}) :
mdl.add_constraint(
mdl.sum(flow[h,i,j] for h,i,j in flowslice.slice(h_,'*',j_)) +
dat.inflow.get((h_,j_), {"quantity":0})["quantity"] ==
mdl.sum(flow[h,i,j] for h,i,j in flowslice.slice(h_, j_, '*')),
name = 'node_%s_%s' % (h_, j_))
    mdl.set_objective(mdl.sum(var * dat.cost[h, i, j]["cost"] for (h, i, j), var in flow.items()))
    if mdl.optimize():
        solutionFactory = TicDatFactory(
            flow = [["commodity", "source", "destination"], ["quantity"]])
        rtn = solutionFactory.TicDat()
        for (h, i, j), var in flow.items():
            if mdl.get_solution_value(var) > 0:
                rtn.flow[h, i, j] = mdl.get_solution_value(var)
        return rtn, sum(dat.cost[h, i, j]["cost"] * r["quantity"] for (h, i, j), r in rtn.flow.items())
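# Usage sketch ("gurobi" assumes gurobipy is installed; the ticdat Model
# wrapper also accepts other engine names):
#   diet_sln, diet_cost = dietSolver("gurobi")
#   flow_sln, flow_cost = netflowSolver("gurobi")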
def sillyMeSchema() :
return {"a" : [("aField",),("aData1", "aData2", "aData3") ],
"b" : [("bField1", "bField2", "bField3"), ["bData"]],
"c" : [[],("cData1", "cData2", "cData3", "cData4")]}
def sillyMeData() :
return {
"a" : {1 : (1, 2, 3), "b" : ("b", "d", 12), 0.23 : (11, 12, "thirt")},
"b" : {(1, 2, 3) : 1, ("a", "b", "b") : 12},
"c" : ((1, 2, 3, 4), ("a", "b", "c", "d"), ("a", "b", 12, 24) )
}
def sillyMeDataTwoTables():
return {k:v for k,v in sillyMeData().items() if k != "b"}
EPSILON = 1e-05
def perError(x1, x2) :
x1 = float(x1)
x2 = float(x2)
if (x1 < 0) and (x2 < 0) :
return perError(-x1, -x2)
    if (x1 == float("inf")) :
        return 0 if (x2 == float("inf")) else x1
    if (x2 == float("inf")) :
        ## symmetric guard - without it the division below yields nan
        return x2
SMALL_NOT_ZERO = 1e-10
assert(EPSILON>SMALL_NOT_ZERO)
abs1 = abs(x1)
abs2 = abs(x2)
# is it safe to divide by the bigger absolute value
if (max(abs1, abs2) > SMALL_NOT_ZERO) :
rtn = ((max(x1, x2) - min(x1, x2)) / max(abs1, abs2))
return rtn
return 0
def _nearlySame(x1, x2, epsilon) :
return perError(x1, x2) < epsilon
def nearlySame(*args, **kwargs) :
assert not kwargs or list(kwargs.keys()) == ["epsilon"]
epsilon = kwargs.get("epsilon", EPSILON)
if len(args) < 2 :
return True
return all (_nearlySame(x[0], x[1], epsilon) for x in
itertools.combinations(args, 2))
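# Usage sketch: comparisons are pairwise, by relative error.
#   assert nearlySame(1000.0, 1000.0000001)
#   assert not nearlySame(1.0, 1.1)
#   assert nearlySame(1.0, 1.001, epsilon=1e-2)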
def pan_dat_maker(schema, tic_dat):
tdf = TicDatFactory(**schema)
pdf = PanDatFactory(**schema)
return pdf.copy_pan_dat(copy_to_pandas_with_reset(tdf, tic_dat))
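# Usage sketch: build a pandas-backed PanDat from the diet fixture.
#   tic_diet = TicDatFactory(**dietSchema()).copy_tic_dat(dietData())
#   pan_diet = pan_dat_maker(dietSchema(), tic_diet)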
| |
import os
import sys
import time
import logging
import difflib
import urllib2
from utils import urlencode
import xml.etree.ElementTree as ET
from flask import current_app
# from httplib2 import Http
from KalturaClient import *
from KalturaMetadataClientPlugin import *
from KalturaCaptionClientPlugin import *
from KalturaCoreClient import KalturaThumbAsset, KalturaUrlResource, \
KalturaEntryStatus, KalturaThumbParams, KalturaNullableBoolean
# to handle some particular kaltura exceptions:
from KalturaClientBase import KalturaException
import properties
from utils import convert_file_to_unicode, addFileLogger
if current_app:
logger = current_app.logger
else:
logger = logging.getLogger('myKalturaObject')
addFileLogger(logger, "myKalturaObject.log", 2)
SETTINGS = {}
properties.load_kaltura_settings(SETTINGS)
KALTURA_REQUEST_TIMEOUT = 60
KS_EXPIRY = 600
DEFAULT_SEARCH_FIELD_LIST = [
'id',
'name',
'status',
'description',
'tags',
'url',
'thumbnail_url',
'media_type',
'plays',
'views',
'rank',
'width',
'height',
'duration',
'views',
'updated',
'created',
'searchtext',
'startDate',
'endDate',
'partner_id',
'user_id',
'download_url',
'source_type',
'mediaDate',
'flavorParamsId',
'conversionQuality',
'creditUserName',
'creditUrl'
]
LANGUAGE_LIST = [
"Abkhazian",
"Afar",
"Afrikaans",
"Albanian",
"Amharic",
"Arabic",
"Armenian",
"Assamese",
"Aymara",
"Azerbaijani",
"Bashkir",
"Basque",
"Bengali (Bangla)",
"Bhutani",
"Bihari",
"Bislama",
"Breton",
"Bulgarian",
"Burmese",
"Byelorussian (Belarusian)",
"Cambodian",
"Catalan",
"Chinese",
"Corsican",
"Croatian",
"Czech",
"Danish",
"Dutch",
"English",
"Esperanto",
"Estonian",
"Faeroese",
"Farsi",
"Fiji",
"Finnish",
"French",
"Frisian",
"Galician",
"Gaelic (Scottish)",
"Gaelic (Manx)",
"Georgian",
"German",
"Greek",
"Greenlandic",
"Guarani",
"Gujarati",
"Hausa",
"Hebrew",
"Hebrew",
"Hindi",
"Hungarian",
"Icelandic",
"Indonesian",
"Indonesian",
"Interlingua",
"Interlingue",
"Inuktitut",
"Inupiak",
"Irish",
"Italian",
"Japanese",
"Javanese",
"Kannada",
"Kashmiri",
"Kazakh",
"Kinyarwanda (Ruanda)",
"Kirghiz",
"Kirundi (Rundi)",
"Korean",
"Kurdish",
"Laothian",
"Latin",
"Latvian (Lettish)",
"Limburgish ( Limburger)",
"Lingala",
"Lithuanian",
"Macedonian",
"Malagasy",
"Malay",
"Malayalam",
"Maltese",
"Maori",
"Marathi",
"Moldavian",
"Mongolian",
"Nauru",
"Nepali",
"Norwegian",
"Occitan",
"Oriya",
"Oromo (Afan, Galla)",
"Pashto (Pushto)",
"Polish",
"Portuguese",
"Punjabi",
"Quechua",
"Rhaeto-Romance",
"Romanian",
"Russian",
"Samoan",
"Sangro",
"Sanskrit",
"Serbian",
"Serbo-Croatian",
"Sesotho",
"Setswana",
"Shona",
"Sindhi",
"Sinhalese",
"Siswati",
"Slovak",
"Slovenian",
"Somali",
"Spanish",
"Sundanese",
"Swahili (Kiswahili)",
"Swedish",
"Tagalog",
"Tajik",
"Tamil",
"Tatar",
"Telugu",
"Thai",
"Tibetan",
"Tigrinya",
"Tonga",
"Tsonga",
"Turkish",
"Turkmen",
"Twi",
"Uighur",
"Ukrainian",
"Urdu",
"Uzbek",
"Vietnamese",
"Volapuk",
"Welsh",
"Wolof",
"Xhosa",
"Yiddish",
"Yoruba",
"Zulu"
]
kts_mobile_flavor_payload = {
"ks": "",
"clientTag": "testme",
"service": "flavorParams",
"action": "add",
"flavorParams:objectType": "KalturaFlavorParams",
"flavorParams:partnerId": "99",
"flavorParams:name": "kts_ipad",
"flavorParams:systemName": "kts_ipad",
"flavorParams:description": "Mobile/iPad friendly flavor added by KTS.",
"flavorParams:tags": "web,ipad,mobile,kts",
"flavorParams:videoCodec": "h264",
"flavorParams:videoBitrate": "1500",
"flavorParams:audioCodec": "aac",
"flavorParams:audioBitrate": "160",
"flavorParams:audioChannels": "2",
"flavorParams:audioSampleRate": "4800",
"flavorParams:width": "1024",
"flavorParams:height": "0",
"flavorParams:frameRate": "25",
"flavorParams:gopSize": "50",
"flavorParams:conversionEngines": "2,99",
"flavorParams:conversionEnginesExtraParams": "-flags +loop+mv4 -cmp 256 -partitions +parti4x4+partp8x8+partb8x8 -trellis 1 -refs 1 -me_range 16 -keyint_min 20 -sc_threshold 40 -i_qfactor 0.71 -bt 800k -maxrate 1200k -bufsize 1200k -rc_eq 'blurCplx^(1-qComp)' -level 30 -async 2 -vsync 2 | -flags +loop+mv4 -cmp 256 -partitions +parti4x4+partp8x8+partb8x8 -trellis 1 -refs 1 -me_range 16 -keyint_min 20 -sc_threshold 40 -i_qfactor 0.71 -bt 800k -maxrate 1200k -bufsize 1200k -rc_eq 'blurCplx^(1-qComp)' -level 30 -async 2 -vsync 2",
"flavorParams:twoPass": "0",
"flavorParams:deinterlice": "0",
"flavorParams:rotate": "0",
"flavorParams:operators": "",
"flavorParams:engineVersion": "0",
"flavorParams:format": "mp4",
"flavorParams:aspectRatioProcessingMode": "0",
"flavorParams:forceFrameToMultiplication16": "1",
"flavorParams:videoConstantBitrate": "0",
"flavorParams:videoBitrateTolerance": "0",
"flavorParams:requiredPermissions:item1:objectType": "KalturaString",
"flavorParams:requiredPermissions:item1:value": "FEATURE_MOBILE_FLAVORS"
}
def sort_by_field(l, field, ascending=True):
    # sort directly on the field value; this is stable and keeps every item,
    # even when several items share the same value for the field
    return sorted(l, key=lambda item: item[field], reverse=not ascending)
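# Usage sketch:
#   clips = [{"created_at": 3}, {"created_at": 1}, {"created_at": 2}]
#   newest_first = sort_by_field(clips, "created_at", ascending=False)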
def parse_caption_language(capfile, capformat):
if capformat == 'srt':
fname = os.path.basename(capfile)
parts = fname.split('.')
language = parts[-2] if len(parts) > 2 else parts[0]
else:
tree = ET.parse(capfile)
root = tree.getroot()
langcode = ''
for k, v in root.attrib.items():
if k.endswith('lang'):
langcode = v
if not langcode:
langcode = "en"
logger.info('no language found in xml, defaulting to English.')
langcode = langcode.upper()
        logger.debug(langcode)
try:
language = KalturaLanguage.__dict__[langcode]
        except KeyError:
language = "English"
logger.warning('Unsupported language "{}" in xml, '
'defaulting to English'.format(langcode))
return language
class KalturaLogger(IKalturaLogger):
def log(self, msg):
logger.info(msg)
def GetConfig(settings):
if not settings:
raise Exception("Settings not set in GetConfig")
partner_id = int(settings.get('PARTNER_ID'))
service_url = settings.get('SERVICE_URL')
print ("-> GetConfig", partner_id, service_url)
config = KalturaConfiguration(partner_id)
config.serviceUrl = service_url
config.setLogger(KalturaLogger())
config.requestTimeout = KALTURA_REQUEST_TIMEOUT
return config
def get_new_session_key(settings):
if not settings:
raise Exception("Settings not set in get_new_session_key")
partner_id = settings.get('PARTNER_ID')
admin_secret = settings.get('ADMIN_SECRET')
user_name = settings.get('USER_NAME')
client = KalturaClient(GetConfig(settings))
print ("-> get_new_session_key", admin_secret, user_name, partner_id)
return client.session.start(admin_secret,
user_name,
KalturaSessionType.ADMIN,
partner_id, KS_EXPIRY, "")
def create_session(kaltura_id, ks=None):
settings = properties.load_kaltura_settings().get(kaltura_id)
if not settings:
raise Exception("Kaltura ID %s Settings %s" % (kaltura_id, settings))
client = KalturaClient(GetConfig(settings))
if not ks:
ks = get_new_session_key(settings)
client.setKs(ks)
return client
def count(client, mediafilter=None):
    ''' Count media entries matching the given filter (total count when mediafilter is None).
'''
try:
if client is None:
raise Exception("Client can not be None")
count = client.media.count(mediafilter)
return (True, count)
except Exception as e:
return (False,
"Unexpected error:" + "<p>" + repr(sys.exc_info()) +
"<br>Exception: " + str(e) + "</p>")
def pullVideo(pull_path,
media_name,
media_tags=None,
media_description=None,
client=None,
media_type=None):
try:
# create session
if client is None:
raise Exception("Client can not be None")
        # add the media entry
mediaEntry = KalturaMediaEntry()
mediaEntry.setName(media_name)
if media_tags:
mediaEntry.setTags(media_tags)
if media_description:
mediaEntry.setDescription(media_description)
if not media_type:
media_type = KalturaMediaType(KalturaMediaType.VIDEO)
mediaEntry.setMediaType(media_type)
mediaEntry = client.media.addFromUrl(mediaEntry, pull_path)
return (True, mediaEntry.id)
except:
return (False,
"Unexpected error:" + "<p>" + repr(sys.exc_info()) + "</p>")
def uploadVideo(file_path,
media_name,
media_tags=None,
media_description=None,
client=None,
media_type=None):
try:
# create session
if client is None:
raise Exception("Client can not be None")
# add media
uploadTokenId = client.media.upload(open(file_path, 'rb'))
        # add the media entry
mediaEntry = KalturaMediaEntry()
mediaEntry.setName(media_name)
if media_tags:
mediaEntry.setTags(media_tags)
if media_description:
mediaEntry.setDescription(media_description)
if not media_type:
mediaEntry.setMediaType(KalturaMediaType(KalturaMediaType.VIDEO))
else:
mediaEntry.setMediaType(KalturaMediaType(media_type))
mediaEntry = client.media.addFromUploadedFile(
mediaEntry, uploadTokenId)
return (True, mediaEntry.id)
except Exception as e:
return (False,
"Unexpected error:" + "<p>" + str(e) + "</p>")
def updateThumbnail(file_path, entry_id, client=None, set_default=False):
try:
# create session
if client is None:
raise Exception("Client can not be None")
thumbnail_asset_service = KalturaThumbAssetService(client)
thumb_asset = KalturaThumbAsset()
thumb_asset = thumbnail_asset_service.addFromImage(entry_id,
file(file_path,
'rb'))
if set_default:
thumbnail_set_default(client, thumb_asset.getId())
# mediaEntry = client.baseEntry.updateThumbnailJpeg(
# entry_id, file(file_path, 'rb'))
return (True, thumb_asset.getId())
except:
return (False,
"Unexpected error:" + "<p>" + repr(sys.exc_info()) + "</p>")
def updateThumbnailFromPlayer(offset, entry_id, client=None):
try:
if client is None:
raise Exception("Client can not be None")
thumbnail_asset_service = KalturaThumbAssetService(client)
thumb_params = KalturaThumbParams()
thumb_params.setVideoOffset(round(offset))
source_asset_id = None
thumbasset = thumbnail_asset_service.generate(entry_id, thumb_params,
source_asset_id)
thumbnail_set_default(client, thumbasset.id)
# mediaEntry = client.baseEntry.updateThumbnailFromSourceEntry(
# entry_id, entry_id, round(offset))
return (True, thumbasset.id)
except:
return (False,
"Unexpected error:" + "<p>" + repr(sys.exc_info()) + "</p>")
def thumbnail_delete(client, thumbnail_id):
msgs = []
try:
thumbnail_asset_service = KalturaThumbAssetService(client)
thumbnail_asset_service.delete(thumbnail_id)
msgs.append('Thumbnail with id %s deleted' % thumbnail_id)
returncontent = {'success': True, 'message': msgs}
except KalturaException as ke:
if ke.code == 'THUMB_ASSET_IS_DEFAULT':
            returncontent = {'success': False,
                             'message': ke.message,
                             'error': 'thumbnail is default'}
else:
msgs.append(
'Deletion failed for thumbnail with ID %s' % thumbnail_id)
msgs.append('Exception: ' + str(ke))
returncontent = {'success': False, 'message': msgs}
except Exception as e:
msgs.append('Deletion failed for thumbnail with ID %s' % thumbnail_id)
msgs.append('Exception: ' + str(e))
returncontent = {'success': False, 'message': msgs}
return returncontent
def thumbnail_list(client, entry_id, in_dict=True):
try:
thumb_asset_service = KalturaThumbAssetService(client)
kfilter = KalturaAssetFilter()
kfilter.entryIdEqual = entry_id
pager = None
response = thumb_asset_service.list(kfilter, pager)
thumbnail_list = []
for thumbnail in response.objects:
if in_dict:
thumbinfo = thumbnail_dictify(thumbnail)
thumbinfo['url'] = thumb_asset_service.getUrl(thumbinfo['id'],
None)
thumbnail_list.append(thumbinfo)
else:
thumbnail_list.append(thumbnail)
thumbnail_list = sort_by_field(thumbnail_list,
"created_at", ascending=False)
return thumbnail_list
except:
return (False,
"Unexpected error retrieving thumbnail list:" + "<p>" + repr(
sys.exc_info()) + "</p>")
def thumbnail_set_default(client, thumbnail_id):
msgs = []
try:
thumbnail_asset_service = KalturaThumbAssetService(client)
thumbnail_asset_service.setAsDefault(thumbnail_id)
msgs.append('Thumbnail with id ' + thumbnail_id + ' set as default')
return {'success': True, 'message': msgs}
except Exception as e:
msgs.append(
'setAsDefault failed for thumbnail with ID %s' % thumbnail_id)
msgs.append('Exception: ' + str(e))
return {'success': False, 'message': msgs}
def thumbnail_get_default(client, entry_id, in_dict=True):
msgs = []
try:
thumb_asset_service = KalturaThumbAssetService(client)
kfilter = KalturaAssetFilter()
kfilter.entryIdEqual = entry_id
pager = None
response = thumb_asset_service.list(kfilter, pager)
thumbnail_list = []
for thumbnail in response.objects:
if in_dict:
thumbinfo = thumbnail_dictify(thumbnail)
thumbinfo['url'] = thumb_asset_service.getUrl(thumbinfo['id'],
None)
thumbnail_list.append(thumbinfo)
else:
thumbnail_list.append(thumbnail)
default_thumb = None
for item in thumbnail_list:
            if item.get('default', False):
default_thumb = item
return default_thumb
except:
return (False,
"Unexpected error retrieving thumbnail list:" + "<p>" + repr(
sys.exc_info()) + "</p>")
def thumbnail_add_from_url(client, entry_id, thumburl):
msgs = []
try:
thumbnail_asset_service = KalturaThumbAssetService(client)
thumb_asset = KalturaThumbAsset()
thumb_asset = thumbnail_asset_service.add(entry_id, thumb_asset)
thumb_id = thumb_asset.getId()
msgs.append('ThumbAsset added to entry, id %s' % thumb_id)
content_resource = KalturaUrlResource(url=thumburl)
thumbnail_asset_service.setContent(thumb_id, content_resource)
msgs.append(
'url %s applied to thumbnail with id %s' % (thumburl, thumb_id))
return {'success': True, 'message': msgs}
except Exception as e:
        msgs.append(
            'Failed to add thumbnail from %s to media with entry_id %s' % (
                thumburl, entry_id))
msgs.append('Exception: ' + str(e))
return {'success': False, 'message': msgs}
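# Usage sketch: attach a remote image and promote it to the default thumbnail.
#   thumbnail_add_from_url(client, entry_id, 'http://example.com/thumb.jpg')
#   thumbs = thumbnail_list(client, entry_id)
#   thumbnail_set_default(client, thumbs[0]['id'])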
def add_kts_mobile_flavor(client, kaltura_id):
payload = kts_mobile_flavor_payload.copy()
payload['ks'] = client.getKs()
settings = properties.load_kaltura_settings().get(kaltura_id)
url = "%s/api_v3/" % settings['SERVICE_URL']
data = urlencode(payload)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req).read()
contentxml = ET.fromstring(response)
flavor_id = contentxml.find('./result/id').text
return flavor_id
def add_flavor_to_default_conversion_profile(client, flavor_id, kaltura_id):
def_cp = client.conversionProfile.getDefault()
settings = properties.load_kaltura_settings().get(kaltura_id)
cp_id = def_cp.id
cp_flavorParamsIds = def_cp.flavorParamsIds + u',{}'.format(flavor_id)
url = "{}/api_v3/?service=conversionProfile&action=update".format(
settings['SERVICE_URL'])
payload = {}
payload['ks'] = client.getKs()
payload['id'] = cp_id
payload['conversionProfile:flavorParamsIds'] = cp_flavorParamsIds
data = urlencode(payload)
req = urllib2.Request(url, data)
response = urllib2.urlopen(req).read()
if "<error>" in response:
raise Exception(response)
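# Usage sketch: register the mobile flavor and wire it into the default
# conversion profile (kaltura_id is a key in the local settings file):
#   client = create_session(kaltura_id)
#   flavor_id = add_kts_mobile_flavor(client, kaltura_id)
#   add_flavor_to_default_conversion_profile(client, flavor_id, kaltura_id)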
def searchVideos(client,
kaltura_id=None,
composite=False,
page_size=None,
page_index=None):
# Setup a pager and search to use
# pager = KalturaFilterPager()
# pager.setPageSize(5)
# pager.setPageIndex(1)
pager = None
if page_size:
pager = KalturaFilterPager()
pager.setPageSize(page_size)
if page_index:
pager.setPageIndex(page_index)
search = KalturaMediaEntryFilter()
search.setOrderBy(KalturaMediaEntryOrderBy.CREATED_AT_ASC)
# search.setMediaTypeEqual(KalturaMediaType.VIDEO) # Video only
print ("List videos, get the first one...")
# Get 10 video entries, but we'll just use the first one returned
entries = client.media.list(search, pager).objects
entriesData = []
for entry in entries:
entryData = {}
# Status
status_codes = {
KalturaEntryStatus.ERROR_IMPORTING: "ERROR_IMPORTING",
KalturaEntryStatus.ERROR_CONVERTING: "ERROR_CONVERTING",
KalturaEntryStatus.IMPORT: "IMPORT",
KalturaEntryStatus.PRECONVERT: "PRECONVERT",
KalturaEntryStatus.READY: "READY",
KalturaEntryStatus.DELETED: "DELETED",
KalturaEntryStatus.PENDING: "PENDING",
KalturaEntryStatus.MODERATE: "MODERATE",
KalturaEntryStatus.BLOCKED: "BLOCKED",
KalturaEntryStatus.NO_CONTENT: "NO_CONTENT",
KalturaEntryStatus.INFECTED: "virusScan.Infected",
KalturaEntryStatus.SCAN_FAILURE: "virusScan.ScanFailure"
}
entryData['status'] = status_codes.get(entry.getStatus().getValue(),
'UNKNOWN')
# Urls/Accessors
entryData['url'] = entry.getDataUrl()
entryData['download_url'] = entry.getDownloadUrl()
entryData['thumbnail_url'] = entry.getThumbnailUrl()
# Type (media type and source type)
typelist = {
KalturaMediaType.VIDEO: 'VIDEO',
KalturaMediaType.IMAGE: 'IMAGE',
KalturaMediaType.AUDIO: 'AUDIO',
KalturaMediaType.LIVE_STREAM_FLASH: 'LIVE_STREAM_FLASH',
KalturaMediaType.LIVE_STREAM_WINDOWS_MEDIA:
'LIVE_STREAM_WINDOWS_MEDIA',
KalturaMediaType.LIVE_STREAM_REAL_MEDIA: 'LIVE_STREAM_REAL_MEDIA',
KalturaMediaType.LIVE_STREAM_QUICKTIME: 'LIVE_STREAM_QUICKTIME'
}
sourcetypelist = {
KalturaSourceType.FILE: "FILE",
KalturaSourceType.WEBCAM: "WEBCAM",
KalturaSourceType.URL: "URL",
KalturaSourceType.SEARCH_PROVIDER: "SEARCH_PROVIDER",
KalturaSourceType.AKAMAI_LIVE: "AKAMAI_LIVE",
KalturaSourceType.MANUAL_LIVE_STREAM: "MANUAL_LIVE_STREAM"
}
entryData['media_type'] = typelist.get(entry.getMediaType().getValue(),
'UNKNOWN')
entryData['source_type'] = sourcetypelist.get(
entry.getSourceType().getValue(), 'UNKNOWN')
# ViewData
entryData['plays'] = entry.getPlays()
entryData['views'] = entry.getViews()
entryData['rank'] = '%s, %s' % (entry.getRank(), entry.getTotalRank())
# Video Properties
entryData['width'] = entry.getWidth()
entryData['height'] = entry.getHeight()
entryData['duration'] = entry.getDuration()
entryData['created'] = entry.getCreatedAt()
entryData['updated'] = entry.getUpdatedAt()
# Video Data
entryData['name'] = entry.getName()
entryData['description'] = entry.getDescription()
entryData['tags'] = entry.getTags()
# Search Text
entryData['searchtext'] = entry.getSearchText()
# Control
entryData['startDate'] = entry.getStartDate()
entryData['endDate'] = entry.getEndDate()
# Extra KalturaMediaEntry data
entryData['conversionQuality'] = entry.getConversionQuality()
entryData['creditUrl'] = entry.getCreditUrl()
entryData['creditUserName'] = entry.getCreditUserName()
entryData['flavorParamsId'] = entry.getFlavorParamsIds()
entryData['mediaDate'] = entry.getMediaDate()
# Identifiers
entryData['partner_id'] = entry.getPartnerId()
entryData['user_id'] = entry.getUserId()
if kaltura_id:
if composite:
entryData['id'] = 'kal:' + str(kaltura_id) + ':' + entry.getId()
else:
entryData['kaltura_id'] = str(kaltura_id)
entryData['entry_id'] = entry.getId()
else:
entryData['id'] = entry.getId()
entriesData.append(entryData)
return entriesData
def get_entry(media_id, kaltura_id, client=None, width=120, height=120):
settings = properties.load_kaltura_settings().get(kaltura_id)
error = False
error_message = None
entry = None
try:
entry = client.media.get(media_id)
except Exception as inst:
error_message = str(inst)
error = True
entryData = {}
if error:
entryData['success'] = False
entryData['message'] = error_message
else:
entryData['success'] = True
typelist = {
KalturaMediaType.VIDEO: 'VIDEO',
KalturaMediaType.IMAGE: 'IMAGE',
KalturaMediaType.AUDIO: 'AUDIO',
KalturaMediaType.LIVE_STREAM_FLASH: 'LIVE_STREAM_FLASH',
KalturaMediaType.LIVE_STREAM_WINDOWS_MEDIA:
'LIVE_STREAM_WINDOWS_MEDIA',
KalturaMediaType.LIVE_STREAM_REAL_MEDIA: 'LIVE_STREAM_REAL_MEDIA',
KalturaMediaType.LIVE_STREAM_QUICKTIME: 'LIVE_STREAM_QUICKTIME'
}
entryData['media_type'] = typelist[entry.getMediaType().getValue()]
# Urls/Accessors
entryData['url'] = entry.getDataUrl()
entryData['thumbnail_url_old'] = entry.getThumbnailUrl()
url = "%s/api_v3/?service=thumbasset&action=list" % settings[
'SERVICE_URL']
data = urlencode({"filter:entryIdEqual": media_id, "action": "list"})
req = urllib2.Request(url, data)
content = urllib2.urlopen(req).read()
entryData['ks'] = client.getKs()
contentxml = ET.fromstring(content)
thumb_id = ''
for item in contentxml.findall('./result/objects/item'):
tags = item.find('tags').text
if tags and 'default_thumb' in tags:
thumb_id = item.find('id').text
thumbnail_url = "%s/p/%s/thumbnail/entry_id/%s" % (
settings['SERVICE_URL'], settings['PARTNER_ID'], media_id)
        entryData['thumbnail_url'] = '%s/width/%s/height/%s?%s' % (
            thumbnail_url, width, height, int(time.time()))
entryData['download_url'] = entry.getDownloadUrl()
entryData['thumb_id'] = thumb_id
# ViewData
entryData['plays'] = entry.getPlays()
entryData['views'] = entry.getViews()
entryData['rank'] = (entry.getRank(), entry.getTotalRank())
# Video Properties
entryData['width'] = entry.getWidth()
entryData['height'] = entry.getHeight()
entryData['duration'] = entry.getDuration()
entryData['created'] = entry.getCreatedAt()
entryData['updated'] = entry.getUpdatedAt()
# Video Data
entryData['name'] = entry.getName()
entryData['description'] = entry.getDescription()
entryData['tags'] = entry.getTags()
# Search Text
entryData['searchtext'] = entry.getSearchText()
# Control
entryData['startDate'] = entry.getStartDate()
entryData['endDate'] = entry.getEndDate()
# entryData['status'] = entry.getStatus()
return entryData
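# Illustrative sketch (not part of the original module): get_entry() above
# assumes the thumbasset 'list' call answers with XML shaped roughly like the
# sample below; the ids and tags here are made up.
def _demo_find_default_thumb():
    sample = ('<xml><result><objects>'
              '<item><id>1_thumb01</id><tags>default_thumb</tags></item>'
              '<item><id>1_thumb02</id><tags>custom</tags></item>'
              '</objects></result></xml>')
    thumb_id = ''
    for item in ET.fromstring(sample).findall('./result/objects/item'):
        tags = item.find('tags').text
        if tags and 'default_thumb' in tags:
            thumb_id = item.find('id').text
    return thumb_id  # -> '1_thumb01'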
def del_entry(media_id, client=None):
error_message = None
try:
client.media.delete(media_id)
return {"success": True, "message": "MEDIA_DELETED"}
except Exception as inst:
error_message = str(inst)
return {"success": False, "message": error_message}
def add_caption(captionfile,
entry_id,
capformat,
label=None,
language=None,
default=False,
client=None):
    '''Add a caption to an entry and return the caption asset ID.
    The returned id identifies the caption in Kaltura.
    language may be "auto" to attempt auto-detection from the caption file.
    SRT file encodings are guessed and the file is re-encoded to unicode.
    '''
if client is None:
raise Exception('Client cannot be None')
# 0: ensure srt file is unicode-encoded
if capformat == 'srt':
convert_file_to_unicode(captionfile)
# 1: set parameters for caption_asset
caption_asset = KalturaCaptionAsset()
if default:
caption_asset.setIsDefault(
KalturaNullableBoolean(KalturaNullableBoolean.TRUE_VALUE))
if language == 'auto':
suggested_lang = parse_caption_language(captionfile, capformat)
autolang = difflib.get_close_matches(suggested_lang,
LANGUAGE_LIST, n=1, cutoff=0.8)
autolang = KalturaLanguage.EN if not autolang else autolang[0]
caption_asset.setLanguage(KalturaLanguage(autolang))
elif not language:
caption_asset.setLanguage(KalturaLanguage(KalturaLanguage.EN))
else:
caption_asset.setLanguage(KalturaLanguage(language.capitalize()))
# this might have to change to something like
# KalturaLanguage.EN
if label:
caption_asset.setLabel(label)
if capformat == 'srt':
caption_asset.setFormat(KalturaCaptionType(KalturaCaptionType.SRT))
elif capformat == 'dfxp':
caption_asset.setFormat(KalturaCaptionType(KalturaCaptionType.DFXP))
else:
        raise Exception('Unsupported caption format: %s' % capformat)
# 2: attach the caption_asset to a media entry. Later we'll attach a file
# to the caption_asset.
caption_asset_service = KalturaCaptionAssetService(client)
caption_asset = caption_asset_service.add(entry_id, caption_asset)
# 3: upload file using uploadtoken, and get its token and then its
# resource object
upload_token = client.uploadToken.add()
upload_token_detailed = client.uploadToken.upload(
        upload_token.getId(), open(captionfile, 'rb'))
uploaded_resource = KalturaUploadedFileTokenResource(
upload_token_detailed.getId())
# 4: attach file to caption entry
caption_asset = caption_asset_service.setContent(
caption_asset.getId(), uploaded_resource)
return caption_asset.getId()
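# Hedged usage sketch (not part of the original module; the session setup and
# file path below are hypothetical placeholders):
#
#   client = build_kaltura_client(kaltura_id)   # however the caller logs in
#   caption_id = add_caption('/tmp/lecture.srt', '0_abc123', 'srt',
#                            label='English', language='English',
#                            default=True, client=client)
#   url = get_caption(caption_id, client)       # serving URL for the asset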
def caption_dictify(caption_asset):
caption_details = {}
caption_details['id'] = caption_asset.getId()
caption_details['language'] = caption_asset.getLanguage().getValue()
caption_details['created_at'] = caption_asset.getCreatedAt()
defval = caption_asset.getIsDefault()
    if hasattr(defval, 'getValue'):
        defval = defval.getValue()
    caption_details['default'] = (defval == KalturaNullableBoolean.TRUE_VALUE)
caption_details['name'] = caption_asset.getLabel()
if caption_asset.getFormat().getValue() == KalturaCaptionType.SRT:
caption_details['format'] = 'srt'
elif caption_asset.getFormat().getValue() == KalturaCaptionType.DFXP:
caption_details['format'] = 'dfxp'
cap_stat = caption_asset.getStatus().getValue()
stat_codes = {
-1: 'ERROR',
0: 'QUEUED',
2: 'READY',
3: 'DELETED',
7: 'IMPORTING'
}
caption_details['status'] = stat_codes.get(cap_stat, 'UNKNOWN')
return caption_details
def thumbnail_dictify(thumbasset):
thumb_details = {}
thumb_details['id'] = thumbasset.getId()
thumb_details['width'] = thumbasset.getWidth()
thumb_details['height'] = thumbasset.getHeight()
thumb_details['size'] = thumbasset.getSize()
thumb_details['created_at'] = thumbasset.getCreatedAt()
    thumb_details['default'] = (thumbasset.getTags() == 'default_thumb')
status_codes = {
-1: 'ERROR',
0: 'QUEUED',
2: 'READY',
3: 'DELETED',
7: 'IMPORTING'
}
thumb_details['status'] = status_codes.get(
thumbasset.getStatus().getValue(), 'UNKNOWN')
return thumb_details
def get_caption(caption_id, client, url=True):
caption_asset_service = KalturaCaptionAssetService(client)
if url:
return caption_asset_service.getUrl(caption_id)
caption_asset = caption_asset_service.get(caption_id)
return caption_dictify(caption_asset)
def get_entry_captions(entry_id, client, in_dict=True):
caption_asset_service = KalturaCaptionAssetService(client)
listfilter = KalturaAssetFilter()
listfilter.entryIdEqual = entry_id
pager = None
response = caption_asset_service.list(listfilter, pager)
caption_list = []
for caption in response.objects:
if in_dict:
caption_entry = caption_dictify(caption)
caption_entry['url'] = caption_asset_service.getUrl(caption.getId())
caption_list.append(caption_entry)
else:
caption_list.append(caption)
caption_list = sort_by_field(caption_list, "created_at", ascending=False)
return caption_list
def update_caption(caption_id, language, capformat, name, default, client):
caption_asset_service = KalturaCaptionAssetService(client)
msgs = []
caption_asset = KalturaCaptionAsset()
if language:
try:
caption_asset.setLanguage(KalturaLanguage(language.capitalize()))
msgs.append('language is ' + language)
        except Exception:
            msgs.append('unable to set language')
if capformat:
if capformat.lower() == 'srt':
capformat = KalturaCaptionType.SRT
msgs.append('format is srt')
elif capformat.lower() == 'dfxp':
capformat = KalturaCaptionType.DFXP
msgs.append('format is dfxp')
else:
raise Exception('Unsupported format')
caption_asset.setFormat(capformat)
if name:
caption_asset.setLabel(name)
msgs.append('label (name) is ' + name)
if default:
msgs.append('default is true')
caption_asset.setIsDefault(
KalturaNullableBoolean(KalturaNullableBoolean.TRUE_VALUE))
try:
caption_asset_service.update(caption_id, caption_asset)
except Exception as e:
        return {'success': False, 'messages': str(e)}
return {'success': True, 'messages': msgs}
def del_caption(caption_id, client):
caption_asset_service = KalturaCaptionAssetService(client)
msgs = []
try:
caption_asset_service.delete(caption_id)
        msgs.append('Caption with id ' + caption_id + ' deleted')
return {'success': True, 'message': msgs}
except Exception as e:
msgs.append('Deletion failed for caption with ID ' + caption_id)
msgs.append('Exception: ' + str(e))
return {'success': False, 'message': msgs}
def caption_set_default(caption_id, client):
caption_asset_service = KalturaCaptionAssetService(client)
msgs = []
try:
caption_asset_service.setAsDefault(caption_id)
        msgs.append('Caption with id ' + caption_id + ' set as default')
return {'success': True, 'message': msgs}
except Exception as e:
msgs.append('setAsDefault failed for caption with ID ' + caption_id)
msgs.append('Exception: ' + str(e))
return {'success': False, 'message': msgs}
def remote_thumbs(client, thumb_id):
if client is None:
raise Exception("Client can not be None")
thumbnail_asset_service = KalturaThumbAssetService(client)
remote_paths = thumbnail_asset_service.getRemotePaths(thumb_id)
return {"success": True, "paths": remote_paths}
if __name__ == '__main__':
pass
# -*- coding: utf-8 -*-
# stino/base.py
import os
import re
from . import constant
from . import textutil
from . import fileutil
from . import sketch
ino_ext_list = ['.ino', '.pde']
build_folder_name_list = ['cores', 'variants', 'system', 'bootloaders']
class ArduinoInfo:
def __init__(self):
self.refresh()
def refresh(self):
self.version_text = getVersionText()
self.version = getVersion(self.version_text)
self.genSketchbook()
self.genPlatformList()
self.genKeywordList()
def getSketchbook(self):
return self.sketchbook
def getPlatformList(self):
return self.platform_list
def getKeywordList(self):
return self.keyword_list
def getKeywordRefDict(self):
return self.keyword_ref_dict
def getVersion(self):
return self.version
def getVersionText(self):
return self.version_text
def genSketchbook(self):
self.sketchbook = getSketchbook()
def genPlatformList(self):
self.platform_list = getPlatformList()
def genKeywordList(self):
self.keyword_list = []
for platform in self.platform_list:
self.keyword_list += getKeywordListFromPlatform(platform)
self.keyword_ref_dict = getKeywordRefList(self.keyword_list)
class Platform:
def __init__(self, name):
self.name = name
self.core_folder_list = []
self.board_list = []
self.programmer_list = []
self.example = SketchItem(name)
self.lib_list = []
self.h_lib_dict = {}
def getName(self):
return self.name
def setName(self, name):
self.name = name
def getCoreFolderList(self):
return self.core_folder_list
def setCoreFolderList(self, folder_list):
self.core_folder_list = folder_list
def addCoreFolder(self, core_folder):
self.core_folder_list.append(core_folder)
def getBoardList(self):
return self.board_list
def addBoardList(self, board_list):
self.board_list += board_list
def addBoard(self, board):
self.board_list.append(board)
def getProgrammerList(self):
return self.programmer_list
def addProgrammerList(self, programmer_list):
self.programmer_list += programmer_list
def addProgrammer(self, programmer):
self.programmer_list.append(programmer)
def getExample(self):
return self.example
def setExample(self, example):
self.example = example
def getLibList(self):
return self.lib_list
def setLibList(self, lib_list):
self.lib_list = lib_list
def getHLibDict(self):
return self.h_lib_dict
def setHLibDict(self, h_lib_dict):
self.h_lib_dict = h_lib_dict
class SketchItem:
def __init__(self, name):
self.name = name
self.folder = ''
self.children = []
def hasSubItem(self):
state = False
if self.children:
state = True
return state
def getName(self):
return self.name
def setName(self, name):
self.name = name
def getFolder(self):
return self.folder
def setFolder(self, folder):
self.folder = folder
def getSubItemList(self):
return self.children
def setSubItemList(self, sub_item_list):
self.children = sub_item_list
def addSubItem(self, sub_item):
self.children.append(sub_item)
def addSubItemList(self, sub_item_list):
self.children += sub_item_list
class LibItem:
    def __init__(self, name):
        self.name = name
        self.folder = ''
        # hasSubItem() below reads self.children; initialize it so LibItem
        # matches SketchItem and does not raise AttributeError.
        self.children = []
def hasSubItem(self):
state = False
if self.children:
state = True
return state
def getName(self):
return self.name
def setName(self, name):
self.name = name
def getFolder(self):
return self.folder
def setFolder(self, folder):
self.folder = folder
class Board:
def __init__(self, name):
self.name = name
self.option_list = []
self.args = {}
self.folder = ''
def getName(self):
return self.name
def setName(self, name):
self.name = name
def getArgs(self):
return self.args
def setArgs(self, args):
self.args = args
def getFolder(self):
return self.folder
def setFolder(self, folder):
self.folder = folder
def getOptionList(self):
return self.option_list
def addOption(self, option):
self.option_list.append(option)
class BoardOption:
def __init__(self, name):
self.name = name
self.item_list = []
def getName(self):
return self.name
def setName(self, name):
self.name = name
def getItemList(self):
return self.item_list
def addItem(self, item):
self.item_list.append(item)
class BoardOptionItem:
def __init__(self, name):
self.name = name
self.args = {}
def getName(self):
return self.name
def setName(self, name):
self.name = name
def getArgs(self):
return self.args
def setArgs(self, args):
self.args = args
class Programmer:
def __init__(self, name):
self.name = name
self.args = {}
def getName(self):
return self.name
def setName(self, name):
self.name = name
def getArgs(self):
return self.args
def setArgs(self, args):
self.args = args
class Keyword:
def __init__(self, name):
self.name = name
self.type = ''
self.ref = ''
def getName(self):
return self.name
def getType(self):
return self.type
def getRef(self):
return self.ref
def setName(self, name):
self.name = name
def setType(self, keyword_type):
self.type = keyword_type
def setRef(self, ref):
self.ref = ref
def getRealArduinoPath(folder):
if constant.sys_platform == 'osx':
folder = os.path.join(folder, 'Contents/Resources/Java')
return folder
def isArduinoFolder(folder):
state = False
if folder and os.path.isdir(folder):
folder = getRealArduinoPath(folder)
hardware_path = os.path.join(folder, 'hardware')
lib_path = os.path.join(folder, 'lib')
version_file_path = os.path.join(lib_path, 'version.txt')
if os.path.isdir(hardware_path) and os.path.isfile(version_file_path):
state = True
return state
def getArduinoFolder():
arduino_folder = constant.sketch_settings.get('arduino_folder', '')
if arduino_folder:
if not isArduinoFolder(arduino_folder):
arduino_folder = ''
else:
if constant.sys_platform == 'osx':
arduino_folder = getRealArduinoPath(arduino_folder)
return arduino_folder
def setArduinoFolder(arduino_folder):
constant.sketch_settings.set('arduino_folder', arduino_folder)
def isSketchFolder(folder):
state = False
file_list = fileutil.listDir(folder, with_dirs = False)
for cur_file in file_list:
cur_file_ext = os.path.splitext(cur_file)[1]
if cur_file_ext in ino_ext_list:
state = True
break
return state
def getDefaultSketchbookFolder():
document_folder = fileutil.getDocumentFolder()
sketchbook_folder = os.path.join(document_folder, 'Arduino')
return sketchbook_folder
def getSketchbookFolder():
sketchbook_folder = constant.global_settings.get('sketchbook_folder', '')
if (not sketchbook_folder) or (not os.path.isdir(sketchbook_folder)):
sketchbook_folder = getDefaultSketchbookFolder()
setSketchbookFolder(sketchbook_folder)
checkSketchbookFolder(sketchbook_folder)
return sketchbook_folder
def setSketchbookFolder(sketchbook_folder):
constant.global_settings.set('sketchbook_folder', sketchbook_folder)
def checkSketchbookFolder(sketchbook_folder):
libraries_folder = os.path.join(sketchbook_folder, 'libraries')
hardware_folder = os.path.join(sketchbook_folder, 'hardware')
folder_list = [sketchbook_folder, libraries_folder, hardware_folder]
for folder in folder_list:
if os.path.isfile(folder):
os.rename(folder, folder+'.bak')
if not os.path.exists(folder):
os.makedirs(folder)
def getRootFolderList():
sketchbook_folder = getSketchbookFolder()
arduino_folder = getArduinoFolder()
folder_list = [sketchbook_folder]
if arduino_folder:
folder_list.append(arduino_folder)
return folder_list
def isCoreFolder(folder):
state = False
if os.path.isdir(folder):
cores_folder = os.path.join(folder, 'cores')
boards_file = os.path.join(folder, 'boards.txt')
if os.path.isdir(cores_folder) or os.path.isfile(boards_file):
state = True
return state
def getCoreFolderList():
core_folder_list = []
folder_list = getRootFolderList()
for folder in folder_list:
hardware_folder = os.path.join(folder, 'hardware')
if not os.path.isdir(hardware_folder):
continue
sub_folder_name_list = fileutil.listDir(hardware_folder, with_files = False)
for sub_folder_name in sub_folder_name_list:
if sub_folder_name == 'tools':
continue
sub_folder = os.path.join(hardware_folder, sub_folder_name)
if isCoreFolder(sub_folder):
core_folder_list.append(sub_folder)
else:
sub_sub_folder_name_list = fileutil.listDir(sub_folder, with_files = False)
for sub_sub_folder_name in sub_sub_folder_name_list:
sub_sub_folder = os.path.join(sub_folder, sub_sub_folder_name)
if isCoreFolder(sub_sub_folder):
core_folder_list.append(sub_sub_folder)
return core_folder_list
def getPlatformNameFromFile(platform_file):
platform_name = ''
lines = fileutil.readFileLines(platform_file)
for line in lines:
if 'name=' in line:
(key, value) = textutil.getKeyValue(line)
platform_name = value
break
return platform_name
def getPlatformNameFromCoreFolder(core_folder):
platform_name = 'Arduino AVR Boards'
platform_file = os.path.join(core_folder, 'platform.txt')
if os.path.isfile(platform_file):
platform_name = getPlatformNameFromFile(platform_file)
else:
cores_folder = os.path.join(core_folder, 'cores')
arduino_src_folder = os.path.join(cores_folder, 'arduino')
if not os.path.isdir(arduino_src_folder):
core_folder_name = os.path.split(core_folder)[1]
platform_name = core_folder_name[0].upper() + core_folder_name[1:] + ' Boards'
return platform_name
def getPlatformListFromCoreFolderList():
platform_list = []
platform_name_list = []
name_platform_dict = {}
root_folder_list = getRootFolderList()
platform = Platform('General')
platform.setCoreFolderList(root_folder_list)
platform_list.append(platform)
core_folder_list = getCoreFolderList()
for core_folder in core_folder_list:
platform_name = getPlatformNameFromCoreFolder(core_folder)
if platform_name:
if not platform_name in platform_name_list:
platform = Platform(platform_name)
platform_name_list.append(platform_name)
platform_list.append(platform)
name_platform_dict[platform_name] = platform
else:
platform = name_platform_dict[platform_name]
platform.addCoreFolder(core_folder)
return platform_list
def getBoardGeneralBlock(board_block):
block = []
for line in board_block:
if 'menu.' in line:
break
block.append(line)
return block
def getBoardOptionBlock(board_block, menu_option_id):
block = []
for line in board_block:
if menu_option_id in line:
index = line.index(menu_option_id) + len(menu_option_id) + 1
block.append(line[index:])
return block
def splitBoardOptionBlock(board_option_block):
block_list = []
item_id_list = []
for line in board_option_block:
(key, value) = textutil.getKeyValue(line)
length = len(key.split('.'))
        if length <= 2:
item_id = key
item_id = item_id.replace('name', '')
item_id_list.append(item_id)
for item_id in item_id_list:
block = []
for line in board_option_block:
if item_id in line:
block.append(line)
block_list.append(block)
return block_list
def getBlockInfo(block):
title_line = block[0]
(item_id, caption) = textutil.getKeyValue(title_line)
item_id = item_id.replace('.name', '') + '.'
args = {}
for line in block[1:]:
(key, value) = textutil.getKeyValue(line)
key = key.replace(item_id, '')
args[key] = value
return (caption, args)
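# Worked example (illustrative, assuming textutil.getKeyValue splits a line
# on its first '=' into (key, value)): given a board block such as
#
#   uno.name=Arduino Uno
#   uno.build.mcu=atmega328p
#   uno.upload.speed=115200
#
# getBlockInfo() strips the 'uno.' prefix from each key and returns
#   ('Arduino Uno', {'build.mcu': 'atmega328p', 'upload.speed': '115200'})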
def getBoardListFromFolder(folder, build_folder_list):
board_list = []
boards_file = os.path.join(folder, 'boards.txt')
if os.path.isfile(boards_file):
lines = fileutil.readFileLines(boards_file)
board_block_list = textutil.getBlockList(lines)
board_option_id_list = []
board_option_caption_dict = {}
header_block = board_block_list[0]
for line in header_block:
(board_option_id, caption) = textutil.getKeyValue(line)
board_option_id_list.append(board_option_id)
board_option_caption_dict[board_option_id] = caption
for board_block in board_block_list[1:]:
board_general_block = getBoardGeneralBlock(board_block)
(name, args) = getBlockInfo(board_general_block)
args['build.cores_folder'] = build_folder_list[0]
args['build.variants_folder'] = build_folder_list[1]
args['build.system.path'] = build_folder_list[2]
args['build.uploaders_folder'] = build_folder_list[3]
cur_board = Board(name)
cur_board.setFolder(folder)
cur_board.setArgs(args)
for board_option_id in board_option_id_list:
board_option_block = getBoardOptionBlock(board_block, board_option_id)
if board_option_block:
cur_board_option = BoardOption(board_option_caption_dict[board_option_id])
option_item_block_list = splitBoardOptionBlock(board_option_block)
for option_item_block in option_item_block_list:
(name, args) = getBlockInfo(option_item_block)
cur_option_item = BoardOptionItem(name)
cur_option_item.setArgs(args)
cur_board_option.addItem(cur_option_item)
cur_board.addOption(cur_board_option)
board_list.append(cur_board)
return board_list
def getProgrammerListFromFolder(folder):
programmer_list = []
programmers_file = os.path.join(folder, 'programmers.txt')
if os.path.isfile(programmers_file):
lines = fileutil.readFileLines(programmers_file)
programmer_block_list = textutil.getBlockList(lines)
for programmer_block in programmer_block_list:
if programmer_block:
(name, args) = getBlockInfo(programmer_block)
if not 'program.extra_params' in args:
if 'Parallel' in name:
value = '-F'
else:
value = ''
if 'communication' in args:
comm_type = args['communication']
if comm_type == 'serial':
port = '{serial.port}'
else:
port = comm_type
value += '-P%s' % port
value += ' '
if 'speed' in args:
if not 'program.speed' in args:
args['program.speed'] = args['speed']
if 'program.speed' in args:
speed = args['program.speed']
value += '-b%s' % speed
args['program.extra_params'] = value
cur_programmer = Programmer(name)
cur_programmer.setArgs(args)
programmer_list.append(cur_programmer)
return programmer_list
def getSketchFromFolder(folder, level = 0):
folder_name = os.path.split(folder)[1]
sketch = SketchItem(folder_name)
has_sub_folder = False
if level < 4:
sub_folder_name_list = fileutil.listDir(folder, with_files = False)
if sub_folder_name_list:
for sub_folder_name in sub_folder_name_list:
sub_folder = os.path.join(folder, sub_folder_name)
sub_sketch = getSketchFromFolder(sub_folder, level + 1)
if sub_sketch.hasSubItem():
sketch.addSubItem(sub_sketch)
elif isSketchFolder(sub_folder):
sub_sketch.setFolder(sub_folder)
sketch.addSubItem(sub_sketch)
has_sub_folder = True
if not has_sub_folder:
if isSketchFolder(folder):
sketch.setFolder(folder)
if level == 0:
sub_sketch = SketchItem('-')
sketch.addSubItem(sub_sketch)
return sketch
def printSketch(sketch, level = 0):
caption = sketch.getName()
if level > 0:
caption = '\t' * level + '|__' + caption
if not sketch.hasSubItem():
caption += ' ('
caption += sketch.getFolder()
caption += ')'
print(caption)
if sketch.hasSubItem():
for sub_item in sketch.getSubItemList():
printSketch(sub_item, level+1)
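# Illustrative output (hypothetical sketchbook layout; printSketch above
# indents each level with one extra tab, shown here as spaces):
#
#   Sketchbook
#       |__Blink (/home/user/Arduino/Blink)
#       |__Sensors
#           |__DHT11 (/home/user/Arduino/Sensors/DHT11)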
def getSketchbook():
sketchbook_folder = getSketchbookFolder()
sketchbook = getSketchFromFolder(sketchbook_folder)
sketchbook.setName('Sketchbook')
return sketchbook
def getGeneralLibraryListFromFolder(folder, platform_name = ''):
lib_list = []
libraries_folder = os.path.join(folder, 'libraries')
if os.path.isdir(libraries_folder):
sub_folder_name_list = fileutil.listDir(libraries_folder, with_files = False)
for sub_folder_name in sub_folder_name_list:
sub_folder = os.path.join(libraries_folder, sub_folder_name)
lib_item = LibItem(sub_folder_name)
lib_item.setFolder(sub_folder)
arch_folder = os.path.join(sub_folder, 'arch')
if os.path.isdir(arch_folder):
avr_folder = os.path.join(arch_folder, 'avr')
sam_folder = os.path.join(arch_folder, 'sam')
if os.path.isdir(avr_folder):
if 'AVR' in platform_name:
lib_list.append(lib_item)
if os.path.isdir(sam_folder):
if 'ARM' in platform_name:
lib_list.append(lib_item)
else:
if 'General' in platform_name:
lib_list.append(lib_item)
if lib_list:
lib_item = LibItem('-')
lib_list.append(lib_item)
return lib_list
def getPlatformLibraryListFromFolder(folder):
lib_list = []
libraries_folder = os.path.join(folder, 'libraries')
if os.path.isdir(libraries_folder):
sub_folder_name_list = fileutil.listDir(libraries_folder, with_files = False)
for sub_folder_name in sub_folder_name_list:
sub_folder = os.path.join(libraries_folder, sub_folder_name)
lib_item = LibItem(sub_folder_name)
lib_item.setFolder(sub_folder)
lib_list.append(lib_item)
if lib_list:
lib_item = LibItem('-')
lib_list.append(lib_item)
return lib_list
def getLibraryListFromPlatform(platform_list, platform_id):
lib_list = []
platform_general = platform_list[0]
general_core_folder_list = platform_general.getCoreFolderList()
cur_platform = platform_list[platform_id]
core_folder_list = cur_platform.getCoreFolderList()
platform_name = cur_platform.getName()
for core_folder in general_core_folder_list:
lib_list += getGeneralLibraryListFromFolder(core_folder, platform_name)
if platform_id > 0:
for core_folder in core_folder_list:
lib_list += getPlatformLibraryListFromFolder(core_folder)
return lib_list
def getExampleListFromFolder(folder):
example_list = []
libraries_folder = os.path.join(folder, 'libraries')
examples_folder = os.path.join(folder, 'examples')
sub_folder_list = [examples_folder, libraries_folder]
for sub_folder in sub_folder_list:
if os.path.isdir(sub_folder):
example = getSketchFromFolder(sub_folder)
example_list.append(example)
return example_list
def getExampleFromPlatform(platform):
name = platform.getName()
example = SketchItem(name)
example_list = []
core_folder_list = platform.getCoreFolderList()
for core_folder in core_folder_list:
example_list += getExampleListFromFolder(core_folder)
for cur_example in example_list:
sub_example_list = cur_example.getSubItemList()
example.addSubItemList(sub_example_list)
return example
def hasCoreSrcFolder(folder):
state = False
cores_folder = os.path.join(folder, 'cores')
if os.path.isdir(cores_folder):
state = True
return state
def getCoreSrcFolderFromPlatform(platform):
core_src_folder = ''
core_folder_list = platform.getCoreFolderList()
for core_folder in core_folder_list:
if hasCoreSrcFolder(core_folder):
# core_src_folder = os.path.join(core_folder, 'cores')
core_src_folder = core_folder
break
return core_src_folder
def findSubFolderInFolderList(folder_list, sub_folder_name):
sub_folder = ''
main_folder = ''
arduino_folder = getArduinoFolder()
for cur_folder in folder_list:
if arduino_folder in cur_folder:
main_folder = cur_folder
break
if main_folder:
cur_sub_folder = os.path.join(main_folder, sub_folder_name)
if os.path.isdir(cur_sub_folder):
sub_folder = cur_sub_folder
if not sub_folder:
for cur_folder in folder_list:
cur_sub_folder = os.path.join(cur_folder, sub_folder_name)
if os.path.isdir(cur_sub_folder):
sub_folder = cur_sub_folder
break
return sub_folder
def getDefaultBuildFolderList(core_folder_list, folder_name_list):
default_build_folder_list = []
    for folder_name in folder_name_list:
        cur_folder = findSubFolderInFolderList(core_folder_list, folder_name)
        default_build_folder_list.append(cur_folder)
return default_build_folder_list
def getFolderBuildFolderDict(core_folder_list, folder_name_list):
folder_build_folder_dict = {}
default_build_folder_list = getDefaultBuildFolderList(core_folder_list, folder_name_list)
for core_folder in core_folder_list:
build_folder_list = []
        for index, folder_name in enumerate(folder_name_list):
            cur_folder = os.path.join(core_folder, folder_name)
if not os.path.isdir(cur_folder):
cur_folder = default_build_folder_list[index]
build_folder_list.append(cur_folder)
folder_build_folder_dict[core_folder] = build_folder_list
return folder_build_folder_dict
def getPlatformList():
platform_list = getPlatformListFromCoreFolderList()
    for index, platform in enumerate(platform_list):
        platform_name = platform.getName()
core_folder_list = platform.getCoreFolderList()
folder_build_folder_dict = getFolderBuildFolderDict(core_folder_list, build_folder_name_list)
example = getExampleFromPlatform(platform)
lib_list = getLibraryListFromPlatform(platform_list, index)
h_lib_dict = getHLibDict(lib_list, platform_name)
platform.setExample(example)
platform.setLibList(lib_list)
platform.setHLibDict(h_lib_dict)
for core_folder in core_folder_list:
build_folder_list = folder_build_folder_dict[core_folder]
board_list = getBoardListFromFolder(core_folder, build_folder_list)
programmer_list = getProgrammerListFromFolder(core_folder)
platform.addBoardList(board_list)
platform.addProgrammerList(programmer_list)
return platform_list
def getVersionText():
version_text = '1.0.5'
arduino_root = getArduinoFolder()
if arduino_root:
lib_folder = os.path.join(arduino_root, 'lib')
version_file = os.path.join(lib_folder, 'version.txt')
if os.path.isfile(version_file):
lines = fileutil.readFileLines(version_file)
for line in lines:
line = line.strip()
if line:
version_text = line
break
return version_text
def getVersion(version_text):
version = 105
    pattern_text = r'[\d.]+'
    pattern = re.compile(pattern_text)
match = pattern.search(version_text)
if match:
version_text = match.group()
number_list = version_text.split('.')
version = 0
power = 0
for number in number_list:
number = int(number)
version += number * (10 ** power)
power -= 1
version *= 100
return int(version)
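# Worked example (illustrative): getVersion('1.5.2') matches '1.5.2', sums
# 1*10**0 + 5*10**-1 + 2*10**-2 = 1.52 and multiplies by 100, returning 152;
# the default '1.0.5' likewise maps to 105.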
def getKeywordListFromFile(keywords_file):
keyword_list = []
lines = fileutil.readFileLines(keywords_file)
for line in lines:
line = line.strip()
if line and (not '#' in line):
word_list = re.findall(r'\S+', line)
if len(word_list) > 1:
keyword_name = word_list[0]
if len(word_list) == 3:
keyword_type = word_list[1]
keyword_ref = word_list[2]
elif len(word_list) == 2:
if 'LITERAL' in word_list[1] or 'KEYWORD' in word_list[1]:
keyword_type = word_list[1]
keyword_ref = ''
else:
keyword_type = ''
keyword_ref = word_list[1]
cur_keyword = Keyword(keyword_name)
cur_keyword.setType(keyword_type)
cur_keyword.setRef(keyword_ref)
keyword_list.append(cur_keyword)
return keyword_list
def getKeywordListFromCoreFolderList(core_folder_list):
keyword_list = []
for core_folder in core_folder_list:
lib_folder = os.path.join(core_folder, 'lib')
keywords_file = os.path.join(lib_folder, 'keywords.txt')
if os.path.isfile(keywords_file):
cur_keyword_list = getKeywordListFromFile(keywords_file)
keyword_list += cur_keyword_list
return keyword_list
def getKeywordListFromLibList(lib_list):
keyword_list = []
for lib in lib_list:
lib_folder = lib.getFolder()
keywords_file = os.path.join(lib_folder, 'keywords.txt')
if os.path.isfile(keywords_file):
cur_keyword_list = getKeywordListFromFile(keywords_file)
keyword_list += cur_keyword_list
return keyword_list
def getKeywordListFromPlatform(platform):
keyword_list = []
core_folder_list = platform.getCoreFolderList()
lib_list = platform.getLibList()
keyword_list += getKeywordListFromCoreFolderList(core_folder_list)
keyword_list += getKeywordListFromLibList(lib_list)
return keyword_list
def getKeywordRefList(keyword_list):
keyword_ref_dict = {}
for keyword in keyword_list:
ref = keyword.getRef()
if ref and ref[0].isupper():
keyword_name = keyword.getName()
keyword_ref_dict[keyword_name] = ref
return keyword_ref_dict
def getUrl(url):
file_name = url + '.html'
arduino_folder = getArduinoFolder()
reference_folder = os.path.join(arduino_folder, 'reference')
reference_file = os.path.join(reference_folder, file_name)
if os.path.isfile(reference_file):
reference_file = reference_file.replace(os.path.sep, '/')
url = 'file://' + reference_file
else:
url = 'http://arduino.cc'
return url
def getSelectedTextFromView(view):
selected_text = ''
region_list = view.sel()
for region in region_list:
selected_region = view.word(region)
selected_text += view.substr(selected_region)
selected_text += '\n'
return selected_text
def getWordListFromText(text):
pattern_text = r'\b\w+\b'
word_list = re.findall(pattern_text, text)
return word_list
def getSelectedWordList(view):
selected_text = getSelectedTextFromView(view)
word_list = getWordListFromText(selected_text)
return word_list
def getHLibDict(lib_list, platform_name):
h_lib_dict = {}
for lib in lib_list:
lib_folder = lib.getFolder()
h_list = sketch.getHSrcFileList(lib_folder, platform_name)
for h in h_list:
h_lib_dict[h] = lib_folder
return h_lib_dict
def newSketch(sketch_name):
sketch_file = ''
sketchbook_folder = getSketchbookFolder()
sketch_folder = os.path.join(sketchbook_folder, sketch_name)
if not os.path.exists(sketch_folder):
os.makedirs(sketch_folder)
file_name = sketch_name + '.ino'
sketch_file = os.path.join(sketch_folder, file_name)
text = '// %s\n\n' % file_name
text += 'void setup() {\n\n'
text += '}\n\n'
text += 'void loop() {\n\n'
text += '}\n\n'
fileutil.writeFile(sketch_file, text)
return sketch_file
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run masked LM/next sentence masked_lm pre-training for BERT."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import modeling
import optimization
import tensorflow as tf
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string(
"input_file", None,
"Input TF example files (can be a glob or comma separated).")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded. Must match data generation.")
flags.DEFINE_integer(
"max_predictions_per_seq", 20,
"Maximum number of masked LM predictions per sequence. "
"Must match data generation.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_integer("num_train_steps", 100000, "Number of training steps.")
flags.DEFINE_integer("num_warmup_steps", 10000, "Number of warmup steps.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer("max_eval_steps", 100, "Maximum number of eval steps.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
masked_lm_positions = features["masked_lm_positions"]
masked_lm_ids = features["masked_lm_ids"]
masked_lm_weights = features["masked_lm_weights"]
next_sentence_labels = features["next_sentence_labels"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
(masked_lm_loss,
masked_lm_example_loss, masked_lm_log_probs) = get_masked_lm_output(
bert_config, model.get_sequence_output(), model.get_embedding_table(),
masked_lm_positions, masked_lm_ids, masked_lm_weights)
(next_sentence_loss, next_sentence_example_loss,
next_sentence_log_probs) = get_next_sentence_output(
bert_config, model.get_pooled_output(), next_sentence_labels)
total_loss = masked_lm_loss + next_sentence_loss
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels):
"""Computes the loss and accuracy of the model."""
masked_lm_log_probs = tf.reshape(masked_lm_log_probs,
[-1, masked_lm_log_probs.shape[-1]])
masked_lm_predictions = tf.argmax(
masked_lm_log_probs, axis=-1, output_type=tf.int32)
masked_lm_example_loss = tf.reshape(masked_lm_example_loss, [-1])
masked_lm_ids = tf.reshape(masked_lm_ids, [-1])
masked_lm_weights = tf.reshape(masked_lm_weights, [-1])
masked_lm_accuracy = tf.metrics.accuracy(
labels=masked_lm_ids,
predictions=masked_lm_predictions,
weights=masked_lm_weights)
masked_lm_mean_loss = tf.metrics.mean(
values=masked_lm_example_loss, weights=masked_lm_weights)
next_sentence_log_probs = tf.reshape(
next_sentence_log_probs, [-1, next_sentence_log_probs.shape[-1]])
next_sentence_predictions = tf.argmax(
next_sentence_log_probs, axis=-1, output_type=tf.int32)
next_sentence_labels = tf.reshape(next_sentence_labels, [-1])
next_sentence_accuracy = tf.metrics.accuracy(
labels=next_sentence_labels, predictions=next_sentence_predictions)
next_sentence_mean_loss = tf.metrics.mean(
values=next_sentence_example_loss)
return {
"masked_lm_accuracy": masked_lm_accuracy,
"masked_lm_loss": masked_lm_mean_loss,
"next_sentence_accuracy": next_sentence_accuracy,
"next_sentence_loss": next_sentence_mean_loss,
}
eval_metrics = (metric_fn, [
masked_lm_example_loss, masked_lm_log_probs, masked_lm_ids,
masked_lm_weights, next_sentence_example_loss,
next_sentence_log_probs, next_sentence_labels
])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def get_masked_lm_output(bert_config, input_tensor, output_weights, positions,
label_ids, label_weights):
"""Get loss and log probs for the masked LM."""
input_tensor = gather_indexes(input_tensor, positions)
with tf.variable_scope("cls/predictions"):
# We apply one more non-linear transformation before the output layer.
# This matrix is not used after pre-training.
with tf.variable_scope("transform"):
input_tensor = tf.layers.dense(
input_tensor,
units=bert_config.hidden_size,
activation=modeling.get_activation(bert_config.hidden_act),
kernel_initializer=modeling.create_initializer(
bert_config.initializer_range))
input_tensor = modeling.layer_norm(input_tensor)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
output_bias = tf.get_variable(
"output_bias",
shape=[bert_config.vocab_size],
initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
label_ids = tf.reshape(label_ids, [-1])
label_weights = tf.reshape(label_weights, [-1])
one_hot_labels = tf.one_hot(
label_ids, depth=bert_config.vocab_size, dtype=tf.float32)
# The `positions` tensor might be zero-padded (if the sequence is too
# short to have the maximum number of predictions). The `label_weights`
# tensor has a value of 1.0 for every real prediction and 0.0 for the
# padding predictions.
per_example_loss = -tf.reduce_sum(log_probs * one_hot_labels, axis=[-1])
numerator = tf.reduce_sum(label_weights * per_example_loss)
denominator = tf.reduce_sum(label_weights) + 1e-5
loss = numerator / denominator
return (loss, per_example_loss, log_probs)
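# Illustrative arithmetic (not part of the original graph): with
# per_example_loss = [2.0, 3.0, 0.7] and label_weights = [1.0, 1.0, 0.0]
# (the last prediction is padding), the loss above is
#   (1.0*2.0 + 1.0*3.0 + 0.0*0.7) / (1.0 + 1.0 + 0.0 + 1e-5) ~= 2.5,
# i.e. padded positions contribute to neither numerator nor denominator.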
def get_next_sentence_output(bert_config, input_tensor, labels):
"""Get loss and log probs for the next sentence prediction."""
# Simple binary classification. Note that 0 is "next sentence" and 1 is
# "random sentence". This weight matrix is not used after pre-training.
with tf.variable_scope("cls/seq_relationship"):
output_weights = tf.get_variable(
"output_weights",
shape=[2, bert_config.hidden_size],
initializer=modeling.create_initializer(bert_config.initializer_range))
output_bias = tf.get_variable(
"output_bias", shape=[2], initializer=tf.zeros_initializer())
logits = tf.matmul(input_tensor, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
log_probs = tf.nn.log_softmax(logits, axis=-1)
labels = tf.reshape(labels, [-1])
one_hot_labels = tf.one_hot(labels, depth=2, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, log_probs)
def gather_indexes(sequence_tensor, positions):
"""Gathers the vectors at the specific positions over a minibatch."""
sequence_shape = modeling.get_shape_list(sequence_tensor, expected_rank=3)
batch_size = sequence_shape[0]
seq_length = sequence_shape[1]
width = sequence_shape[2]
flat_offsets = tf.reshape(
tf.range(0, batch_size, dtype=tf.int32) * seq_length, [-1, 1])
flat_positions = tf.reshape(positions + flat_offsets, [-1])
flat_sequence_tensor = tf.reshape(sequence_tensor,
[batch_size * seq_length, width])
output_tensor = tf.gather(flat_sequence_tensor, flat_positions)
return output_tensor
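# Worked example (illustrative): with batch_size=2, seq_length=3 and
# positions=[[0, 2], [1, 2]], flat_offsets is [[0], [3]] and flat_positions
# becomes [0, 2, 4, 5] -- row indices into the flattened
# [batch_size * seq_length, width] tensor from which tf.gather picks the
# vector for each prediction position.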
def input_fn_builder(input_files,
max_seq_length,
max_predictions_per_seq,
is_training,
num_cpu_threads=4):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
name_to_features = {
"input_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"input_mask":
tf.FixedLenFeature([max_seq_length], tf.int64),
"segment_ids":
tf.FixedLenFeature([max_seq_length], tf.int64),
"masked_lm_positions":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_ids":
tf.FixedLenFeature([max_predictions_per_seq], tf.int64),
"masked_lm_weights":
tf.FixedLenFeature([max_predictions_per_seq], tf.float32),
"next_sentence_labels":
tf.FixedLenFeature([1], tf.int64),
}
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
if is_training:
d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files))
d = d.repeat()
d = d.shuffle(buffer_size=len(input_files))
# `cycle_length` is the number of parallel files that get read.
cycle_length = min(num_cpu_threads, len(input_files))
# `sloppy` mode means that the interleaving is not exact. This adds
# even more randomness to the training pipeline.
d = d.apply(
tf.contrib.data.parallel_interleave(
tf.data.TFRecordDataset,
sloppy=is_training,
cycle_length=cycle_length))
d = d.shuffle(buffer_size=100)
else:
d = tf.data.TFRecordDataset(input_files)
# Since we evaluate for a fixed number of steps we don't want to encounter
# out-of-range exceptions.
d = d.repeat()
# We must `drop_remainder` on training because the TPU requires fixed
# size dimensions. For eval, we assume we are evaluating on the CPU or GPU
    # and we *don't* want to drop the remainder, otherwise we won't cover
# every sample.
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
num_parallel_batches=num_cpu_threads,
drop_remainder=True))
return d
return input_fn
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
tf.gfile.MakeDirs(FLAGS.output_dir)
input_files = []
for input_pattern in FLAGS.input_file.split(","):
input_files.extend(tf.gfile.Glob(input_pattern))
tf.logging.info("*** Input Files ***")
for input_file in input_files:
tf.logging.info(" %s" % input_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=FLAGS.num_train_steps,
num_warmup_steps=FLAGS.num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.do_train:
tf.logging.info("***** Running training *****")
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
train_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=True)
estimator.train(input_fn=train_input_fn, max_steps=FLAGS.num_train_steps)
if FLAGS.do_eval:
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_input_fn = input_fn_builder(
input_files=input_files,
max_seq_length=FLAGS.max_seq_length,
max_predictions_per_seq=FLAGS.max_predictions_per_seq,
is_training=False)
result = estimator.evaluate(
input_fn=eval_input_fn, steps=FLAGS.max_eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with tf.gfile.GFile(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
flags.mark_flag_as_required("input_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run()
#!/usr/bin/python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import re
import sys
from distutils.version import LooseVersion
from refresh_source import Refresher
from spinnaker.run import run_quick
class CommitTag:
"""Provides a model class to capture the output of 'git show-ref --tags'.
We also capture the tag versions using `distutils.version` for easy semantic
version comparison for sorting.
"""
def __init__(self, ref_line):
# ref_line is in the form "$commit_hash refs/tags/$tag"
tokens = ref_line.split(' ')
self.__hash = tokens[0]
tag_parts = tokens[1].split('/')
    self.__tag = tag_parts[-1]
self.__version = LooseVersion(self.__tag)
def __repr__(self):
return 'hash: %s, tag: %s, version: %s' % (self.__hash, self.__tag, self.__version)
@property
def hash(self):
return self.__hash
@property
def tag(self):
return self.__tag
@property
def version(self):
return self.__version
class CommitMessage:
"""Provides a model class to capture the output of 'git log --pretty'.
"""
def __init__(self, hash, msg):
self.__hash = hash
self.__msg = msg
def __repr__(self):
return 'hash: %s, message: %s' % (self.__hash, self.__msg)
@property
def hash(self):
return self.__hash
@property
def msg(self):
return self.__msg
class VersionBump:
"""Provides a model for a semantic version bump.
"""
def __init__(self, version_str, commit_hash, major=False, minor=False, patch=False):
self.__version_str = version_str
self.__commit_hash = commit_hash
self.__major = major
self.__minor = minor
self.__patch = patch
def __repr__(self):
return ('version_str: {}, commit_hash: {}, major: {}, minor: {}, patch: {}'
.format(self.version_str,
self.commit_hash,
self.major,
self.minor,
self.patch))
def __eq__(self, other):
return (self.version_str == other.version_str
and self.commit_hash == other.commit_hash
and self.major == other.major
and self.minor == other.minor
and self.patch == other.patch)
@property
def commit_hash(self):
return self.__commit_hash
@property
def version_str(self):
return self.__version_str
@property
def major(self):
return self.__major
@property
def minor(self):
return self.__minor
@property
def patch(self):
return self.__patch
class GitTagMissingException(Exception):
"""Exception for misconfigured git tags in the operating repository."""
def __init__(self, message):
self.message = message
class Annotator(object):
"""Provides semantic version tagging for Spinnaker repositories.
Each Spinnaker repository has tags that denote releases. These tags follow
semantic versioning. At the present time, there are two sets of tags in use
for the Spinnaker repositories: 'vX.Y.Z' for Netflix releases and 'version-X.Y.Z-$build'
for Spinnaker product releases. This class handles annotations of the
'version-X.Y.Z-$build' pattern.
This class provides support for resolving semantic version tags
based on commit messages and annotating local source trees with the
tagging information. It is assumed that the commit messages follow
conventional-changelog commit message conventions. This class also provides
support for creating release branches and pushing to and pulling from remote
repositories through extending the Refresher class.
"""
# regex for 'version-X.Y.Z' versions
  TAG_MATCHER = re.compile(r'^version-[0-9]+\.[0-9]+\.[0-9]+$')
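  # Illustrative matches (not from the original source): TAG_MATCHER accepts
  # 'version-1.2.3' but rejects 'v1.2.3' (wrong prefix) and
  # 'version-1.2.3-45' (a trailing build number fails the '$' anchor).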
def __init__(self, options, path=None, next_tag=None):
self.__next_tag = next_tag or options.next_tag
self.__path = path or options.path
self.__branch = options.branch
self.__build_number = options.build_number or os.environ.get('BUILD_NUMBER', '0')
self.__force_rebuild = options.force_rebuild
self.__tags_to_delete = []
self.__filtered_tags = []
self.__current_version = None
@property
def build_number(self):
return self.__build_number
@property
def current_version(self):
return self.__current_version
@property
def branch(self):
return self.__branch
@branch.setter
def branch(self, branch):
self.__branch = branch
@property
def path(self):
return self.__path
@path.setter
def path(self, path):
self.__path = path
def __partition_tags_on_pattern(self):
"""Partitions the tags into two lists based on TAG_MATCHER.
One of the lists of tags will be deleted locally (self.__tags_to_delete) so
gradle will use our tag version as the package version during the
build/publish task.
One of the lists will be used to determine the next semantic version
    for our tag pattern (self.__filtered_tags).
"""
tag_ref_result = run_quick('git -C {path} show-ref --tags'
.format(path=self.path),
echo=False)
ref_lines = tag_ref_result.stdout.strip().split('\n')
hash_tags = [CommitTag(s) for s in ref_lines]
self.__filtered_tags = [ht for ht in hash_tags if self.TAG_MATCHER.match(ht.tag)]
self.__tags_to_delete = [ht for ht in hash_tags if not self.TAG_MATCHER.match(ht.tag)]
def parse_git_tree(self):
self.__partition_tags_on_pattern()
self.__determine_current_version()
def tag_head(self):
"""Tags the current branch's HEAD with the next semver tag.
Returns:
[VersionBump]: The version bump used to tag the git repository, or None
if the tagging fails.
"""
if self.__is_head_current():
# We manually specified a tag and want to override with that one.
if self.__next_tag:
self.__tag_head_with_build(self.__next_tag)
run_quick('git -C {path} tag {next_tag} HEAD'
.format(path=self.path, next_tag=self.__next_tag))
return VersionBump(self.__next_tag, self.get_head_commit())
# We didn't manually specify, but want to force a rebuild of the old tag.
elif self.__force_rebuild:
self.__tag_head_with_build(self.__current_version.tag)
run_quick('git -C {path} tag {next_tag} HEAD'
.format(path=self.path, next_tag=self.__current_version.tag))
return VersionBump(self.__current_version.tag, self.get_head_commit(), patch=True)
# Else fail.
else:
logging.warn("There is already a tag of the form 'version-X.Y.Z' at HEAD. Not forcing rebuild.")
return None
else:
version_bump = self.determine_new_tag()
# This tag is for logical identification for developers. This will be pushed
# to the upstream git repository if we choose to use this version in a
# formal Spinnaker product release.
run_quick('git -C {path} tag {next_tag} HEAD'
.format(path=self.path, next_tag=version_bump.version_str))
self.__tag_head_with_build(version_bump.version_str)
return version_bump
def __tag_head_with_build(self, version_bump_tag):
"""Tags the current branch's HEAD with the next semver gradle build tag.
Args:
version_bump_tag [String]: Semver string to add as a gradle build tag.
"""
next_tag_with_build = '{0}-{1}'.format(version_bump_tag,
self.build_number)
# This tag is for gradle to use as the package version. It incorporates the
# build number for uniqueness when publishing. This tag is of the form
# 'X.Y.Z-$build_number' for gradle to use correctly. This is not pushed
# to the upstream git repository.
first_dash_idx = next_tag_with_build.index('-')
gradle_version = next_tag_with_build[first_dash_idx + 1:]
run_quick('git -C {path} tag {next_tag} HEAD'
.format(path=self.path, next_tag=gradle_version))
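  # Worked example (illustrative): with version_bump_tag='version-1.2.3' and
  # build_number='45', next_tag_with_build is 'version-1.2.3-45'; slicing
  # past the first '-' leaves gradle_version='1.2.3-45', the
  # 'X.Y.Z-$build_number' form gradle expects.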
def delete_unwanted_tags(self):
"""Locally deletes tags that don't match TAG_MATCHER.
This is so that gradle will use the latest resolved semantic version from
our tag pattern when it builds the package.
"""
print ('Deleting {0} unwanted git tags locally from {1}'
.format(len(self.__tags_to_delete), self.path))
for bad_hash_tag in self.__tags_to_delete:
run_quick('git -C {path} tag -d {tag}'
.format(path=self.path, tag=bad_hash_tag.tag), echo=False)
def checkout_branch(self):
"""Checks out a branch.
"""
run_quick('git -C {path} checkout {branch}'.format(path=self.path,
branch=self.branch))
def get_head_commit(self):
"""Retrieves the head commit hash.
"""
head_commit_res = run_quick('git -C {path} rev-parse HEAD'
.format(path=self.path),
echo=False)
return head_commit_res.stdout.strip()
def __is_head_current(self):
"""Checks if the current version is at HEAD.
Returns:
[Boolean]: True if the current version tag is on HEAD, else False.
"""
head_commit = self.get_head_commit()
return self.__current_version.hash == head_commit
def __determine_current_version(self):
"""Determines and stores the current (latest) semantic version from
'version-X.Y.Z' tags.
"""
sorted_filtered_tags = sorted(self.__filtered_tags,
key=lambda ht: ht.version, reverse=True)
if len(sorted_filtered_tags) == 0:
raise GitTagMissingException("No version tags of the form 'version-X.Y.Z'.")
self.__current_version = sorted_filtered_tags[0]
def determine_new_tag(self):
"""Determines the next semver tag for the repository at the path.
If the commit at HEAD is already tagged with a tag matching --tag_regex_str,
this function is a no-op. Otherwise it determines the semantic version bump
for the commits since the last tag matching 'version-X.Y.Z' and suggests a new tag
based on the commit messages. This suggestion can be overridden with
--next_tag, which will be used if there are any commits after the last
semver tag matching 'version-X.Y.Z'.
Returns:
[VersionBump]: Next semantic version tag to be used, along with what type
of version bump it was. Version tag is of the form 'version-X.Y.Z'.
"""
if self.__next_tag:
return VersionBump(self.__next_tag, self.get_head_commit())
# 'git log' entries of the form '$hash $commit_title'
log_onelines = run_quick('git -C {path} log --pretty=oneline'.format(path=self.path),
echo=False).stdout.strip().split('\n')
commit_hashes = [line.split(' ')[0].strip() for line in log_onelines]
# Full commit messages, including bodies for finding 'BREAKING CHANGE:'.
msgs = [
run_quick('git -C {path} log -n 1 --pretty=medium {hash}'.format(path=self.path, hash=h),
echo=False).stdout.strip() for h in commit_hashes
]
if len(commit_hashes) != len(msgs):
raise IOError('Git commit hash list and commit message list are unequal sizes.')
return self.bump_semver(self.__current_version, commit_hashes, msgs)
def bump_semver(self, curr_version, commit_hashes, commit_msgs):
"""Determines the semver version bump based on commit messages in 'git log'.
Uses 'conventional-changelog' format to search for features and breaking
changes.
Args:
curr_version [CommitTag]: Latest 'version-X.Y.Z' tag/commit hash pair
        calculated by semver sort.
commit_hashes [String list]: List of ordered commit hashes.
commit_msgs [String list]: List of ordered, full commit messages.
Returns:
[VersionBump]: Next semantic version tag to be used, along with what type
of version bump it was.
"""
# Commits are output from 'git log ...' ordered most recent to least.
commits_iter = iter([CommitMessage(hash, msg) for hash, msg in zip(commit_hashes, commit_msgs)])
commit = next(commits_iter, None)
head_commit_hash = commit.hash
    feat_matcher = re.compile(r'feat\(.*\)*')
bc_matcher = re.compile('BREAKING CHANGE')
feature = False
breaking_change = False
    first_dash_idx = curr_version.tag.find('-')  # find() returns -1 when missing, unlike index()
if first_dash_idx == -1:
raise GitTagMissingException("No version tags of the form 'version-X.Y.Z'.")
major, minor, patch = curr_version.tag[first_dash_idx + 1:].split('.')
# TODO(jacobkiefer): Fail if changelog conventions aren't followed?
while commit is not None and commit.hash != curr_version.hash:
msg_lines = commit.msg.split('\n')
if any([bc_matcher.match(m.strip()) for m in msg_lines]):
breaking_change = True
break # Breaking change has the highest precedence.
if any([feat_matcher.match(m.strip()) for m in msg_lines]):
feature = True
commit = next(commits_iter, None)
    if breaking_change:
return VersionBump(
'version-' + str(int(major) + 1) + '.0.0', head_commit_hash, major=True)
    elif feature:
return VersionBump(
'version-' + major + '.' + str(int(minor) + 1) + '.0', head_commit_hash, minor=True)
else:
return VersionBump(
'version-' + major + '.' + minor + '.' + str(int(patch) + 1), head_commit_hash, patch=True)
@classmethod
def init_argument_parser(cls, parser):
"""Initialize command-line arguments."""
parser.add_argument('--build_number', default=os.environ.get('BUILD_NUMBER'),
help='The build number to append to the semantic version tag.')
parser.add_argument('--branch', default='master',
help='Git branch to checkout.')
parser.add_argument('--next_tag', default='',
help='Tag to use as the next tag instead of determining the next semver tag.')
parser.add_argument('--path', default='.',
help='Path to the git repository we want to annotate.')
parser.add_argument('--force_rebuild', default=False, action='store_true',
help='Force a rebuild even if there is a git tag at HEAD.')
@classmethod
def main(cls):
parser = argparse.ArgumentParser()
cls.init_argument_parser(parser)
options = parser.parse_args()
annotator = cls(options)
annotator.checkout_branch()
annotator.parse_git_tree()
annotator.tag_head()
annotator.delete_unwanted_tags()
if __name__ == '__main__':
sys.exit(Annotator.main())
########################################################################################
# Revised based on Yorick_Multitau_Code obtained from Dr. Andrei Fluerasu
# Coded by Dr. Yugang Zhang, Brookhaven National Lab
# 631-885-4714, yuzhang@bnl.gov or yuzhangnew@icloud.com
#
# Dec 20, 2015, Ver0
# Jan 26, 2015, Ver1: add array_method
# Feb 01, Ver2: add read_pixellist, qind, qradi from a txt file
# Feb 18, Ver3: add two-time correlation function from large array
# Feb 22, Ver4: add two-time correlation function by image_read method
# Feb 28, Ver5: corrected time-index of two-time correlation function by image_read method
# March 1, Ver6: add the method get one-time from the diag of two-time
# March 2, Ver7: do cleaning
# March 7, Ver8: add frame_inten, fram_pixel_waterfall, two_time_show2
########################################################################################
from numpy import arange, array, zeros, linspace, round, hypot, ones, sqrt, int_, int32, hstack, vstack
from numpy import digitize,where,copy,indices,histogram,bincount,tril,diag
from numpy import savetxt, log, column_stack,intersect1d, save, load
from math import sin,cos,pi
#from scipy.interpolate import interp1d
#import matplotlib.pyplot as plt
import scipy.misc  # imread is used by the TIFF image-reading branches below
import time
import os
import sys
#from GetEdf import get_edf,sumfile
#from create_and_read_data import cpopen,cpdump
#from create_and_read_data import make_img_array,make_img_pixel_array
#from load_data import read_ring_data, read_txt
#from img_process import show_img,show_mask,show_edf_with_mask, interp_zeros
from img_process import *
from Init_for_Bfiber import * # the setup file
T=True
F=False
class xpcs( object):
def __init__(self, PAR=PAR):
""" DOCUMENT __init__( PAR )
the initilization of the XPCS class
KEYWORD: PAR, gives the parameters, including nobuf,nolev,begframe,
noframes, if dark_img, it will use DK as dark_img """
global nobuf,nolev,begframe,noframes
self.version='version_11'
#print PAR
if PAR['dark_img']:
if DK==None:print 'Get dark_img from data.';self.backg() #to get a background
else:print 'Get dark_img from a file.'
else:print 'No Dark Images are used!'
nobuf=PAR['nobuf']
nolev=PAR['nolev']
begframe=PAR['begframe']
noframes=PAR['noframes']
self.noframes=noframes
self.begframe = begframe
def clear(self,):
''' DOCUMENT clear( )
clear all the global variables
KEYWORD: None '''
global buf,G,IAP,IAF,num,cts,cur,g2,gmax,sigg2
buf=0;G=0;IAP=0;
IAF=0;num=0;cts=0;
cur=0;g2=0;gmax=0;sigg2=0
ttx=0;Ndel=0;Npix=0;
g2a=0;g2a2=0;g12=0;g12a=0;g12L=0;g12s=0;
g12x=0;g12y=0;g12z=0
g12r=0;
def backg(self,):
''' DOCUMENT backg( )
return DK: the average of a series of dark images.
KEYWORD: None '''
global DK
DK = sumfile( FILENAME, first_dark, first_dark+number_darks-1, avgflag=1, ext='.edf')
def delays(self,time=1,nolevs=None,nobufs=None, correct=True):
''' DOCUMENT delays(time=)
return array of delays.
KEYWORD: time: scale delays by time ( should be time between frames)
'''
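        # Illustrative example: with nobuf=8 and nolev=2 the delay ladder is
        # dly = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 16] (times `time`):
        # linear spacing within the first level, then spacing doubled per level.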
global nolev,nobuf,dly, dict_dly
if nolevs==None:nolevs=nolev
if nobufs==None:nobufs=nobuf
if nobufs%2!=0:print "nobuf must be even!!!"
dly=zeros( (nolevs+1)*nobufs/2 +1 )
dict_dly ={}
for i in range( 1,nolevs+1):
if i==1:imin= 1
else:imin=nobufs/2+1
ptr=(i-1)*nobufs/2+ arange(imin,nobufs+1)
dly[ptr]= arange( imin, nobufs+1) *2**(i-1)
dict_dly[i] = dly[ptr-1]
dly*=time
#self.dly=dly*timeperframe
if correct:self.dly=dly[1:]*timeperframe
else:self.dly=dly[:-1]*timeperframe
def get_min_dly(self):
''' DOCUMENT get_min_dly( )
return the min value of the dly_array.
KEYWORD: None
'''
global min_ind_dly
if len(where(dly-noframes >=0 )[0])==0:min_ind_dly = len(dly)
else:min_ind_dly = min( where(dly-noframes >0)[0] ) -1
#print min_ind_dly
def make_qlist(self):
''' DOCUMENT make_qlist( )
return a qlist by giving the noqs, qstart,qend,qwidth.
KEYWORD: None
'''
        global qwidth, noqs, qend, qstart, qnum, qlist, qradi
        qradi = linspace(qstart, qend, noqs)
        qlist = zeros(2*noqs)
        qlist[::2] = int_(qradi - qwidth/2)       # even indices: inner edge of each ring
        qlist[1::2] = int_(qradi + (1+qwidth)/2)  # odd indices: outer edge of each ring
if qlist_!=None:qlist=qlist_
def calqlist(self, qmask=None , shape='circle',
asym=False,theta=0, center=None):
''' DOCUMENT calqlist( qmask=,shape=, )
        calculate the equivalent pixels within a shape,
return pixellist,nopixels,qind,nopr
KEYWORD: qmask, a mask file;
shape='circle', give a circle shaped qlist
shape='column', give a column shaped qlist
shape='row', give a row shaped qlist
shape='slope', give a line-shape qlist by
the rotation of a row line with center and tan(theta) degree
"to be finished yet for the theta"
'''
global pixellist,nopixels,qind,nopr
self.make_qlist()
y, x = indices( [dimx,dimy] )
if shape=='circle':
y_= y- ybar +1;x_=x-xbar+1
            r= int_( hypot(x_, y_) + 0.5 )  # +0.5 rounds to the nearest integer before the int cast
elif shape=='column':
r= x
elif shape=='row':
r=y
elif shape=='slope':
if center==None:cx,cy=[(dimy/2.),(dimx/2.)]
else:cx,cy=center[1]/1.,center[0]/1.
a= cx/(cx+theta*cy)
b=theta*cx/(cx+theta*cy)
print a,b,cx,cy
r = x*a + y*b
r=array(r,dtype=int32)
#print r
r= r.flatten()
#print qlist
noqrs = len(qlist)
qind = digitize(r, qlist)
if qmask==None:
            w_ = where( qind % 2 )  # keep odd bins, i.e. pixels inside a q-ring
w=w_[0]
else:
a=where( (qind)%2 )[0]
            b = where( qmask.flatten() )[0]
#print a.shape,b.shape
w= intersect1d(a,b)
nopixels=len(w)
qind=qind[w]/2
#pixellist= ( y*imgwidth +x ).flatten() [w]
pixellist= ( y*dimy +x ).flatten() [w]
nopr,bins=histogram( qind, bins= range( len(qradi) +1 ))
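        # nopr[q] is the number of pixels that fell into ring q; it is used to
        # normalize the per-ring intensity histograms into mean intensities.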
###########################################################################
########for frame_inten######################################
##################################################################
def fram_inten( self, frame_start, frame_end):
global fint
nofra = frame_end+1-frame_start
fint= zeros( [nofra, noqs])
start_time = time.time()
for i in range(0, nofra ):
n= frame_start + i
fp = FILENAME + '%04d'%n
if img_format=='EDF':fp+='.edf';img= get_edf( fp ) - DK
elif img_format=='TIFF':fp+='.tiff';img = scipy.misc.imread(fp,flatten=1)
else:img= cpopen( n= n,prefix= 'data_', inDir=DATA_DIR)
imgf=img.flatten()[pixellist]
img=[]
fint[i]= (histogram(qind, bins=noqs, weights= imgf))[0]
if int(i %( (frame_end+1-frame_start) /10)) ==0:
sys.stdout.write("#")
sys.stdout.flush()
elapsed_time = time.time() - start_time
print 'Total time: %.2f min' %(elapsed_time/60.)
#return fint
###########################################################################
########for fram_pixel_waterfall######################################
##################################################################
##fram_pixel_waterfall
def fram_pixel_waterfall( self, frame_start, frame_end, q=None):
global pintq,pint_dict
start_time = time.time()
nofra = frame_end+1-frame_start
if q==None:
pint_dict={}
for i in range(noqs):
pint_dict[i]=zeros( [nofra, nopr[i]])
else:
pintq = zeros( [nofra, nopr[q]])
#print pintq.shape
for i in range(0, nofra ):
n= frame_start + i
fp = FILENAME + '%04d'%n
if img_format=='EDF':fp+='.edf';img= get_edf( fp ) - DK
elif img_format=='TIFF':fp+='.tiff';img = scipy.misc.imread(fp,flatten=1)
else:img= cpopen( n= n,prefix= 'data_', inDir=DATA_DIR)
imgf=img.flatten()[pixellist]
img=[]
if q!=None:
pintq[i]= imgf[ qind==q]
#print pintq[i].shape
else:
for q_ in range(noqs):
pint_dict[q_][i] = imgf[ qind==q_]
            if int(i %( (frame_end+1-frame_start) /10)) ==0:
sys.stdout.write("#")
sys.stdout.flush()
elapsed_time = time.time() - start_time
print 'Total time: %.2f min' %(elapsed_time/60.)
###########################################################################
########for one_time correlation function using image-reading method
##################################################################
def process(self,lev,bufno, n=None):
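        # Multi-tau update: G, IAP and IAF hold running per-q-ring averages of
        # I(t)*I(t+dly), I(t) and I(t+dly); each is updated incrementally as
        # mean += (sample - mean) / n_pairs, with n_pairs = num[lev] - i below.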
num[lev]+=1
if lev==0:imin=0
else:imin=nobuf/2
for i in range(imin, min(num[lev],nobuf) ):
ptr=lev*nobuf/2+i
delayno=(bufno-i)%nobuf
IP=buf[lev,delayno]
IF=buf[lev,bufno]
G[ptr]+= ( (histogram(qind, bins=noqs, weights= IF*IP))[0]/nopr-G[ptr] )/ (num[lev]-i)
IAP[ptr]+= ( (histogram(qind, bins=noqs, weights= IP))[0]/nopr-IAP[ptr] )/ (num[lev]-i)
IAF[ptr]+= ( (histogram(qind, bins=noqs, weights= IF))[0]/nopr-IAF[ptr] )/ (num[lev]-i)
def insertimg(self, n, norm=None, print_=False, brute=False):
cur[0]=1+cur[0]%nobuf
fp = FILENAME + '%04d'%n #+'.edf'
if img_format=='EDF':fp+='.edf';img= get_edf( fp ) - DK
elif img_format=='TIFF':fp+='.tiff';img = scipy.misc.imread(fp,flatten=1)
else:img= cpopen( n= n,prefix= 'data_', inDir=DATA_DIR)
if print_:print 'The insert image %s is %s' %(n,fp)
buf[0, cur[0]-1 ]=img.flatten()[pixellist]
img=[] #//save space
self.process(lev=0, bufno=cur[0]-1, n=n )
processing=1
if not brute:
lev=1
while processing:
if cts[lev]:
prev= 1+ (cur[lev-1]-1-1+nobuf)%nobuf
cur[lev]= 1+ cur[lev]%nobuf
buf[lev,cur[lev]-1] = ( buf[lev-1,prev-1] + buf[lev-1,cur[lev-1]-1] ) /2
cts[lev]=0
self.process(lev= lev, bufno= cur[lev]-1 , n=n)
lev+=1
if lev<nolev:processing = 1
else:processing = 0
else:
cts[lev]=1 #// set flag to process next time
processing=0 #// can stop until more images are accumulated
def autocor( self, print_=False, save_=True, brute=False,filename=None):
global buf,G,IAP,IAF,num,cts,cur,g2,gmax,sigg2
global Ndel,Npix
start_time = time.time()
#initialize all arrays
buf=zeros([nolev,nobuf,nopixels]) #// matrix of buffers
cts=zeros(nolev)
cur=ones(nolev) * nobuf
G=zeros( [(nolev+1)*nobuf/2,noqs])
IAP=zeros( [(nolev+1)*nobuf/2,noqs])
IAF=zeros( [(nolev+1)*nobuf/2,noqs])
num= array(zeros( nolev ),dtype='int')
ttx=0
for n in range(1,noframes +1 ):
self.insertimg(begframe+n-1, print_=print_,brute=brute)
if n %(noframes/10) ==0:
sys.stdout.write("#")
sys.stdout.flush()
elapsed_time = time.time() - start_time
print 'Total time: %.2f min' %(elapsed_time/60.)
#print G.shape
if len(where(IAP==0)[0])!=0:gmax = where(IAP==0)[0][0]
else:gmax=IAP.shape[0]
#g2=G/(IAP*IAF)
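        # g2(q, tau) = <I(t) I(t+tau)> / ( <I(t)> <I(t+tau)> ), evaluated only
        # up to gmax, the first delay bin that accumulated no statistics.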
g2=(G[:gmax]/(IAP[:gmax]*IAF[:gmax]))
if save_:
if filename==None:filename='g2_-%s-%s_ImgReadMethod_'%(
begframe,begframe+noframes-1)
save( RES_DIR + filename+FOUT, g2)
print 'the %s was stored in %s'%(filename,RES_DIR)
return g2,elapsed_time/60.
###########################################################################
########for two_time correlation function using image-reading method
##################################################################
def process_two_time(self,lev,bufno,n):
num[lev]+=1
if lev==0:imin=0
else:imin=nobuf/2
for i in range(imin, min(num[lev],nobuf) ):
ptr=lev*nobuf/2+i
delayno=(bufno-i)%nobuf #//cyclic buffers
IP=buf[lev,delayno]
IF=buf[lev,bufno]
I_t12 = (histogram(qind, bins=noqs, weights= IF*IP))[0]
I_t1 = (histogram(qind, bins=noqs, weights= IP))[0]
I_t2 = (histogram(qind, bins=noqs, weights= IF))[0]
tind1 = (n-1);tind2=(n -dly[ptr] -1)
if not isinstance( n, int ):
nshift = 2**(lev-1)
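                # At levels > 0 each buffer frame is an average of 2**(lev-1)
                # raw frames, so n is fractional; spread the correlation value
                # over the corresponding range of raw-frame time indices.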
for i in range( -nshift+1, nshift +1 ):
#print tind1+i
g12[ int(tind1 + i), int(tind2 + i) ] =I_t12/( I_t1 * I_t2) * nopr
else:
#print tind1
g12[ tind1, tind2 ] = I_t12/( I_t1 * I_t2) * nopr
def insertimg_twotime(self, n, norm=None, print_=False):
cur[0]=1+cur[0]%nobuf # increment buffer
fp = FILENAME + '%04d'%n
if img_format=='EDF':fp+='.edf';img= get_edf( fp ) - DK
elif img_format=='TIFF':fp+='.tiff';img = scipy.misc.imread(fp,flatten=1)
else:img= cpopen( n= n,prefix= 'data_', inDir=DATA_DIR)
if print_:print 'The insert image %s is %s' %(n,fp)
buf[0, cur[0]-1 ]=img.flatten()[pixellist]
img=[] #//save space
countl[0] = 1+ countl[0]
current_img_time = n - begframe +1
self.process_two_time(lev=0, bufno=cur[0]-1,n=current_img_time )
time_ind[0].append( current_img_time )
processing=1
lev=1
while processing:
if cts[lev]:
prev= 1+ (cur[lev-1]-1-1+nobuf)%nobuf
cur[lev]= 1+ cur[lev]%nobuf
countl[lev] = 1+ countl[lev]
buf[lev,cur[lev]-1] = ( buf[lev-1,prev-1] + buf[lev-1,cur[lev-1]-1] ) /2
cts[lev]=0
t1_idx= (countl[lev]-1) *2
current_img_time = ((time_ind[lev-1])[t1_idx ] + (time_ind[lev-1])[t1_idx +1 ] )/2.
time_ind[lev].append( current_img_time )
self.process_two_time(lev= lev, bufno= cur[lev]-1,n=current_img_time )
lev+=1
#//Since this level finished, test if there is a next level for processing
if lev<nolev:processing = 1
else:processing = 0
else:
cts[lev]=1 #// set flag to process next time
processing=0 #// can stop until more images are accumulated
def autocor_two_time(self,print_=False,save_=True,filename=None):
global buf,num,cts,cur,g12, countl
global Ndel,Npix
global time_ind #generate a time-frame for each level
global g12x, g12y, g12z #for interpolate
start_time = time.time()
buf=zeros([nolev,nobuf,nopixels]) #// matrix of buffers, for store img
cts=zeros(nolev)
cur=ones(nolev) * nobuf
countl = array(zeros( nolev ),dtype='int')
g12 = zeros( [ noframes,noframes, noqs] )
g12x=[]
g12y=[]
g12z=[]
num= array(zeros( nolev ),dtype='int')
time_ind ={key: [] for key in range(nolev)}
ttx=0
for n in range(1,noframes +1 ): ##do the work here
self.insertimg_twotime(begframe+n-1, print_=print_)
if n %(noframes/10) ==0:
sys.stdout.write("#")
sys.stdout.flush()
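        # Only the lower triangle (t1 >= t2) was filled during processing; mirror
        # it across the diagonal (subtracting the diagonal once so it is not
        # doubled) to obtain the full symmetric two-time matrix for each q.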
for q in range(noqs):
x0 = g12[:,:,q]
g12[:,:,q] = tril(x0) + tril(x0).T - diag(diag(x0))
elapsed_time = time.time() - start_time
print 'Total time: %.2f min' %(elapsed_time/60.)
if save_:
            if filename==None:
                filename = 'g12_-%s-%s_ImgReadMethod_'%(
                    begframe,begframe+noframes-1)
            save( RES_DIR + filename+FOUT, g12)
print 'the %s was stored in %s'%(filename,RES_DIR)
return g12, (elapsed_time/60.)
###########################################################################
    ########for displaying the time correlation functions
##################################################################
def show_two_time_correlation( self, data, interp=False,
vmin=None,vmax=None,filename='g12', title_on= True,
show=True,show_tau_lines= False):
'''the data should be a N*N array'''
if show_tau_lines:
linS = [];linE=[]
linS.append( zip( [0]*len(dly[:-1]), dly[:-1] ))
linE.append( zip( noframes-1- dly[:-1], [noframes-1]*len(dly[:-1])))
if interp:data=interp_zeros(data)
if show_tau_lines:
ax = plt.gca()
imshow(ax, data, vmin=vmin, vmax=vmax)
print 'show_tau'
for i, [ps,pe] in enumerate(zip(linS[0],linE[0])):
if i<nobuf:lev=0
else:lev = (i-nobuf)/(nobuf/2) +1
levn = lev/(nolev-1.0)
red = 1. - levn
#blue = levn
green=levn
linewidth=(2+lev*.4)*1.0
linewidth= 2
ax.plot( [ps[0], pe[0]],[ps[1],pe[1]],
linewidth=linewidth, color=(red , green,0) )
ax.set_xlim(0,noframes-1)
ax.set_ylim(0,noframes-1)
fp=RES_DIR + filename+ '.png'
plt.savefig( fp )
if show:plt.show()
else:
show_img(data, save=True, show=show, title_on=title_on,
title=filename, outDir=RES_DIR)
def show_multi_two_time_correlation( self, data=None, interp=False,
vmin=None, vmax=None, filename='g12', show=True,show_tau_lines= False):
if data==None:data=g12
if len(data.shape)==3:dx,dy, dq = data.shape
else:dq=1
for q in range(dq):
print q
if dq>1:y=data[:,:,q]
else:y=data
            self.show_two_time_correlation(
data = y, interp=interp, vmin=vmin, vmax=vmax,
filename=filename + '_q%s'%q, show=show,show_tau_lines=show_tau_lines)
y=0
def showg2(self,data='g2',show=True,save_=True,filename='g2' ):
sx= int( round (sqrt(noqs)) )
if noqs%sx==0:sy=noqs/sx
else:sy=noqs/sx+1
if data=='g2':data=g2
m,n=data.shape
fig = plt.figure()
fig.set_size_inches(20,10.5)
title=filename
plt.title(title,fontsize=24)
plt.axis('off')
for sn in range(0, n):
#ax = fig.add_subplot('%i'%sx,'%i'%sy,'%i'%(sn+1) )
ax = fig.add_subplot(sx,sy,sn+1 )
ax.text(.5,.9, 'q= %.5f A-1'%(qradi[sn]*qperpixel),fontsize=16, horizontalalignment='center',
transform=ax.transAxes)
#plt.title('q= %.3f'%qradi[sn],fontsize=16)
x=self.dly[:m]
#x=self.dly[1:m+1]
y=data[:,sn]
plt.plot(x, y, 'o',linewidth=3, ls='-', color='b',markersize=8)
#ax.set_xscale('log')
plt.xscale('log')
#print max(y)
plt.ylim([ min(y) , max(y[1:]) ])
plt.xlim([ min(x)+0*1e-6, max(x)])
#plt.ylim([ 1, 2])
plt.xlabel('time delay',fontsize=12)
plt.ylabel('g2',fontsize=12)
if save_:
plt.savefig( RES_DIR + filename +'.png' )
#cpdump(data,FOUT+'_g2',RES_DIR)
#cpdump(sigg2,FOUT+'_sigg2',RES_DIR)
if show:plt.show()
############################################
#to get the waterfall plot
# xp=xpcs();
# xp.calqlist( shape='column');
# xp.fram_pixel_waterfall(1,1400)
# show_img(pint_dict[0][:,:],aspect=3)
#to show a image with mask
#show_img_with_mask(FILENAME +'0001',pixellist, qind,img_format='TIFF',show_mask=T,logs=F)
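############################################
#to get the one-time correlation g2 (illustrative; assumes Init_for_Bfiber
#defines FILENAME, timeperframe, qstart/qend/noqs and friends)
# xp=xpcs();
# xp.calqlist( shape='circle');
# xp.delays(time=1);
# xp.get_min_dly();
# g2, t = xp.autocor();
# xp.showg2(data='g2')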
# pylint: disable=import-error, wildcard-import, invalid-name, undefined-variable, no-member, attribute-defined-outside-init
__author__ = 'floriandienesch'
from Objekte import planet, fixstern, mond
from Model.solarSysModel import *
from Objekte.lighting import Lighting
from PyQt5 import QtCore, QtWidgets, QtGui
from Objekte.planet import Planet
from Objekte.mond import Mond
from Objekte.fixstern import Fixstern
import sys
import time
class Weltall(QtWidgets.QWidget):
"""
This class is our universe.
    Here we can display our planets, stars and our moon
"""
def __init__(self, parent=None):
"""
Constructor
Initialize the variables
"""
QtWidgets.QWidget.__init__(self)
self.model = SolarSunModel()
self.lighting = Lighting()
self.planet = Planet()
self.mond = Mond()
self.fixstern = Fixstern()
def InitGL(self):
"""
Method InitGL
This Method initializes our Solar System
It sets the lighting and enables or rather sets the textures
"""
# make the objects transparent
glEnable(GL_DEPTH_TEST)
# enable texturing
glEnable(GL_TEXTURE_2D)
# enable lighting
self.lighting.enableLighting()
# add a new light
self.lighting.addLight(GL_LIGHT0)
# set the position of the light to the sun
self.lighting.setLight(GL_LIGHT0, self.model.lightOn[0], self.model.lightOn[1],
self.model.lightOn[2], self.model.lightOn[3])
# set the textures when starting the program
if self.model.fileSet == False:
try:
self.imageIDMoon = self.model.t.textureOrbit(self.model.file[0])
self.imageIDEarth = self.model.t.textureOrbit(self.model.file[1])
self.imageIDSun = self.model.t.textureOrbit(self.model.file[2])
self.imageIDJupiter = self.model.t.textureOrbit(self.model.file[3])
        except Exception:
            print("Can't find the textures!")
# load the textures which are assigned by the user
else:
self.imageIDMoon = self.model.t.textureOrbit(self.model.file[0][0])
self.imageIDEarth = self.model.t.textureOrbit(self.model.file[1][0])
self.imageIDSun = self.model.t.textureOrbit(self.model.file[2][0])
self.imageIDJupiter = self.model.t.textureOrbit(self.model.file[3][0])
# open the help window
self.help()
# set color of the back of the planet not to black
glEnable(GL_COLOR_MATERIAL)
# set the Backgroundcolor
glClearColor(0.0, 0.0, 0.0, 0.0)
def reSizeGLScene(self, width, height):
"""
Method reSizeGLScene
This function is called automatically by pyopengl when the window is resized
:param width: width of the window
:param height: height of the window
"""
# prevent A Divide By Zero If The Window Is Too Small
try:
if height == 0:
height = 1
# reset The Current Viewport And Perspective Transformation
glViewport(0, 0, width, height)
# save the values of the current width and height
self.model.width = width
self.model.height = height
except Exception:
print("Please enter an integer")
def drawGLScene(self):
"""
Method drawGLScene
This Function is called automatically by opengl
"""
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(self.model.zoom, float(self.model.width)/ float(self.model.height), 1, 110.0)
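        # self.model.zoom is the vertical field of view in degrees; a smaller
        # angle narrows the viewing frustum, which appears as zooming in.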
if self.model.perspective == 1:
gluLookAt(0, 8, 4, 0, 6, 0, 0, 1, 0)
glMatrixMode(GL_MODELVIEW)
# clear The Screen And The Depth Buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# reset The Weltall
glLoadIdentity()
# renders the light
self.lighting.render()
# bind the sun texture in our buffer
glBindTexture(GL_TEXTURE_2D, self.imageIDSun[2])
self.lighting.disableLighting()
# rotate the sun
self.fixstern.rotation(self.model.rot_sonne, self.model.speedSun, 0, 0)
# add the sun to our solar system
self.fixstern.addFixstern(1, self.model.rot_sonne, -4, 0, -12, 40, 20)
self.lighting.enableLighting()
# bind the earth texture in our buffer
glBindTexture(GL_TEXTURE_2D, self.imageIDEarth[2])
# rotate the earth
self.planet.rotation(self.model.rot_erde, 0, self.model.speedEarth, 0)
# add the earth to our solar system
self.planet.addPlanet(0.8, self.model.rot_erde, 0, 0, -15, 3.0, 3.0, 20, 20)
glBindTexture(GL_TEXTURE_2D, self.imageIDMoon[2])
self.mond.rotation(self.model.rot_mond, 0, self.model.speedMoon, 0)
self.mond.addMond(0.2, self.model.rot_mond, 0, 0, -12, 20, 20)
glBindTexture(GL_TEXTURE_2D, self.imageIDJupiter[2])
self.planet.rotation(self.model.rot_jupiter, 0, self.model.speedJupiter, 0)
self.planet.addPlanet(1, self.model.rot_jupiter, 0, 0, -15, 6.0, 6.0, 20, 20)
# limit our FPS to 60 FPS
time.sleep(1 / float(60))
# since this is double buffered, swap the buffers to display what just got drawn.
glutSwapBuffers()
def mousePressed(self, button, state, x, y):
"""
Method mousePressed
This function is called when the user clicks the left or
right mouse button
:param button: left or right button
:param state: fire the event on mouse-up or mouse-down
:param x: x-Coordinate of the mouse
:param y: y-Coordinate of the mouse
"""
        # the left mouse button cycles the light through three states
        if state == GLUT_DOWN and button == GLUT_LEFT_BUTTON:
            # state "On": apply the 'light off' parameters to dim the scene
            if self.model.lightStatus == "On":
                self.lighting.enableLighting()
                self.lighting.setLight(GL_LIGHT0, self.model.lightOff[0], self.model.lightOff[1],
                                       self.model.lightOff[2], self.model.lightOff[3])
                self.model.lightStatus = "Off"
            # state "Off": restore the 'light on' parameters
            elif self.model.lightStatus == "Off":
                self.lighting.setLight(GL_LIGHT0, self.model.lightOn[0], self.model.lightOn[1],
                                       self.model.lightOn[2], self.model.lightOn[3])
                self.model.lightStatus = "Off2"
            # state "Off2": disable lighting entirely before starting the cycle over
            elif self.model.lightStatus == "Off2":
                self.lighting.disableLighting()
                self.model.lightStatus = "On"
# if the right button is pressed down
if state == GLUT_DOWN and button == GLUT_RIGHT_BUTTON:
# turn the textures on
if self.model.textures == True:
glEnable(GL_TEXTURE_2D)
self.model.textures = False
# turn the textures off
else:
glDisable(GL_TEXTURE_2D)
self.model.textures = True
def help(self):
"""
Method help
This displays an extern window with the description of the controls
"""
self.setGeometry(300, 300, 300, 330)
self.setWindowTitle('Solarsystem Help')
self.setToolTip('This is the <i>Help</i> of the <i>controls</i> ')
self.setMaximumSize(300, 330)
self.move(650, 0)
self.edit = QtWidgets.QTextEdit()
self.edit.setEnabled(False)
self.edit.append('<h1>Controls</h1>'
'<b><i>Mouse controls:</i></b>'
'<br>Turn light on/ off: <b>Left mouse click</b>'
'<br>Turn texture on/ off: <b>Right mouse click</b>'
'<br>'
'<br><b><i>Keyboard controls:</i></b>'
'<br>Increase speed of Planets: <b>d</b>'
'<br>Decrease speed of Planets: <b>a</b>'
'<br>Stop animation: <b>s</b>'
'<br>Load your own textures: <b>t</b>'
'<br>Switch view: <b>m</b>'
'<br>Zoom in: <b>x</b>'
'<br>Zoom out: <b>y</b>'
'<br>Switch to fullscreen mode: <b>f</b>'
'<br>Display this help: <b>h</b>'
'<br>Quit program: <b>ESC</b>')
layout = QtWidgets.QVBoxLayout(self)
layout.addWidget(self.edit)
self.setWindowOpacity(0.9)
self.show()
def keyPressed(self, *args):
"""
Method keyPressed
This function is called when a button on the keyboard is pressed
:param args: which button is pressed
"""
# speed up the planets
if args[0] == b'd':
self.model.speedEarth += 0.2
self.model.speedMoon += 0.2
self.model.speedSun = 0.1
self.model.speedJupiter += 0.2
# speed down the planets
if args[0] == b'a':
self.model.speedEarth -= 0.2
self.model.speedMoon -= 0.2
self.model.speedSun = 0.1
self.model.speedJupiter -= 0.2
# stop the planets
if args[0] == b's':
self.model.speedEarth = 0
self.model.speedMoon = 0
self.model.speedSun = 0
self.model.speedJupiter = 0
        # toggle fullscreen mode
if args[0] == b'f':
if self.model.fullscreen == False:
glutFullScreen()
self.model.fullscreen = True
else:
self.model.fullscreen = False
glutPositionWindow(0, 0)
glutReshapeWindow(640, 480)
# If escape is pressed, kill everything.
if args[0] == b'\x1b':
sys.exit()
        # zoom in (decrease the field of view; clamped at 20 degrees)
if args[0] == b'x':
if int(self.model.zoom) < 20:
self.model.zoom = 20
else:
self.model.zoom -= 1
        # zoom out (increase the field of view; clamped at 100 degrees)
if args[0] == b'y':
if int(self.model.zoom) > 100:
self.model.zoom = 100
else:
self.model.zoom += 1
# assign new textures
if args[0] == b't':
self.model.fileSet = True
try:
self.model.file[0] = QtWidgets.QFileDialog.\
getOpenFileName(self, 'Load texture Moon', '/home')
self.model.file[1] = QtWidgets.QFileDialog\
.getOpenFileName(self, 'Load texture Earth', '/home')
self.model.file[2] = QtWidgets.QFileDialog.\
getOpenFileName(self, 'Load texture Sun', '/home')
self.model.file[3] = QtWidgets.QFileDialog.\
getOpenFileName(self, 'Load texture Jupiter', '/home')
except Exception:
                if (self.model.file[0] == '' or self.model.file[1] == ''
                        or self.model.file[2] == '' or self.model.file[3] == ''):
                    print("empty")
self.InitGL()
if args[0] == b'h':
self.help()
if args[0] == b'm':
if self.model.perspective == 0:
self.model.perspective = 1
else:
self.model.perspective = 0
def main(self):
"""
Method main
This function initializes and start pyopengl
"""
# Select type of Display mode:
# Double buffer
# RGBA color
# Alpha components supported
# Depth buffer
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
# get a 640 x 480 window
glutInitWindowSize(640, 480)
# the window starts at the upper left corner of the screen
glutInitWindowPosition(0, 0)
        # Create the window. The original C tutorials retain the returned
        # window id for closing the window later; we do not need it here.
glutCreateWindow("Solarsystem v1.1")
# Register the drawing function with glut, BUT in Python
# land, at least using PyOpenGL, we need to
# set the function pointer and invoke a function to
# actually register the callback, otherwise it
# would be very much like the C version of the code.
glutDisplayFunc(self.drawGLScene)
# When we are doing nothing, redraw the scene.
glutIdleFunc(self.drawGLScene)
# Register the function called when our window is resized.
glutReshapeFunc(self.reSizeGLScene)
# Register the function called when the keyboard is pressed.
glutKeyboardFunc(self.keyPressed)
# Register the function called when the mouse is clicked.
glutMouseFunc(self.mousePressed)
# Initialize our window.
self.InitGL()
# Start Event Processing Engine
glutMainLoop()
if __name__ == '__main__':
app = QtWidgets.QApplication(glutInit(sys.argv))
# load splashscreen
splash_pix = QtGui.QPixmap('../Splashscreen2.jpg')
# generate the splashscreen with the image
splash = QtWidgets.QSplashScreen(splash_pix, QtCore.Qt.WindowStaysOnTopHint)
# show the splashscreen
splash.show()
app.processEvents()
time.sleep(1)
start = Weltall()
splash.finish(start)
app.exit()
start.main()
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import subprocess
import sys
import tarfile
import tempfile
import test_server
import unittest
import zipfile
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
TOOLS_DIR = os.path.join(os.path.dirname(BUILD_TOOLS_DIR), 'tools')
sys.path.extend([BUILD_TOOLS_DIR, TOOLS_DIR])
import build_utils
import getos
import manifest_util
import oshelpers
MANIFEST_BASENAME = 'naclsdk_manifest2.json'
# Attributes defined outside __init__
# pylint: disable=W0201
class SdkToolsTestCase(unittest.TestCase):
def tearDown(self):
if self.server:
self.server.Shutdown()
oshelpers.Remove(['-rf', self.basedir])
def SetupDefault(self):
self.SetupWithBaseDirPrefix('sdktools')
def SetupWithBaseDirPrefix(self, basedir_prefix, tmpdir=None):
self.basedir = tempfile.mkdtemp(prefix=basedir_prefix, dir=tmpdir)
# We have to make sure that we build our updaters with a version that is at
# least as large as the version in the sdk_tools bundle. If not, update
# tests may fail because the "current" version (according to the sdk_cache)
# is greater than the version we are attempting to update to.
self.current_revision = self._GetSdkToolsBundleRevision()
self._BuildUpdater(self.basedir, self.current_revision)
self._LoadCacheManifest()
self.server = test_server.LocalHTTPServer(self.basedir)
def _GetSdkToolsBundleRevision(self):
"""Get the sdk_tools bundle revision.
We get this from the checked-in path; this is the same file that
build_updater uses to specify the current revision of sdk_tools."""
manifest_filename = os.path.join(BUILD_TOOLS_DIR, 'json',
'naclsdk_manifest0.json')
manifest = manifest_util.SDKManifest()
manifest.LoadDataFromString(open(manifest_filename, 'r').read())
return manifest.GetBundle('sdk_tools').revision
def _LoadCacheManifest(self):
"""Read the manifest from nacl_sdk/sdk_cache.
This manifest should only contain the sdk_tools bundle.
"""
manifest_filename = os.path.join(self.basedir, 'nacl_sdk', 'sdk_cache',
MANIFEST_BASENAME)
self.manifest = manifest_util.SDKManifest()
self.manifest.LoadDataFromString(open(manifest_filename, 'r').read())
self.sdk_tools_bundle = self.manifest.GetBundle('sdk_tools')
def _WriteManifest(self):
with open(os.path.join(self.basedir, MANIFEST_BASENAME), 'w') as stream:
stream.write(self.manifest.GetDataAsString())
def _BuildUpdater(self, out_dir, revision=None):
build_updater_py = os.path.join(BUILD_TOOLS_DIR, 'build_updater.py')
cmd = [sys.executable, build_updater_py, '-o', out_dir]
if revision:
cmd.extend(['-r', str(revision)])
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
_, _ = process.communicate()
self.assertEqual(process.returncode, 0)
def _BuildUpdaterArchive(self, rel_path, revision):
"""Build a new sdk_tools bundle.
Args:
rel_path: The relative path to build the updater.
revision: The revision number to give to this bundle.
Returns:
A manifest_util.Archive() that points to this new bundle on the local
server.
"""
self._BuildUpdater(os.path.join(self.basedir, rel_path), revision)
new_sdk_tools_tgz = os.path.join(self.basedir, rel_path, 'sdk_tools.tgz')
with open(new_sdk_tools_tgz, 'rb') as sdk_tools_stream:
archive_sha1, archive_size = manifest_util.DownloadAndComputeHash(
sdk_tools_stream)
archive = manifest_util.Archive('all')
archive.url = self.server.GetURL('%s/sdk_tools.tgz' % (rel_path,))
archive.checksum = archive_sha1
archive.size = archive_size
return archive
def _Run(self, args):
naclsdk_shell_script = os.path.join(self.basedir, 'nacl_sdk', 'naclsdk')
if getos.GetPlatform() == 'win':
naclsdk_shell_script += '.bat'
cmd = [naclsdk_shell_script, '-U', self.server.GetURL(MANIFEST_BASENAME)]
cmd.extend(args)
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
stdout, _ = process.communicate()
self.assertEqual(process.returncode, 0)
return stdout
def _RunAndExtractRevision(self):
stdout = self._Run(['-v'])
    match = re.search(r'version r(\d+)', stdout)
self.assertTrue(match is not None)
return int(match.group(1))
class TestSdkTools(SdkToolsTestCase):
def testPathHasSpaces(self):
"""Test that running naclsdk from a path with spaces works."""
self.SetupWithBaseDirPrefix('sdk tools')
self._WriteManifest()
self._RunAndExtractRevision()
class TestBuildUpdater(SdkToolsTestCase):
def setUp(self):
self.SetupDefault()
def testUpdaterPathsAreSane(self):
"""Test that the paths to files in nacl_sdk.zip and sdktools.tgz are
relative to the output directory."""
nacl_sdk_zip_path = os.path.join(self.basedir, 'nacl_sdk.zip')
zip_stream = zipfile.ZipFile(nacl_sdk_zip_path, 'r')
try:
self.assertTrue(all(name.startswith('nacl_sdk')
for name in zip_stream.namelist()))
finally:
zip_stream.close()
# sdktools.tgz has no built-in directories to look for. Instead, just look
# for some files that must be there.
sdktools_tgz_path = os.path.join(self.basedir, 'sdk_tools.tgz')
tar_stream = tarfile.open(sdktools_tgz_path, 'r:gz')
try:
names = [m.name for m in tar_stream.getmembers()]
self.assertTrue('LICENSE' in names)
self.assertTrue('sdk_update.py' in names)
finally:
tar_stream.close()
class TestAutoUpdateSdkTools(SdkToolsTestCase):
def setUp(self):
self.SetupDefault()
def testNoUpdate(self):
"""Test that running naclsdk with current revision does nothing."""
self._WriteManifest()
revision = self._RunAndExtractRevision()
self.assertEqual(revision, self.current_revision)
def testUpdate(self):
"""Test that running naclsdk with a new revision will auto-update."""
new_revision = self.current_revision + 1
archive = self._BuildUpdaterArchive('new', new_revision)
self.sdk_tools_bundle.AddArchive(archive)
self.sdk_tools_bundle.revision = new_revision
self._WriteManifest()
revision = self._RunAndExtractRevision()
self.assertEqual(revision, new_revision)
def testManualUpdateIsIgnored(self):
"""Test that attempting to manually update sdk_tools is ignored.
If the sdk_tools bundle was updated normally (i.e. the old way), it would
leave a sdk_tools_update folder that would then be copied over on a
subsequent run. This test ensures that there is no folder made.
"""
new_revision = self.current_revision + 1
archive = self._BuildUpdaterArchive('new', new_revision)
self.sdk_tools_bundle.AddArchive(archive)
self.sdk_tools_bundle.revision = new_revision
self._WriteManifest()
stdout = self._Run(['update', 'sdk_tools'])
self.assertTrue(stdout.find('Ignoring manual update request.') != -1)
sdk_tools_update_dir = os.path.join(self.basedir, 'nacl_sdk',
'sdk_tools_update')
self.assertFalse(os.path.exists(sdk_tools_update_dir))
class TestAutoUpdateSdkToolsDifferentFilesystem(TestAutoUpdateSdkTools):
def setUp(self):
# On Linux (on my machine at least), /tmp is a different filesystem than
# the current directory. os.rename fails when the source and destination
# are on different filesystems. Test that case here.
self.SetupWithBaseDirPrefix('sdktools', tmpdir='.')
def main():
suite = unittest.defaultTestLoader.loadTestsFromModule(sys.modules[__name__])
result = unittest.TextTestRunner(verbosity=2).run(suite)
return int(not result.wasSuccessful())
if __name__ == '__main__':
sys.exit(main())
'''
BSD 3-Clause License
Copyright (c) 2019, Donald N. Bockoven III
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from __future__ import division
import math
import Tkinter as tk
import ttk
import tkFont
import welds_elastic_method as wem
class Master_window:
def __init__(self, master):
self.master = master
self.weld_segments = []
self.group_result_labels = []
self.analysis_result_labels = []
self.design_result_label = []
self.group_built = 0
self.forces_run = 0
self.f_size = 8
self.f_type = tkFont.Font(family=' Courier New',size=self.f_size)
self.f_type_b_big = tkFont.Font(family=' Courier New',size=12, weight='bold')
self.f_type_b = tkFont.Font(family=' Courier New',size=self.f_size, weight='bold')
self.f_type_bu = tkFont.Font(family=' Courier New',size=self.f_size, weight='bold', underline = True)
self.menubar = tk.Menu(self.master)
self.menu = tk.Menu(self.menubar, tearoff=0)
self.menu_props = tk.Menu(self.menubar, tearoff=0)
self.menubar.add_cascade(label = "File", menu=self.menu)
self.menu.add_command(label="Quit", command=self.quit_app)
try:
self.master.config(menu=self.menubar)
except AttributeError:
self.master.tk.call(master, "config", "-menu", self.menubar)
# Frames
self.main_frame = tk.Frame(master, bd=2, relief='sunken', padx=1,pady=1)
self.main_frame.pack(anchor='c', padx= 1, pady= 1, fill=tk.BOTH, expand=1)
self.input_frame = tk.Frame(self.main_frame, bd=2, relief='sunken', padx=1,pady=1)
self.input_frame.pack(side=tk.LEFT, anchor='ne', padx= 1, pady= 1, fill=tk.BOTH, expand=1)
self.output_frame = tk.Frame(self.main_frame, bd=2, relief='sunken', padx=1,pady=1)
self.output_frame.pack(side=tk.RIGHT, anchor='nw', padx= 1, pady= 1, fill=tk.BOTH, expand=1)
self.output_canvas_frame = tk.Frame(self.output_frame, bd=2, relief='sunken', padx=1,pady=1)
self.output_canvas_frame.pack(padx= 1, pady= 1)
self.output_data_frame = tk.Frame(self.output_frame, bd=2, relief='sunken', padx=1,pady=1)
self.output_data_frame.pack(padx= 1, pady= 1, fill=tk.X, expand=1)
# Canvas
self.weld_canvas = tk.Canvas(self.output_canvas_frame, width=300, height=300, bd=2, relief='sunken', background="black")
self.weld_canvas.pack(side = tk.LEFT, anchor='c', padx= 1, pady= 1)
self.weld_canvas.bind("<Configure>", self.draw_weld)
# Input/output Notebooks
self.nb_inputs = ttk.Notebook(self.input_frame)
self.nb_inputs.pack(fill=tk.BOTH, expand=1)
self.nb_output_data = ttk.Notebook(self.output_data_frame)
self.nb_output_data.pack(fill=tk.BOTH, expand=1)
# Input Notebooks
self.tab_license = ttk.Frame(self.nb_inputs)
self.nb_inputs.add(self.tab_license , text='License')
license_string = ("Copyright (c) 2019, Donald N. Bockoven III\n"
"All rights reserved.\n\n"
"THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\""
" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE"
" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE"
" DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE"
" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL"
" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR"
" SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER"
" CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,"
" OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE"
" OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n"
"https://github.com/buddyd16/Structural-Engineering/blob/master/LICENSE"
)
tk.Label(self.tab_license, text=license_string,font=self.f_type, justify=tk.LEFT).grid(row=0, column=0)
self.tab_geometry = ttk.Frame(self.nb_inputs)
self.nb_inputs.add(self.tab_geometry , text='Weld Group Geometry - Input')
self.tab_loads = ttk.Frame(self.nb_inputs)
self.nb_inputs.add(self.tab_loads , text='Applied Loads and Base Materials - Input')
# Output Notebooks
self.tab_group_properties = ttk.Frame(self.nb_output_data, height=300)
self.nb_output_data.add(self.tab_group_properties , text='Weld Group Properties')
self.group_textbox = tk.Text(self.tab_group_properties, font=self.f_type)
self.group_textbox.grid(row=0, column=0, sticky="nsew", padx=4, pady=4)
self.tab_load_analysis = ttk.Frame(self.nb_output_data, height=300)
self.nb_output_data.add(self.tab_load_analysis , text='Load Analysis Results')
self.load_textbox = tk.Text(self.tab_load_analysis, font=self.f_type)
self.load_textbox.grid(row=0, column=0, sticky="nsew", padx=4, pady=4)
self.load_scroll = tk.Scrollbar(self.tab_load_analysis, command=self.load_textbox.yview)
self.load_scroll.grid(row=0, column=1, sticky='ns')
self.load_textbox['yscrollcommand'] = self.load_scroll.set
self.tab_aisc_design = ttk.Frame(self.nb_output_data, height=300)
self.nb_output_data.add(self.tab_aisc_design , text='AISC Design')
self.aisc_textbox = tk.Text(self.tab_aisc_design, font=self.f_type)
self.aisc_textbox.grid(row=0, column=0, sticky="nsew", padx=4, pady=4)
self.aisc_scroll = tk.Scrollbar(self.tab_aisc_design, command=self.aisc_textbox.yview)
self.aisc_scroll.grid(row=0, column=1, sticky='ns')
self.aisc_textbox['yscrollcommand'] = self.aisc_scroll.set
self.geometry_input_gui()
self.loads_base_material_input_gui()
def geometry_input_gui(self):
tk.Label(self.tab_geometry, text="Weld Segment:", font=self.f_type_b).grid(row=0,column=0, sticky = tk.W)
tk.Label(self.tab_geometry, text="Start x (in):", font=self.f_type).grid(row=1,column=0, sticky = tk.E)
self.start_x_in = tk.StringVar()
tk.Entry(self.tab_geometry, textvariable=self.start_x_in, width=10).grid(row=1,column=1, sticky = tk.W)
tk.Label(self.tab_geometry, text="Start y (in):", font=self.f_type).grid(row=1,column=2, sticky = tk.E, padx=5)
self.start_y_in = tk.StringVar()
tk.Entry(self.tab_geometry, textvariable=self.start_y_in, width=10).grid(row=1,column=3, sticky = tk.W)
tk.Label(self.tab_geometry, text="End x (in):", font=self.f_type).grid(row=2,column=0, sticky = tk.E)
self.end_x_in = tk.StringVar()
tk.Entry(self.tab_geometry, textvariable=self.end_x_in, width=10).grid(row=2,column=1, sticky = tk.W)
tk.Label(self.tab_geometry, text="End y (in):", font=self.f_type).grid(row=2,column=2, sticky = tk.E, padx=5)
self.end_y_in = tk.StringVar()
tk.Entry(self.tab_geometry, textvariable=self.end_y_in, width=10).grid(row=2,column=3, sticky = tk.W)
self.add_segment_button = tk.Button(self.tab_geometry,text = "Add Weld Segment", command = self.add_segment, font=self.f_type_b)
self.add_segment_button.grid(row=1,column=4, padx=5)
self.remove_last_segment_button = tk.Button(self.tab_geometry,text = "Remove Last\nWeld Segment", command = self.remove_last_segment, font=self.f_type_b)
self.remove_last_segment_button.grid(row=2,column=4, padx=5)
self.remove_all_segment_button = tk.Button(self.tab_geometry,text = "Remove All\nWeld Segments", command = self.remove_all, font=self.f_type_b)
self.remove_all_segment_button.grid(row=3,column=4, padx=5)
tk.Label(self.tab_geometry, text="Weld Segment List:", font=self.f_type_b).grid(row=3,column=0, sticky = tk.W)
self.segment_list_scrollbar = tk.Scrollbar(self.tab_geometry, orient="vertical", command=self.segment_list_scroll)
self.segment_list_scrollbar.grid(row=4, column=6, sticky='wns', pady=10)
self.segment_list = tk.Listbox(self.tab_geometry, height = 30, width = 130, font=self.f_type, yscrollcommand=self.segment_list_scrollbar.set)
self.segment_list.grid(row=4, column=0, columnspan=6, sticky='nsew', pady=10)
segment_label_key = """i = weld start coord. j = segment end coord. A = segment area = segment length\nIxo = x-axis moment of inertia about segment center Iyo = y-axis moment of inertia about segment center\nCenter = segment center coords. Cx = x distance to group centroid Cy = y distance to group centroid\nIx = x-axis moment of inertia about group center Iy = y-axis moment of inertia about group center"""
tk.Label(self.tab_geometry, text=segment_label_key, font=self.f_type, justify=tk.LEFT).grid(row=5,column=0,columnspan=6, sticky = tk.W, padx=5)
self.add_circle_button = tk.Button(self.tab_geometry, text='Add a Circular Weld', command = self.add_circle_function, font=self.f_type_b)
self.add_circle_button.grid(row=0,column=5, sticky = tk.W, padx=5)
tk.Label(self.tab_geometry, text='center x (in):',font=self.f_type).grid(row=1,column=5, sticky = tk.E, padx=5)
tk.Label(self.tab_geometry, text='center y (in):',font=self.f_type).grid(row=2,column=5, sticky = tk.E, padx=5)
tk.Label(self.tab_geometry, text='radius (in):',font=self.f_type).grid(row=3,column=5, sticky = tk.E, padx=5)
tk.Label(self.tab_geometry, text='start (deg.):',font=self.f_type).grid(row=1,column=7, sticky = tk.E, padx=5)
tk.Label(self.tab_geometry, text='end (deg.):',font=self.f_type).grid(row=2,column=7, sticky = tk.E, padx=5)
self.add_circle_ins = [tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar()]
i=0
for circle_info in self.add_circle_ins:
r = 1+i
if r<4:
tk.Entry(self.tab_geometry, textvariable=circle_info, width=10).grid(row=r,column=6, sticky = tk.W)
else:
tk.Entry(self.tab_geometry, textvariable=circle_info, width=10).grid(row=r-3,column=8, sticky = tk.W)
i+=1
def loads_base_material_input_gui(self):
tk.Label(self.tab_loads, text="** All Loads Assumed to Act at the Weld Group Centroid**", font=self.f_type_b_big).grid(row=0,column=0,columnspan=6, sticky = tk.W)
tk.Label(self.tab_loads, text="Loading:", font=self.f_type_b).grid(row=1,column=0, sticky = tk.W)
tk.Label(self.tab_loads, text="Materials:", font=self.f_type_b).grid(row=1,column=3, sticky = tk.W)
load_labels = ['Fz (lbs)= \nAxial', 'Fx (lbs)= \nShear X', 'Fy (lbs)= \nShear Y', 'Mx (in-lbs)= \nMoment About X', 'My (in-lbs)= \nMoment About Y', 'Mz = (in-lbs)\nMoment About Z']
self.loads_in = [tk.StringVar(), tk.StringVar(), tk.StringVar(), tk.StringVar(), tk.StringVar(), tk.StringVar()]
for load in self.loads_in:
load.set('0.00')
i=0
for label, load in zip(load_labels, self.loads_in):
current_load_row_count = 2+i
tk.Label(self.tab_loads, text=label, font=self.f_type, justify=tk.RIGHT).grid(row=current_load_row_count,column=0, sticky = tk.E)
tk.Entry(self.tab_loads, textvariable=load, width=20).grid(row=current_load_row_count,column=1, sticky = tk.W)
i+=1
material_labels = ['Fexx,weld (ksi)= ','Fy,base 1 (ksi) = ','Fu,base 1 (ksi)= ','t,base 1 (in) = ','Fy,base 2 (ksi) = ','Fu,base 2 (ksi)= ','t,base 2 (in) = ']
self.material_in = [tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar(),tk.StringVar()]
default_material = ['70.00','36.00','58.00','0.375','36.00','58.00','0.375']
for material, default in zip(self.material_in, default_material):
material.set(default)
y=0
for material_label, material in zip(material_labels,self.material_in):
current_material_row_count = 2+y
tk.Label(self.tab_loads, text=material_label, font=self.f_type, justify=tk.RIGHT).grid(row=current_material_row_count,column=3, sticky = tk.E)
tk.Entry(self.tab_loads, textvariable=material, width=15).grid(row=current_material_row_count,column=4, sticky = tk.W)
y+=1
self.run_analysis_button = tk.Button(self.tab_loads,text = "Run Analysis (Quadrant)", command = self.force_analysis, font=self.f_type_b)
self.run_analysis_button.grid(row=current_load_row_count+1, column=0, columnspan=2, pady=10)
self.run_analysis_segment_button = tk.Button(self.tab_loads,text = "Run Analysis (Segment)", command = self.force_analysis_segment, font=self.f_type_b)
self.run_analysis_segment_button.grid(row=current_load_row_count+2, column=0, columnspan=2, pady=10)
self.run_analysis_conservative_button = tk.Button(self.tab_loads,text = "Run Analysis (Conservative)", command = self.force_analysis_conservative, font=self.f_type_b)
self.run_analysis_conservative_button.grid(row=current_load_row_count+3, column=0, columnspan=2, pady=10)
self.design_asd = tk.IntVar()
tk.Checkbutton(self.tab_loads , text=' : Loads are Nominal (ASD)', variable=self.design_asd, font=self.f_type_b).grid(row=current_material_row_count+1, column=4, sticky = tk.W)
self.run_design_button = tk.Button(self.tab_loads,text = "Run AISC Design", command = self.aisc_design, font=self.f_type_b)
self.run_design_button.grid(row=current_material_row_count+2, column=3, columnspan=2, pady=10)
def quit_app(self):
self.master.destroy()
self.master.quit()
def segment_list_scroll(self, *args):
self.segment_list.yview(*args)
def add_segment(self):
x0 = float(self.start_x_in.get())
y0 = float(self.start_y_in.get())
x1 = float(self.end_x_in.get())
y1 = float(self.end_y_in.get())
start = [x0,y0]
end = [x1,y1]
weld = wem.weld_segment(start,end)
self.weld_segments.append(weld)
self.build_weld_group()
self.fill_segment_list()
def remove_last_segment(self):
del self.weld_segments[-1]
self.build_weld_group()
self.fill_segment_list()
def remove_all(self):
del self.weld_segments[:]
self.segment_list.delete(0,tk.END)
def fill_segment_list(self):
color = "pale green"
self.segment_list.delete(0,tk.END)
i=0
        for segment in self.weld_segments:
            self.segment_list.insert(tk.END, segment.info_text+segment.global_info_text)
            if i % 2 == 0:
                self.segment_list.itemconfigure(i, background=color)
            i+=1
self.draw_weld()
def draw_weld(self,*event):
self.weld_canvas.delete("all")
w = self.weld_canvas.winfo_width()
h = self.weld_canvas.winfo_height()
# x y arrows
coord_start = 10
self.weld_canvas.create_line(coord_start,h-coord_start,coord_start+50,h-coord_start, fill='green', width=1, arrow=tk.LAST)
self.weld_canvas.create_text(coord_start+50,h-(coord_start+8), text='x', fill='green')
self.weld_canvas.create_line(coord_start,h-coord_start,coord_start,h-(coord_start+50), fill='green', width=1, arrow=tk.LAST)
self.weld_canvas.create_text(coord_start+8,h-(coord_start+50), text='y', fill='green')
if len(self.weld_segments)<1:
pass
else:
min_x = min(min([weld.start[0] for weld in self.weld_segments]), min([weld.end[0] for weld in self.weld_segments]))
min_y = min(min([weld.start[1] for weld in self.weld_segments]), min([weld.end[1] for weld in self.weld_segments]))
max_x = max(max([weld.start[0] for weld in self.weld_segments]), max([weld.end[0] for weld in self.weld_segments])) - min_x
max_y = max(max([weld.start[1] for weld in self.weld_segments]), max([weld.end[1] for weld in self.weld_segments])) - min_y
max_dim_for_scale = max(max_x, max_y)
initial = 50
if max_x == 0:
sf_x = (w - (2*initial))
else:
sf_x = (w - (2*initial)) / max_dim_for_scale
if max_y == 0:
sf_y = (h - (2*initial))
else:
sf_y = (h - (2*initial)) / max_dim_for_scale
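            # map weld coordinates to canvas pixels: a single scale factor based
            # on the largest group dimension preserves the aspect ratio, and y is
            # flipped because the Tk canvas origin is the top-left corner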
for weld in self.weld_segments:
x0 = ((weld.x_coords[0] - min_x) * sf_x) + initial
y0 = h - (((weld.y_coords[0] - min_y) * sf_y) + initial)
x1 = ((weld.x_coords[1] - min_x) * sf_x) + initial
y1 = h - (((weld.y_coords[1] - min_y) * sf_y) + initial)
self.weld_canvas.create_line(x0,y0,x1,y1, fill='red', width=2)
if self.group_built == 1:
xc = ((self.weld_group.group_center[0] - min_x) * sf_x) + initial
yc = h - (((self.weld_group.group_center[1]-min_y) * sf_y) + initial)
extension = initial/2
self.weld_canvas.create_line(extension,yc,w-extension,yc, fill='blue', width=1, dash=(6,6))
self.weld_canvas.create_line(xc,h-extension,xc,extension, fill='blue', width=1, dash=(6,6))
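    # Rebuild the elastic weld group whenever segments change; section
    # properties are only computed once at least two segments exist.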
def build_weld_group(self):
if len(self.weld_segments) > 1:
del self.group_result_labels[:]
self.weld_group = wem.elastic_weld_group(self.weld_segments)
self.group_built = 1
self.group_textbox.delete(1.0,tk.END)
            for label, equation, value in zip(self.weld_group.gui_output_labels, self.weld_group.gui_output_equations, self.weld_group.gui_output_values):
                self.group_textbox.insert(tk.END, '{0}{1}{2:.3f}\n'.format(label, equation, value))
self.draw_weld()
else:
self.group_built = 0
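    # The three analysis entry points below apply the same six GUI loads
    # (assumed order: Fx, Fy, Fz, Mx, My, Mz) via the elastic method, a
    # per-segment resultant, and a conservative combination, respectively.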
def force_analysis(self):
del self.analysis_result_labels[:]
forces = []
for load in self.loads_in:
forces.append(float(load.get()))
if self.group_built == 1:
self.weld_group.force_analysis(forces[0],forces[1],forces[2],forces[3],forces[4],forces[5])
self.forces_run = 1
self.resultant = self.weld_group.resultant
self.load_textbox.delete(1.0,tk.END)
            for label, equation, value in zip(self.weld_group.component_forces_key, self.weld_group.component_forces_eqs, self.weld_group.component_forces):
                self.load_textbox.insert(tk.END, '{0} = {1} = {2:.3f} lbs/in\n'.format(label, equation, value))
def force_analysis_segment(self):
forces = []
for load in self.loads_in:
forces.append(float(load.get()))
if self.group_built == 1:
self.weld_group.force_analysis_segment(forces[0],forces[1],forces[2],forces[3],forces[4],forces[5])
self.forces_run = 1
self.resultant = self.weld_group.segment_resultant
self.load_textbox.delete(1.0,tk.END)
self.load_textbox.insert(tk.END,'Resultant = {0:.3f} \n'.format(self.resultant))
for label, result in zip(self.weld_group.gui_forces_segment,self.weld_group.gui_forces_stresses):
self.load_textbox.insert(tk.END,'{0} : {1}\n'.format(label, result))
def force_analysis_conservative(self):
del self.analysis_result_labels[:]
forces = []
for load in self.loads_in:
forces.append(float(load.get()))
if self.group_built == 1:
self.weld_group.force_analysis_conservative(forces[0],forces[1],forces[2],forces[3],forces[4],forces[5])
self.forces_run = 1
self.resultant = self.weld_group.resultant_conservative
self.load_textbox.delete(1.0,tk.END)
            for label, equation, value in zip(self.weld_group.component_forces_key_conservative, self.weld_group.component_forces_eqs_conservative, self.weld_group.component_forces_conservative):
                self.load_textbox.insert(tk.END, '{0} = {1} = {2:.3f} lbs/in\n'.format(label, equation, value))
def aisc_design(self):
if self.group_built == 1 and self.forces_run == 1:
resultant = self.resultant
Fexx = 1000 * float(self.material_in[0].get())
Fy_base1 = 1000 * float(self.material_in[1].get())
Fu_base1 = 1000 * float(self.material_in[2].get())
base_thickness1 = float(self.material_in[3].get())
Fy_base2 = 1000 * float(self.material_in[4].get())
            Fu_base2 = 1000 * float(self.material_in[5].get())
            base_thickness2 = float(self.material_in[6].get())
asd = self.design_asd.get()
self.weld_group.aisc_weld_check(resultant,Fexx, Fy_base1, Fu_base1, base_thickness1, Fy_base2, Fu_base2, base_thickness2, asd)
self.aisc_textbox.delete(1.0,tk.END)
self.aisc_textbox.insert(tk.END, self.weld_group.aisclog)
    def add_circle_function(self):
        # Approximate a circular weld as a polygon of 1-degree chords.
        x = float(self.add_circle_ins[0].get())
        y = float(self.add_circle_ins[1].get())
        r = float(self.add_circle_ins[2].get())
        start_angle = int(self.add_circle_ins[3].get())
        end_angle = int(self.add_circle_ins[4].get())
        for a in range(start_angle, end_angle):
            x0 = r*math.cos(math.radians(a))
            y0 = r*math.sin(math.radians(a))
            x1 = r*math.cos(math.radians(a+1))
            y1 = r*math.sin(math.radians(a+1))
            start = [x0+x, y0+y]
            end = [x1+x, y1+y]
            self.weld_segments.append(wem.weld_segment(start, end))
        # Rebuild the group and the list once, after all chords are added.
        self.build_weld_group()
        self.fill_segment_list()
def main():
root = tk.Tk()
root.title("Elastic Weld Analysis and AISC Design - Alpha v1")
Master_window(root)
root.minsize(1280,720)
root.mainloop()
if __name__ == '__main__':
main()
from top_level.technical.db import DB, Session
from top_level.domain.hourtable import HourTable
from top_level.domain.date import Date
from top_level.domain.user import User
from top_level.domain.event import Event
from top_level.domain.hour import Hour
user_obj = User('','')
hourtable_obj = HourTable('')
event_obj = Event('','', user_obj)
date_obj = Date(0, None, None)
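# Prototype instances: DB.search_* appears to dispatch on the instance's
# class, so these empty objects only select which table to query (assumed
# from usage below).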
def edit_user(user):
    if user is None:
user = User('','')
DB.add(user)
editing = True
while editing:
case = input("\n1 Edit Name\
\n2 Edit Email\
\n3 Return\n")
if case == '1':
new_name = input('New name: ')
user.name = new_name
if case == '2':
new_email = input('New email: ')
user.email = new_email
if case == '3':
DB.flush()
editing = False
return user
def edit_event(event):
editing = True
while editing:
case = input("\n1 Edit Name\
\n2 Edit Local\
\n3 Edit User\
\n4 Add Date\
\n5 List Dates\
\n6 Delete Date\
\n7 Return\n")
if case == '1':
new_name = input('New name: ')
event.name = new_name
if case == '2':
new_local = input('New local: ')
event.local = new_local
if case == '3':
user = edit_user(event.user)
event.user = user
if case == '4':
weekday = input('When is a possible weekday?')
starthour = Hour(input('When is the start hour?'))
            finishhour = Hour(input('When is the finish hour?'))
date = Date(weekday, starthour, finishhour)
event.add_date(date)
if case == '5':
event.list_dates()
if case == '6':
_id = int(input('Date ID: '))
date = DB.search_one(date_obj, _id)
if date is None:
                print('No date found!\n')
else:
DB.delete(date)
if case == '7':
DB.flush()
editing = False
def user_operations(current_user):
    done = False
    while not done:
case = input("\n1 Create User\
\n2 List Users\
\n3 Change User\
\n4 Edit User\
\n5 Delete User\
\n6 Return\n")
if case == '1':
name = input('UserName: ')
email = input('E-mail: ')
user = User(name, email)
DB.add(user)
current_user = user
if case == '2':
for i in DB.search_all(user_obj):
print('{} - ID = {}\n'.format(i, i._id))
if case == '3':
_id = int(input('UserName ID: '))
user = DB.search_one(user_obj, _id)
if user is None:
                print('No user found!\n')
else:
print('User changed to {}'.format(user.name))
current_user = user
if case == '4':
_id = int(input('UserName ID: '))
user = DB.search_one(user_obj, _id)
if user is None:
                print('No user found!\n')
else:
edit_user(user)
if case == '5':
_id = int(input('UserName ID: '))
user = DB.search_one(user_obj, _id)
if user is None:
                print('No user found!\n')
else:
DB.delete(user)
current_user = None
if case == '6':
            done = True
return current_user
def hour_table_operations(current_hour_table, current_user):
    done = False
    while not done:
case = input("\n1 Create HourTable\
\n2 List HourTables\
\n3 Change HourTable\
\n4 Edit HourTable\
\n5 Delete HourTable\
\n6 Event Operations\
\n7 Return\n")
if case == '1':
name = input('HourTable name: ')
hour_table = HourTable(name)
DB.add(hour_table)
current_hour_table = hour_table
if case == '2':
for i in DB.search_all(hourtable_obj):
print('{} - ID = {}\n'.format(i, i._id))
if case == '3':
_id = int(input('HourTable ID: '))
hour_table = DB.search_one(hourtable_obj, _id)
if hour_table is None:
                print('No HourTable found!\n')
else:
print('HourTable changed to {}'.format(hour_table.name))
current_hour_table = hour_table
if case == '4':
_id = int(input('HourTable ID: '))
hour_table = DB.search_one(hourtable_obj, _id)
if hour_table is None:
                print('No HourTable found!\n')
else:
new_name = input('New name: ')
hour_table.name = new_name
DB.flush()
if case == '5':
_id = int(input('HourTable ID: '))
hour_table = DB.search_one(hourtable_obj, _id)
if hour_table is None:
                print('No HourTable found!\n')
else:
DB.delete(hour_table)
current_hour_table = None
if case == '6':
if current_hour_table is None or current_user is None:
print('\n Need a User and Hourtable selected to do event operations!')
else:
event_operations(current_hour_table, current_user)
if case == '7':
            done = True
return current_hour_table
def event_operations(current_hour_table, current_user):
    done = False
    while not done:
case = input("\n1 Create Event\
\n2 List Events\
\n3 Edit Event\
\n4 Delete Event\
\n5 Check In Common\
\n6 Check Non Conflicting Possibilities\
\n7 Return\n")
if case == '1':
name = input('What is the name of the event?')
local = input('What is the local of the event?')
event = Event(name, local, current_user)
            # Register the event once; dates are attached inside the loop.
            DB.add(event)
            possible = True
            while possible:
                weekday = input('When is a possible weekday?')
                starthour = Hour(input('When is the start hour?'))
                finishhour = Hour(input('When is the finish hour?'))
                date = Date(weekday, starthour, finishhour)
                event.add_date(date)
                possible = input('Any more dates? [y/n]') == 'y'
current_hour_table.add_event(event)
DB.flush()
if case == '2':
current_hour_table.list_events()
if case == '3':
_id = int(input('Event ID: '))
event = DB.search_one(event_obj, _id)
if event is None:
                print('No event found!\n')
else:
edit_event(event)
if case == '4':
_id = int(input('Event ID: '))
event = DB.search_one(event_obj, _id)
if event is None:
                print('No event found!\n')
else:
DB.delete(event)
if case == '5':
for i in current_hour_table.check_common():
print(i)
if case == '6':
for i in current_hour_table.check_possibilities():
print(i)
if case == '7':
            done = True
def main():
    DB.create()
current_hour_table = DB.search_first(hourtable_obj)
current_user = DB.search_first(user_obj)
    done = False
    while not done:
case = input("\n1 User Operations\
\n2 HourTable Operations\
\n3 Clear DataBase\
\n4 Exit\n")
if case == '1':
current_user = user_operations(current_user)
if case == '2':
current_hour_table = hour_table_operations(current_hour_table, \
current_user)
if case == '3':
DB.clear()
if case == '4':
            done = True
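# Entry point guard (assumed; main() is otherwise never invoked in this file).
if __name__ == '__main__':
    main()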
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from six.moves import StringIO
import glanceclient.exc
import mock
from oslo_config import cfg
from oslo_utils import netutils
import testtools
from nova import context
from nova import exception
from nova.image import glance
from nova import test
CONF = cfg.CONF
NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
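# Minimal UTC tzinfo stub so NOW_DATETIME compares equal to timestamps
# parsed from NOW_GLANCE_FORMAT.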
class tzinfo(datetime.tzinfo):
@staticmethod
def utcoffset(*args, **kwargs):
return datetime.timedelta()
NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())
class TestConversions(test.NoDBTestCase):
def test_convert_timestamps_to_datetimes(self):
fixture = {'name': None,
'properties': {},
'status': None,
'is_public': None,
'created_at': NOW_GLANCE_FORMAT,
'updated_at': NOW_GLANCE_FORMAT,
'deleted_at': NOW_GLANCE_FORMAT}
result = glance._convert_timestamps_to_datetimes(fixture)
self.assertEqual(result['created_at'], NOW_DATETIME)
self.assertEqual(result['updated_at'], NOW_DATETIME)
self.assertEqual(result['deleted_at'], NOW_DATETIME)
def _test_extracting_missing_attributes(self, include_locations):
# Verify behavior from glance objects that are missing attributes
# TODO(jaypipes): Find a better way of testing this crappy
# glanceclient magic object stuff.
class MyFakeGlanceImage(object):
def __init__(self, metadata):
IMAGE_ATTRIBUTES = ['size', 'owner', 'id', 'created_at',
'updated_at', 'status', 'min_disk',
'min_ram', 'is_public']
raw = dict.fromkeys(IMAGE_ATTRIBUTES)
raw.update(metadata)
self.__dict__['raw'] = raw
def __getattr__(self, key):
try:
return self.__dict__['raw'][key]
except KeyError:
raise AttributeError(key)
def __setattr__(self, key, value):
try:
self.__dict__['raw'][key] = value
except KeyError:
raise AttributeError(key)
metadata = {
'id': 1,
'created_at': NOW_DATETIME,
'updated_at': NOW_DATETIME,
}
image = MyFakeGlanceImage(metadata)
observed = glance._extract_attributes(
image, include_locations=include_locations)
expected = {
'id': 1,
'name': None,
'is_public': None,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': NOW_DATETIME,
'updated_at': NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None
}
if include_locations:
expected['locations'] = None
expected['direct_url'] = None
self.assertEqual(expected, observed)
def test_extracting_missing_attributes_include_locations(self):
self._test_extracting_missing_attributes(include_locations=True)
def test_extracting_missing_attributes_exclude_locations(self):
self._test_extracting_missing_attributes(include_locations=False)
class TestExceptionTranslations(test.NoDBTestCase):
def test_client_forbidden_to_imagenotauthed(self):
in_exc = glanceclient.exc.Forbidden('123')
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageNotAuthorized)
def test_client_httpforbidden_converts_to_imagenotauthed(self):
in_exc = glanceclient.exc.HTTPForbidden('123')
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageNotAuthorized)
def test_client_notfound_converts_to_imagenotfound(self):
in_exc = glanceclient.exc.NotFound('123')
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageNotFound)
def test_client_httpnotfound_converts_to_imagenotfound(self):
in_exc = glanceclient.exc.HTTPNotFound('123')
out_exc = glance._translate_image_exception('123', in_exc)
self.assertIsInstance(out_exc, exception.ImageNotFound)
class TestGlanceSerializer(test.NoDBTestCase):
def test_serialize(self):
metadata = {'name': 'image1',
'is_public': True,
'foo': 'bar',
'properties': {
'prop1': 'propvalue1',
'mappings': [
{'virtual': 'aaa',
'device': 'bbb'},
{'virtual': 'xxx',
'device': 'yyy'}],
'block_device_mapping': [
{'virtual_device': 'fake',
'device_name': '/dev/fake'},
{'virtual_device': 'ephemeral0',
'device_name': '/dev/fake0'}]}}
# NOTE(tdurakov): Assertion of serialized objects won't work
# during using of random PYTHONHASHSEED. Assertion of
# serialized/deserialized object and initial one is enough
converted = glance._convert_to_string(metadata)
self.assertEqual(glance._convert_from_string(converted), metadata)
class TestGetImageService(test.NoDBTestCase):
@mock.patch.object(glance.GlanceClientWrapper, '__init__',
return_value=None)
def test_get_remote_service_from_id(self, gcwi_mocked):
id_or_uri = '123'
_ignored, image_id = glance.get_remote_image_service(
mock.sentinel.ctx, id_or_uri)
self.assertEqual(id_or_uri, image_id)
gcwi_mocked.assert_called_once_with()
@mock.patch.object(glance.GlanceClientWrapper, '__init__',
return_value=None)
def test_get_remote_service_from_href(self, gcwi_mocked):
id_or_uri = 'http://127.0.0.1/123'
_ignored, image_id = glance.get_remote_image_service(
mock.sentinel.ctx, id_or_uri)
self.assertEqual('123', image_id)
gcwi_mocked.assert_called_once_with(context=mock.sentinel.ctx,
host='127.0.0.1',
port=80,
use_ssl=False)
class TestCreateGlanceClient(test.NoDBTestCase):
@mock.patch('oslo_utils.netutils.is_valid_ipv6')
@mock.patch('glanceclient.Client')
def test_headers_passed_glanceclient(self, init_mock, ipv6_mock):
self.flags(auth_strategy='keystone')
ipv6_mock.return_value = False
auth_token = 'token'
ctx = context.RequestContext('fake', 'fake', auth_token=auth_token)
host = 'host4'
port = 9295
use_ssl = False
expected_endpoint = 'http://host4:9295'
expected_params = {
'identity_headers': {
'X-Auth-Token': 'token',
'X-User-Id': 'fake',
'X-Roles': '',
'X-Tenant-Id': 'fake',
'X-Identity-Status': 'Confirmed'
},
'token': 'token'
}
glance._create_glance_client(ctx, host, port, use_ssl)
init_mock.assert_called_once_with('1', expected_endpoint,
**expected_params)
# Test the version is properly passed to glanceclient.
ipv6_mock.reset_mock()
init_mock.reset_mock()
expected_endpoint = 'http://host4:9295'
expected_params = {
'identity_headers': {
'X-Auth-Token': 'token',
'X-User-Id': 'fake',
'X-Roles': '',
'X-Tenant-Id': 'fake',
'X-Identity-Status': 'Confirmed'
},
'token': 'token'
}
glance._create_glance_client(ctx, host, port, use_ssl, version=2)
init_mock.assert_called_once_with('2', expected_endpoint,
**expected_params)
# Test that non-keystone auth strategy doesn't bother to pass
# glanceclient all the Keystone-related headers.
ipv6_mock.reset_mock()
init_mock.reset_mock()
self.flags(auth_strategy='non-keystone')
expected_endpoint = 'http://host4:9295'
expected_params = {
}
glance._create_glance_client(ctx, host, port, use_ssl)
init_mock.assert_called_once_with('1', expected_endpoint,
**expected_params)
# Test that the IPv6 bracketization adapts the endpoint properly.
ipv6_mock.reset_mock()
init_mock.reset_mock()
ipv6_mock.return_value = True
expected_endpoint = 'http://[host4]:9295'
expected_params = {
}
glance._create_glance_client(ctx, host, port, use_ssl)
init_mock.assert_called_once_with('1', expected_endpoint,
**expected_params)
class TestGlanceClientWrapper(test.NoDBTestCase):
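    """Tests retry and API-server failover behaviour of
    GlanceClientWrapper.call().
    """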
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._create_glance_client')
def test_static_client_without_retries(self, create_client_mock,
sleep_mock):
client_mock = mock.MagicMock()
images_mock = mock.MagicMock()
images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable
type(client_mock).images = mock.PropertyMock(return_value=images_mock)
create_client_mock.return_value = client_mock
self.flags(num_retries=0, group='glance')
ctx = context.RequestContext('fake', 'fake')
host = 'host4'
port = 9295
use_ssl = False
client = glance.GlanceClientWrapper(context=ctx, host=host, port=port,
use_ssl=use_ssl)
create_client_mock.assert_called_once_with(ctx, host, port, use_ssl, 1)
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctx, 1, 'get', 'meow')
self.assertFalse(sleep_mock.called)
@mock.patch('nova.image.glance.LOG')
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._create_glance_client')
def test_static_client_with_retries_negative(self, create_client_mock,
sleep_mock, mock_log):
client_mock = mock.Mock(spec=glanceclient.Client)
images_mock = mock.Mock()
images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable
client_mock.images = images_mock
create_client_mock.return_value = client_mock
self.flags(num_retries=-1, group='glance')
ctx = context.RequestContext('fake', 'fake')
host = 'host4'
port = 9295
use_ssl = False
client = glance.GlanceClientWrapper(context=ctx, host=host, port=port,
use_ssl=use_ssl)
create_client_mock.assert_called_once_with(ctx, host, port, use_ssl, 1)
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctx, 1, 'get', 'meow')
self.assertTrue(mock_log.warning.called)
msg = mock_log.warning.call_args_list[0]
self.assertIn('Treating negative config value', msg[0][0])
self.assertFalse(sleep_mock.called)
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._create_glance_client')
def test_static_client_with_retries(self, create_client_mock,
sleep_mock):
self.flags(num_retries=1, group='glance')
client_mock = mock.MagicMock()
images_mock = mock.MagicMock()
images_mock.get.side_effect = [
glanceclient.exc.ServiceUnavailable,
None
]
type(client_mock).images = mock.PropertyMock(return_value=images_mock)
create_client_mock.return_value = client_mock
ctx = context.RequestContext('fake', 'fake')
host = 'host4'
port = 9295
use_ssl = False
client = glance.GlanceClientWrapper(context=ctx,
host=host, port=port, use_ssl=use_ssl)
client.call(ctx, 1, 'get', 'meow')
sleep_mock.assert_called_once_with(1)
@mock.patch('random.shuffle')
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._create_glance_client')
def test_default_client_without_retries(self, create_client_mock,
sleep_mock, shuffle_mock):
api_servers = [
'host1:9292',
'https://host2:9293',
'http://host3:9294'
]
client_mock = mock.MagicMock()
images_mock = mock.MagicMock()
images_mock.get.side_effect = glanceclient.exc.ServiceUnavailable
type(client_mock).images = mock.PropertyMock(return_value=images_mock)
create_client_mock.return_value = client_mock
shuffle_mock.return_value = api_servers
self.flags(num_retries=0, group='glance')
self.flags(api_servers=api_servers, group='glance')
# Here we are testing the behaviour that calling client.call() twice
# when there are no retries will cycle through the api_servers and not
# sleep (which would be an indication of a retry)
ctx = context.RequestContext('fake', 'fake')
client = glance.GlanceClientWrapper()
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctx, 1, 'get', 'meow')
self.assertFalse(sleep_mock.called)
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctx, 1, 'get', 'meow')
self.assertFalse(sleep_mock.called)
create_client_mock.assert_has_calls(
[
mock.call(ctx, 'host1', 9292, False, 1),
mock.call(ctx, 'host2', 9293, True, 1),
]
)
@mock.patch('random.shuffle')
@mock.patch('time.sleep')
@mock.patch('nova.image.glance._create_glance_client')
def test_default_client_with_retries(self, create_client_mock,
sleep_mock, shuffle_mock):
api_servers = [
'host1:9292',
'https://host2:9293',
'http://host3:9294'
]
client_mock = mock.MagicMock()
images_mock = mock.MagicMock()
images_mock.get.side_effect = [
glanceclient.exc.ServiceUnavailable,
None
]
type(client_mock).images = mock.PropertyMock(return_value=images_mock)
create_client_mock.return_value = client_mock
self.flags(num_retries=1, group='glance')
self.flags(api_servers=api_servers, group='glance')
ctx = context.RequestContext('fake', 'fake')
# And here we're testing that if num_retries is not 0, then we attempt
# to retry the same connection action against the next client.
client = glance.GlanceClientWrapper()
client.call(ctx, 1, 'get', 'meow')
create_client_mock.assert_has_calls(
[
mock.call(ctx, 'host1', 9292, False, 1),
mock.call(ctx, 'host2', 9293, True, 1),
]
)
sleep_mock.assert_called_once_with(1)
@mock.patch('oslo_service.sslutils.is_enabled')
@mock.patch('glanceclient.Client')
def test_create_glance_client_with_ssl(self, client_mock,
ssl_enable_mock):
self.flags(ca_file='foo.cert', cert_file='bar.cert',
key_file='wut.key', group='ssl')
ctxt = mock.sentinel.ctx
glance._create_glance_client(ctxt, 'host4', 9295, use_ssl=True)
client_mock.assert_called_once_with(
'1', 'https://host4:9295', insecure=False, ssl_compression=False,
cert_file='bar.cert', key_file='wut.key', cacert='foo.cert')
@mock.patch.object(glanceclient.common.http.HTTPClient, 'get')
def test_determine_curr_major_version(self, http_client_mock):
result = ("http://host1:9292/v2/", {'versions': [
{'status': 'CURRENT', 'id': 'v2.3'},
{'status': 'SUPPORTED', 'id': 'v1.0'}]})
http_client_mock.return_value = result
maj_ver = glance._determine_curr_major_version('http://host1:9292')
self.assertEqual(2, maj_ver)
@mock.patch.object(glanceclient.common.http.HTTPClient, 'get')
def test_determine_curr_major_version_invalid(self, http_client_mock):
result = ("http://host1:9292/v2/", "Invalid String")
http_client_mock.return_value = result
curr_major_version = glance._determine_curr_major_version('abc')
self.assertIsNone(curr_major_version)
@mock.patch.object(glanceclient.common.http.HTTPClient, 'get')
def test_determine_curr_major_version_unsupported(self, http_client_mock):
result = ("http://host1:9292/v2/", {'versions': [
{'status': 'CURRENT', 'id': 'v666.0'},
{'status': 'SUPPORTED', 'id': 'v1.0'}]})
http_client_mock.return_value = result
maj_ver = glance._determine_curr_major_version('http://host1:9292')
self.assertIsNone(maj_ver)
class TestDownloadNoDirectUri(test.NoDBTestCase):
"""Tests the download method of the GlanceImageService when the
default of not allowing direct URI transfers is set.
"""
@mock.patch('__builtin__.open')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_no_data_no_dest_path(self, show_mock, open_mock):
client = mock.MagicMock()
client.call.return_value = mock.sentinel.image_chunks
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id)
self.assertFalse(show_mock.called)
self.assertFalse(open_mock.called)
client.call.assert_called_once_with(ctx, 1, 'data',
mock.sentinel.image_id)
self.assertEqual(mock.sentinel.image_chunks, res)
@mock.patch('__builtin__.open')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_data_no_dest_path(self, show_mock, open_mock):
client = mock.MagicMock()
client.call.return_value = [1, 2, 3]
ctx = mock.sentinel.ctx
data = mock.MagicMock()
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id, data=data)
self.assertFalse(show_mock.called)
self.assertFalse(open_mock.called)
client.call.assert_called_once_with(ctx, 1, 'data',
mock.sentinel.image_id)
self.assertIsNone(res)
data.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
self.assertFalse(data.close.called)
@mock.patch('__builtin__.open')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_no_data_dest_path(self, show_mock, open_mock):
client = mock.MagicMock()
client.call.return_value = [1, 2, 3]
ctx = mock.sentinel.ctx
writer = mock.MagicMock()
open_mock.return_value = writer
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id,
dst_path=mock.sentinel.dst_path)
self.assertFalse(show_mock.called)
client.call.assert_called_once_with(ctx, 1, 'data',
mock.sentinel.image_id)
open_mock.assert_called_once_with(mock.sentinel.dst_path, 'wb')
self.assertIsNone(res)
writer.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
writer.close.assert_called_once_with()
@mock.patch('__builtin__.open')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_data_dest_path(self, show_mock, open_mock):
# NOTE(jaypipes): This really shouldn't be allowed, but because of the
# horrible design of the download() method in GlanceImageService, no
# error is raised, and the dst_path is ignored...
# #TODO(jaypipes): Fix the aforementioned horrible design of
# the download() method.
client = mock.MagicMock()
client.call.return_value = [1, 2, 3]
ctx = mock.sentinel.ctx
data = mock.MagicMock()
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id, data=data)
self.assertFalse(show_mock.called)
self.assertFalse(open_mock.called)
client.call.assert_called_once_with(ctx, 1, 'data',
mock.sentinel.image_id)
self.assertIsNone(res)
data.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
self.assertFalse(data.close.called)
@mock.patch('__builtin__.open')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_data_dest_path_write_fails(self, show_mock, open_mock):
client = mock.MagicMock()
client.call.return_value = [1, 2, 3]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
# NOTE(mikal): data is a file like object, which in our case always
# raises an exception when we attempt to write to the file.
class FakeDiskException(Exception):
pass
class Exceptionator(StringIO):
def write(self, _):
raise FakeDiskException('Disk full!')
self.assertRaises(FakeDiskException, service.download, ctx,
mock.sentinel.image_id, data=Exceptionator())
@mock.patch('nova.image.glance.GlanceImageService._get_transfer_module')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_direct_file_uri(self, show_mock, get_tran_mock):
self.flags(allowed_direct_url_schemes=['file'], group='glance')
show_mock.return_value = {
'locations': [
{
'url': 'file:///files/image',
'metadata': mock.sentinel.loc_meta
}
]
}
tran_mod = mock.MagicMock()
get_tran_mock.return_value = tran_mod
client = mock.MagicMock()
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id,
dst_path=mock.sentinel.dst_path)
self.assertIsNone(res)
self.assertFalse(client.call.called)
show_mock.assert_called_once_with(ctx,
mock.sentinel.image_id,
include_locations=True)
get_tran_mock.assert_called_once_with('file')
tran_mod.download.assert_called_once_with(ctx, mock.ANY,
mock.sentinel.dst_path,
mock.sentinel.loc_meta)
@mock.patch('__builtin__.open')
@mock.patch('nova.image.glance.GlanceImageService._get_transfer_module')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_direct_exception_fallback(self, show_mock,
get_tran_mock,
open_mock):
# Test that we fall back to downloading to the dst_path
# if the download method of the transfer module raised
# an exception.
self.flags(allowed_direct_url_schemes=['file'], group='glance')
show_mock.return_value = {
'locations': [
{
'url': 'file:///files/image',
'metadata': mock.sentinel.loc_meta
}
]
}
tran_mod = mock.MagicMock()
tran_mod.download.side_effect = Exception
get_tran_mock.return_value = tran_mod
client = mock.MagicMock()
client.call.return_value = [1, 2, 3]
ctx = mock.sentinel.ctx
writer = mock.MagicMock()
open_mock.return_value = writer
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id,
dst_path=mock.sentinel.dst_path)
self.assertIsNone(res)
show_mock.assert_called_once_with(ctx,
mock.sentinel.image_id,
include_locations=True)
get_tran_mock.assert_called_once_with('file')
tran_mod.download.assert_called_once_with(ctx, mock.ANY,
mock.sentinel.dst_path,
mock.sentinel.loc_meta)
client.call.assert_called_once_with(ctx, 1, 'data',
mock.sentinel.image_id)
# NOTE(jaypipes): log messages call open() in part of the
# download path, so here, we just check that the last open()
# call was done for the dst_path file descriptor.
open_mock.assert_called_with(mock.sentinel.dst_path, 'wb')
self.assertIsNone(res)
writer.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
@mock.patch('__builtin__.open')
@mock.patch('nova.image.glance.GlanceImageService._get_transfer_module')
@mock.patch('nova.image.glance.GlanceImageService.show')
def test_download_direct_no_mod_fallback(self, show_mock,
get_tran_mock,
open_mock):
        # Test that we fall back to downloading to the dst_path
        # if no appropriate transfer module is found.
self.flags(allowed_direct_url_schemes=['funky'], group='glance')
show_mock.return_value = {
'locations': [
{
'url': 'file:///files/image',
'metadata': mock.sentinel.loc_meta
}
]
}
get_tran_mock.return_value = None
client = mock.MagicMock()
client.call.return_value = [1, 2, 3]
ctx = mock.sentinel.ctx
writer = mock.MagicMock()
open_mock.return_value = writer
service = glance.GlanceImageService(client)
res = service.download(ctx, mock.sentinel.image_id,
dst_path=mock.sentinel.dst_path)
self.assertIsNone(res)
show_mock.assert_called_once_with(ctx,
mock.sentinel.image_id,
include_locations=True)
get_tran_mock.assert_called_once_with('file')
client.call.assert_called_once_with(ctx, 1, 'data',
mock.sentinel.image_id)
# NOTE(jaypipes): log messages call open() in part of the
# download path, so here, we just check that the last open()
# call was done for the dst_path file descriptor.
open_mock.assert_called_with(mock.sentinel.dst_path, 'wb')
self.assertIsNone(res)
writer.write.assert_has_calls(
[
mock.call(1),
mock.call(2),
mock.call(3)
]
)
writer.close.assert_called_once_with()
class TestIsImageAvailable(test.NoDBTestCase):
"""Tests the internal _is_image_available function."""
class ImageSpecV2(object):
visibility = None
properties = None
class ImageSpecV1(object):
is_public = None
properties = None
def test_auth_token_override(self):
ctx = mock.MagicMock(auth_token=True)
img = mock.MagicMock()
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
self.assertFalse(img.called)
def test_admin_override(self):
ctx = mock.MagicMock(auth_token=False, is_admin=True)
img = mock.MagicMock()
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
self.assertFalse(img.called)
def test_v2_visibility(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False)
# We emulate warlock validation that throws an AttributeError
# if you try to call is_public on an image model returned by
# a call to V2 image.get(). Here, the ImageSpecV2 does not have
# an is_public attribute and MagicMock will throw an AttributeError.
img = mock.MagicMock(visibility='PUBLIC',
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_v1_is_public(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False)
img = mock.MagicMock(is_public=True,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_project_is_owner(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
'owner_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_project_context_matches_project_prop(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
'project_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_no_user_in_props(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertFalse(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertFalse(res)
def test_user_matches_context(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
user_id='123')
props = {
'user_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
class TestShow(test.NoDBTestCase):
"""Tests the show method of the GlanceImageService."""
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_success(self, is_avail_mock, trans_from_mock):
is_avail_mock.return_value = True
trans_from_mock.return_value = {'mock': mock.sentinel.trans_from}
client = mock.MagicMock()
client.call.return_value = {}
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
info = service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'get',
mock.sentinel.image_id)
is_avail_mock.assert_called_once_with(ctx, {})
trans_from_mock.assert_called_once_with({}, include_locations=False)
self.assertIn('mock', info)
self.assertEqual(mock.sentinel.trans_from, info['mock'])
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_not_available(self, is_avail_mock, trans_from_mock):
is_avail_mock.return_value = False
client = mock.MagicMock()
client.call.return_value = mock.sentinel.images_0
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.ImageNotFound):
service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'get',
mock.sentinel.image_id)
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
self.assertFalse(trans_from_mock.called)
@mock.patch('nova.image.glance._reraise_translated_image_exception')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_client_failure(self, is_avail_mock, trans_from_mock,
reraise_mock):
raised = exception.ImageNotAuthorized(image_id=123)
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.ImageNotAuthorized):
service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'get',
mock.sentinel.image_id)
self.assertFalse(is_avail_mock.called)
self.assertFalse(trans_from_mock.called)
reraise_mock.assert_called_once_with(mock.sentinel.image_id)
@mock.patch('nova.image.glance._is_image_available')
def test_show_queued_image_without_some_attrs(self, is_avail_mock):
is_avail_mock.return_value = True
client = mock.MagicMock()
# fake image cls without disk_format, container_format, name attributes
class fake_image_cls(dict):
id = 'b31aa5dd-f07a-4748-8f15-398346887584'
deleted = False
protected = False
min_disk = 0
created_at = '2014-05-20T08:16:48'
size = 0
status = 'queued'
is_public = False
min_ram = 0
owner = '980ec4870033453ead65c0470a78b8a8'
updated_at = '2014-05-20T08:16:48'
glance_image = fake_image_cls()
client.call.return_value = glance_image
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
image_info = service.show(ctx, glance_image.id)
client.call.assert_called_once_with(ctx, 1, 'get',
glance_image.id)
NOVA_IMAGE_ATTRIBUTES = set(['size', 'disk_format', 'owner',
'container_format', 'status', 'id',
'name', 'created_at', 'updated_at',
'deleted', 'deleted_at', 'checksum',
'min_disk', 'min_ram', 'is_public',
'properties'])
self.assertEqual(NOVA_IMAGE_ATTRIBUTES, set(image_info.keys()))
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_include_locations_success(self, avail_mock, trans_from_mock):
locations = [mock.sentinel.loc1]
avail_mock.return_value = True
trans_from_mock.return_value = {'locations': locations}
client = mock.Mock()
client.call.return_value = mock.sentinel.image
service = glance.GlanceImageService(client)
ctx = mock.sentinel.ctx
image_id = mock.sentinel.image_id
info = service.show(ctx, image_id, include_locations=True)
client.call.assert_called_once_with(ctx, 2, 'get', image_id)
avail_mock.assert_called_once_with(ctx, mock.sentinel.image)
trans_from_mock.assert_called_once_with(mock.sentinel.image,
include_locations=True)
self.assertIn('locations', info)
self.assertEqual(locations, info['locations'])
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_include_direct_uri_success(self, avail_mock, trans_from_mock):
locations = [mock.sentinel.loc1]
avail_mock.return_value = True
trans_from_mock.return_value = {'locations': locations,
'direct_uri': mock.sentinel.duri}
client = mock.Mock()
client.call.return_value = mock.sentinel.image
service = glance.GlanceImageService(client)
ctx = mock.sentinel.ctx
image_id = mock.sentinel.image_id
info = service.show(ctx, image_id, include_locations=True)
client.call.assert_called_once_with(ctx, 2, 'get', image_id)
expected = locations
expected.append({'url': mock.sentinel.duri, 'metadata': {}})
self.assertIn('locations', info)
self.assertEqual(expected, info['locations'])
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_do_not_show_deleted_images(self, is_avail_mock, trans_from_mock):
class fake_image_cls(dict):
id = 'b31aa5dd-f07a-4748-8f15-398346887584'
deleted = True
glance_image = fake_image_cls()
client = mock.MagicMock()
client.call.return_value = glance_image
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.ImageNotFound):
service.show(ctx, glance_image.id, show_deleted=False)
client.call.assert_called_once_with(ctx, 1, 'get',
glance_image.id)
self.assertFalse(is_avail_mock.called)
self.assertFalse(trans_from_mock.called)
class TestDetail(test.NoDBTestCase):
"""Tests the detail method of the GlanceImageService."""
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_success_available(self, is_avail_mock, trans_from_mock,
ext_query_mock):
params = {}
is_avail_mock.return_value = True
ext_query_mock.return_value = params
trans_from_mock.return_value = mock.sentinel.trans_from
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
images = service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list')
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
trans_from_mock.assert_called_once_with(mock.sentinel.images_0)
self.assertEqual([mock.sentinel.trans_from], images)
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_success_unavailable(self, is_avail_mock, trans_from_mock,
ext_query_mock):
params = {}
is_avail_mock.return_value = False
ext_query_mock.return_value = params
trans_from_mock.return_value = mock.sentinel.trans_from
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
images = service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list')
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
self.assertFalse(trans_from_mock.called)
self.assertEqual([], images)
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_params_passed(self, is_avail_mock, _trans_from_mock):
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
service.detail(ctx, page_size=5, limit=10)
expected_filters = {
'is_public': 'none'
}
client.call.assert_called_once_with(ctx, 1, 'list',
filters=expected_filters,
page_size=5,
limit=10)
@mock.patch('nova.image.glance._reraise_translated_exception')
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_client_failure(self, is_avail_mock, trans_from_mock,
ext_query_mock, reraise_mock):
params = {}
ext_query_mock.return_value = params
raised = exception.Forbidden()
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.Forbidden):
service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list')
self.assertFalse(is_avail_mock.called)
self.assertFalse(trans_from_mock.called)
reraise_mock.assert_called_once_with()
class TestCreate(test.NoDBTestCase):
"""Tests the create method of the GlanceImageService."""
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_create_success(self, trans_to_mock, trans_from_mock):
translated = {
'image_id': mock.sentinel.image_id
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
image_mock = mock.MagicMock(spec=dict)
client = mock.MagicMock()
client.call.return_value = mock.sentinel.image_meta
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
image_meta = service.create(ctx, image_mock)
trans_to_mock.assert_called_once_with(image_mock)
client.call.assert_called_once_with(ctx, 1, 'create',
image_id=mock.sentinel.image_id)
trans_from_mock.assert_called_once_with(mock.sentinel.image_meta)
self.assertEqual(mock.sentinel.trans_from, image_meta)
# Now verify that if we supply image data to the call,
# that the client is also called with the data kwarg
client.reset_mock()
service.create(ctx, image_mock, data=mock.sentinel.data)
client.call.assert_called_once_with(ctx, 1, 'create',
image_id=mock.sentinel.image_id,
data=mock.sentinel.data)
@mock.patch('nova.image.glance._reraise_translated_exception')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_create_client_failure(self, trans_to_mock, trans_from_mock,
reraise_mock):
translated = {}
trans_to_mock.return_value = translated
image_mock = mock.MagicMock(spec=dict)
raised = exception.Invalid()
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.BadRequest
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
self.assertRaises(exception.Invalid, service.create, ctx, image_mock)
trans_to_mock.assert_called_once_with(image_mock)
self.assertFalse(trans_from_mock.called)
class TestUpdate(test.NoDBTestCase):
"""Tests the update method of the GlanceImageService."""
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_update_success(self, trans_to_mock, trans_from_mock):
translated = {
'id': mock.sentinel.image_id,
'name': mock.sentinel.name
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
image_mock = mock.MagicMock(spec=dict)
client = mock.MagicMock()
client.call.return_value = mock.sentinel.image_meta
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
image_meta = service.update(ctx, mock.sentinel.image_id, image_mock)
trans_to_mock.assert_called_once_with(image_mock)
# Verify that the 'id' element has been removed as a kwarg to
# the call to glanceclient's update (since the image ID is
# supplied as a positional arg), and that the
# purge_props default is True.
client.call.assert_called_once_with(ctx, 1, 'update',
mock.sentinel.image_id,
name=mock.sentinel.name,
purge_props=True)
trans_from_mock.assert_called_once_with(mock.sentinel.image_meta)
self.assertEqual(mock.sentinel.trans_from, image_meta)
# Now verify that if we supply image data to the call,
# that the client is also called with the data kwarg
client.reset_mock()
service.update(ctx, mock.sentinel.image_id,
image_mock, data=mock.sentinel.data)
client.call.assert_called_once_with(ctx, 1, 'update',
mock.sentinel.image_id,
name=mock.sentinel.name,
purge_props=True,
data=mock.sentinel.data)
@mock.patch('nova.image.glance._reraise_translated_image_exception')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._translate_to_glance')
def test_update_client_failure(self, trans_to_mock, trans_from_mock,
reraise_mock):
translated = {
'name': mock.sentinel.name
}
trans_to_mock.return_value = translated
trans_from_mock.return_value = mock.sentinel.trans_from
image_mock = mock.MagicMock(spec=dict)
raised = exception.ImageNotAuthorized(image_id=123)
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
self.assertRaises(exception.ImageNotAuthorized,
service.update, ctx, mock.sentinel.image_id,
image_mock)
client.call.assert_called_once_with(ctx, 1, 'update',
mock.sentinel.image_id,
purge_props=True,
name=mock.sentinel.name)
self.assertFalse(trans_from_mock.called)
reraise_mock.assert_called_once_with(mock.sentinel.image_id)
class TestDelete(test.NoDBTestCase):
"""Tests the delete method of the GlanceImageService."""
def test_delete_success(self):
client = mock.MagicMock()
client.call.return_value = True
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
service.delete(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'delete',
mock.sentinel.image_id)
def test_delete_client_failure(self):
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.NotFound
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
self.assertRaises(exception.ImageNotFound, service.delete, ctx,
mock.sentinel.image_id)
class TestGlanceUrl(test.NoDBTestCase):
def test_generate_glance_http_url(self):
generated_url = glance.generate_glance_url()
glance_host = CONF.glance.host
        # IPv6 addresses must be wrapped in '[]'
if netutils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
http_url = "http://%s:%d" % (glance_host, CONF.glance.port)
self.assertEqual(generated_url, http_url)
def test_generate_glance_https_url(self):
self.flags(protocol="https", group='glance')
generated_url = glance.generate_glance_url()
glance_host = CONF.glance.host
        # IPv6 addresses must be wrapped in '[]'
if netutils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
https_url = "https://%s:%d" % (glance_host, CONF.glance.port)
self.assertEqual(generated_url, https_url)
class TestGlanceApiServers(test.NoDBTestCase):
def test_get_ipv4_api_servers(self):
self.flags(api_servers=['10.0.1.1:9292',
'https://10.0.0.1:9293',
'http://10.0.2.2:9294'], group='glance')
glance_host = ['10.0.1.1', '10.0.0.1',
'10.0.2.2']
api_servers = glance.get_api_servers()
i = 0
for server in api_servers:
i += 1
self.assertIn(server[0], glance_host)
if i > 2:
break
def test_get_ipv6_api_servers(self):
self.flags(api_servers=['[2001:2012:1:f101::1]:9292',
'https://[2010:2013:1:f122::1]:9293',
'http://[2001:2011:1:f111::1]:9294'],
group='glance')
glance_host = ['2001:2012:1:f101::1', '2010:2013:1:f122::1',
'2001:2011:1:f111::1']
api_servers = glance.get_api_servers()
i = 0
for server in api_servers:
i += 1
self.assertIn(server[0], glance_host)
if i > 2:
break
class TestUpdateGlanceImage(test.NoDBTestCase):
@mock.patch('nova.image.glance.GlanceImageService')
def test_start(self, mock_glance_image_service):
consumer = glance.UpdateGlanceImage(
'context', 'id', 'metadata', 'stream')
with mock.patch.object(glance, 'get_remote_image_service') as a_mock:
a_mock.return_value = (mock_glance_image_service, 'image_id')
consumer.start()
mock_glance_image_service.update.assert_called_with(
'context', 'image_id', 'metadata', 'stream', purge_props=False)
import json
import random
from datetime import datetime
from api_v3.factories import (
ExpenseFactory,
ProfileFactory,
TicketFactory
)
from api_v3.models import Action, Expense
from api_v3.serializers import ExpenseSerializer
from .support import ApiTestCase, APIClient, reverse
class ExpensesEndpointTestCase(ApiTestCase):
def setUp(self):
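        # Randomly exercise either the superuser or the staff flag; the
        # endpoint is expected to grant the same access for both.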
superuser_or_staff = random.choice([True, False])
self.client = APIClient()
self.users = [
ProfileFactory.create(),
ProfileFactory.create(
is_superuser=superuser_or_staff,
is_staff=(not superuser_or_staff)
),
ProfileFactory.create()
]
self.tickets = [
TicketFactory.create(requester=self.users[0])
]
self.expenses = [
ExpenseFactory.create(user=self.users[2], ticket=self.tickets[0])
]
def test_list_anonymous(self):
response = self.client.get(reverse('expense-list'))
self.assertEqual(response.status_code, 401)
def test_list_authenticated(self):
self.client.force_authenticate(self.users[0])
response = self.client.get(reverse('expense-list'))
self.assertEqual(response.status_code, 200)
self.assertEqual(
json.loads(response.content)['data'],
[]
)
def test_list_authenticated_superuser_or_staff(self):
self.client.force_authenticate(self.users[1])
response = self.client.get(reverse('expense-list'))
self.assertEqual(response.status_code, 200)
self.assertEqual(
json.loads(response.content)['data'][0]['id'],
str(self.expenses[0].id)
)
def test_detail_authenticated(self):
self.client.force_authenticate(self.users[0])
response = self.client.get(
reverse('expense-detail', args=[self.expenses[0].id]))
self.assertEqual(response.status_code, 404)
def test_detail_authenticated_superuser_or_staff(self):
self.client.force_authenticate(self.users[1])
response = self.client.get(
reverse('expense-detail', args=[self.expenses[0].id]))
self.assertEqual(response.status_code, 200)
self.assertEqual(
json.loads(response.content)['data']['id'],
str(self.expenses[0].id)
)
def test_create_authenticated(self):
self.client.force_authenticate(self.users[0])
expenses_count = Expense.objects.count()
new_data = self.as_jsonapi_payload(
ExpenseSerializer,
ExpenseFactory.build(ticket=self.tickets[0]),
{'notes': 'new note'}
)
response = self.client.post(
reverse('expense-list'),
data=json.dumps(new_data),
content_type=self.JSON_API_CONTENT_TYPE
)
self.assertEqual(response.status_code, 422)
self.assertEqual(Expense.objects.count(), expenses_count)
def test_create_authenticated_superuser_or_staff(self):
self.client.force_authenticate(self.users[1])
ticket = self.expenses[0].ticket
expenses_count = Expense.objects.count()
actions_count = Action.objects.filter(
target_object_id=ticket.id).count()
new_data = self.as_jsonapi_payload(
ExpenseSerializer,
ExpenseFactory.build(ticket=ticket),
{'notes': 'new notes'}
)
response = self.client.post(
reverse('expense-list'),
data=json.dumps(new_data),
content_type=self.JSON_API_CONTENT_TYPE
)
self.assertEqual(response.status_code, 201)
self.assertEqual(Expense.objects.count(), expenses_count + 1)
self.assertEqual(
Action.objects.filter(
target_object_id=ticket.id,
verb='expense:create'
).count(),
actions_count + 1
)
def test_update_authenticated(self):
self.client.force_authenticate(self.users[0])
new_data = self.as_jsonapi_payload(
ExpenseSerializer, self.expenses[0], {'notes': 'update note'}
)
response = self.client.put(
reverse('expense-detail', args=[self.expenses[0].id]),
data=json.dumps(new_data),
content_type=self.JSON_API_CONTENT_TYPE
)
self.assertEqual(response.status_code, 404)
def test_update_authenticated_superuser_or_staff(self):
self.client.force_authenticate(self.users[1])
expenses_count = Expense.objects.count()
new_date = datetime.utcnow()
new_data = self.as_jsonapi_payload(
ExpenseSerializer,
self.expenses[0],
{'notes': 'update notes', 'created_at': new_date.isoformat()}
)
response = self.client.put(
reverse('expense-detail', args=[self.expenses[0].id]),
data=json.dumps(new_data),
content_type=self.JSON_API_CONTENT_TYPE
)
self.assertEqual(response.status_code, 200)
self.assertEqual(Expense.objects.count(), expenses_count)
expense = Expense.objects.get(id=self.expenses[0].id)
self.assertEqual(expense.notes, 'update notes')
self.assertEqual(expense.created_at, new_date)
def test_delete_authenticated(self):
self.client.force_authenticate(self.users[0])
response = self.client.delete(
reverse('expense-detail', args=[self.expenses[0].id])
)
self.assertEqual(response.status_code, 404)
def test_delete_authenticated_superuser_or_staff(self):
self.client.force_authenticate(self.users[1])
actions_count = Action.objects.filter(
target_object_id=self.expenses[0].ticket.id).count()
response = self.client.delete(
reverse('expense-detail', args=[self.expenses[0].id])
)
self.assertEqual(response.status_code, 204)
self.assertEqual(
Expense.objects.filter(id=self.expenses[0].id).count(), 0
)
self.assertEqual(
Action.objects.filter(
target_object_id=self.tickets[0].id,
verb='expense:destroy'
).count(),
actions_count + 1
)
from datetime import date, datetime
from functools import partial
from collections import namedtuple
from testfixtures.shouldraise import ShouldAssert
from testfixtures.tests.sample1 import SampleClassA, SampleClassB, Slotted
from .mock import Mock, call
from re import compile
from testfixtures import (
Comparison as C,
Replacer,
ShouldRaise,
compare,
generator,
singleton,
)
from testfixtures.compat import (
class_type_name, exception_module, PY3, xrange,
BytesLiteral, UnicodeLiteral,
PY2
)
from testfixtures.comparison import compare_sequence
from unittest import TestCase
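# Replace hex object addresses in reprs so expected failure messages are
# stable across runs.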
hexaddr = compile('0x[0-9A-Fa-f]+')
def hexsub(raw):
return hexaddr.sub('...', raw)
call_list_repr = repr(Mock().mock_calls.__class__)
marker = object()
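# Unique sentinel distinguishing "argument omitted" from an explicit None.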
class CompareHelper(object):
def check_raises(self, x=marker, y=marker, message=None, regex=None,
compare=compare, **kw):
args = []
for value in x, y:
if value is not marker:
args.append(value)
try:
compare(*args, **kw)
except Exception as e:
if not isinstance(e, AssertionError): # pragma: no cover
raise
actual = hexsub(e.args[0])
if message is not None:
# handy for debugging, but can't be relied on for tests!
compare(actual, expected=message, show_whitespace=True)
                assert actual == message
else:
if not regex.match(actual): # pragma: no cover
raise AssertionError(
'%r did not match %r' % (actual, regex.pattern)
)
else:
raise AssertionError('No exception raised!')
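# Typical use from the tests below: pass the two objects plus either the
# exact expected failure message, or a compiled regex via ``regex=`` when
# the message contains run-dependent detail, e.g.:
#
#   self.check_raises(1, 2, '1 != 2')
#   self.check_raises(x, y, regex=compile('...'), strict=True)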
class TestCompare(CompareHelper, TestCase):
def test_object_same(self):
o = object()
compare(o, o)
def test_object_diff(self):
self.check_raises(
object(), object(),
'<object object at ...> != <object object at ...>'
)
def test_different_types(self):
self.check_raises('x', 1, "'x' != 1")
def test_number_same(self):
compare(1, 1)
def test_number_different(self):
self.check_raises(1, 2, '1 != 2')
def test_different_with_labels(self):
self.check_raises(1, 2, '1 (expected) != 2 (actual)',
x_label='expected', y_label='actual')
def test_string_same(self):
compare('x', 'x')
def test_unicode_string_different(self):
if PY2:
expected = "u'a' != 'b'"
else:
expected = "'a' != b'b'"
self.check_raises(
UnicodeLiteral('a'), BytesLiteral('b'),
expected
)
def test_bytes_different(self):
if PY2:
expected = (
"\n"
"'12345678901'\n"
'!=\n'
"'12345678902'"
)
else:
expected = (
"\n"
"b'12345678901'\n"
'!=\n'
"b'12345678902'"
)
self.check_raises(
BytesLiteral('12345678901'),
BytesLiteral('12345678902'),
expected
)
def test_bytes_same_strict(self):
compare(actual=b'', expected=b'', strict=True)
if PY3:
def test_moar_bytes_different(self):
self.check_raises(
actual=b'{"byte_pound":"b\'\\\\xa3\'"}',
expected=b'{"byte_pound":"b\\\'\\xa3\'"}',
message=(
"\n"
"b'{\"byte_pound\":\"b\\\\\\'\\\\xa3\\\'\"}' (expected)\n"
'!=\n'
"b'{\"byte_pound\":\"b\\\'\\\\\\\\xa3\\\'\"}' (actual)"
)
)
def test_string_diff_short(self):
self.check_raises(
'\n'+('x'*9), '\n'+('y'*9),
"'\\nxxxxxxxxx' != '\\nyyyyyyyyy'"
)
def test_string_diff_long(self):
self.check_raises(
'x'*11, 'y'*11,
"\n'xxxxxxxxxxx'\n!=\n'yyyyyyyyyyy'"
)
def test_string_diff_long_newlines(self):
self.check_raises(
'x'*5+'\n'+'y'*5, 'x'*5+'\n'+'z'*5,
"\n--- first\n+++ second\n@@ -1,2 +1,2 @@\n xxxxx\n-yyyyy\n+zzzzz"
)
def test_string_diff_short_labels(self):
self.check_raises(
'\n'+('x'*9), '\n'+('y'*9),
"'\\nxxxxxxxxx' (expected) != '\\nyyyyyyyyy' (actual)",
x_label='expected',
y_label='actual'
)
def test_string_diff_long_labels(self):
self.check_raises(
'x'*11, 'y'*11,
"\n'xxxxxxxxxxx' (expected)\n!=\n'yyyyyyyyyyy' (actual)",
x_label='expected',
y_label='actual'
)
def test_string_diff_long_newlines_labels(self):
self.check_raises(
'x'*5+'\n'+'y'*5, 'x'*5+'\n'+'z'*5,
"\n--- expected\n+++ actual\n"
"@@ -1,2 +1,2 @@\n xxxxx\n-yyyyy\n+zzzzz",
x_label='expected',
y_label='actual'
)
def test_exception_same_object(self):
e = ValueError('some message')
compare(e, e)
def test_exception_same_c_wrapper(self):
e1 = ValueError('some message')
e2 = ValueError('some message')
compare(C(e1), e2)
def test_exception_different_object(self):
e1 = ValueError('some message')
e2 = ValueError('some message')
compare(e1, e2)
def test_exception_different_object_c_wrapper(self):
e1 = ValueError('some message')
e2 = ValueError('some message')
compare(C(e1), e2)
def test_exception_diff(self):
e1 = ValueError('some message')
e2 = ValueError('some other message')
self.check_raises(
e1, e2,
"ValueError('some message',) != ValueError('some other message',)"
)
def test_exception_diff_c_wrapper(self):
e1 = ValueError('some message')
e2 = ValueError('some other message')
self.check_raises(
C(e1), e2,
("\n"
"<C(failed):{0}.ValueError>\n"
"attributes differ:\n"
"'args': ('some message',) (Comparison) "
"!= ('some other message',) (actual)\n"
"</C>"
" != ValueError('some other message',)"
).format(exception_module))
def test_sequence_long(self):
self.check_raises(
['quite a long string 1', 'quite a long string 2',
'quite a long string 3', 'quite a long string 4',
'quite a long string 5', 'quite a long string 6',
'quite a long string 7', 'quite a long string 8'],
['quite a long string 1', 'quite a long string 2',
'quite a long string 3', 'quite a long string 4',
'quite a long string 9', 'quite a long string 10',
'quite a long string 11', 'quite a long string 12'],
"sequence not as expected:\n\n"
"same:\n"
"['quite a long string 1',\n"
" 'quite a long string 2',\n"
" 'quite a long string 3',\n"
" 'quite a long string 4']\n\n"
"first:\n"
"['quite a long string 5',\n"
" 'quite a long string 6',\n"
" 'quite a long string 7',\n"
" 'quite a long string 8']\n\n"
"second:\n"
"['quite a long string 9',\n"
" 'quite a long string 10',\n"
" 'quite a long string 11',\n"
" 'quite a long string 12']\n"
"\n"
"While comparing [4]: \n"
"'quite a long string 5'\n"
"!=\n"
"'quite a long string 9'"
)
def test_sequence_different_labels_supplied(self):
self.check_raises(
[1, 2, 3], [1, 2, 4],
"sequence not as expected:\n\n"
"same:\n"
"[1, 2]\n\n"
"expected:\n"
"[3]\n\n"
"actual:\n"
"[4]",
x_label='expected',
y_label='actual',
)
def test_list_same(self):
compare([1, 2, 3], [1, 2, 3])
def test_list_different(self):
self.check_raises(
[1, 2, 3], [1, 2, 4],
"sequence not as expected:\n\n"
"same:\n"
"[1, 2]\n\n"
"first:\n"
"[3]\n\n"
"second:\n"
"[4]"
)
def test_list_totally_different(self):
self.check_raises(
[1], [2],
"sequence not as expected:\n\n"
"same:\n"
"[]\n\n"
"first:\n"
"[1]\n\n"
"second:\n"
"[2]"
)
def test_list_first_shorter(self):
self.check_raises(
[1, 2], [1, 2, 3],
"sequence not as expected:\n\n"
"same:\n[1, 2]\n\n"
"first:\n[]\n\n"
"second:\n[3]"
)
def test_list_second_shorter(self):
self.check_raises(
[1, 2, 3], [1, 2],
"sequence not as expected:\n\n"
"same:\n[1, 2]\n\n"
"first:\n[3]\n\n"
"second:\n[]"
)
def test_dict_same(self):
compare(dict(x=1), dict(x=1))
def test_dict_first_missing_keys(self):
self.check_raises(
dict(), dict(z=3),
"dict not as expected:\n"
"\n"
"in second but not first:\n"
"'z': 3"
)
def test_dict_second_missing_keys(self):
self.check_raises(
dict(z=3), dict(),
"dict not as expected:\n"
"\n"
"in first but not second:\n"
"'z': 3"
)
def test_dict_values_different(self):
self.check_raises(
dict(x=1), dict(x=2),
"dict not as expected:\n"
"\n"
"values differ:\n"
"'x': 1 != 2"
)
def test_dict_labels_specified(self):
self.check_raises(
dict(x=1, y=2), dict(x=2, z=3),
"dict not as expected:\n"
"\n"
"in expected but not actual:\n"
"'y': 2\n"
"\n"
"in actual but not expected:\n"
"'z': 3\n"
"\n"
"values differ:\n"
"'x': 1 (expected) != 2 (actual)",
x_label='expected',
y_label='actual'
)
def test_dict_tuple_keys_same_value(self):
compare({(1, 2): None}, {(1, 2): None})
def test_dict_tuple_keys_different_value(self):
self.check_raises(
{(1, 2): 3},
{(1, 2): 42},
"dict not as expected:\n"
"\n"
"values differ:\n"
"(1, 2): 3 != 42"
)
def test_dict_full_diff(self):
self.check_raises(
dict(x=1, y=2, a=4), dict(x=1, z=3, a=5),
"dict not as expected:\n"
"\n"
'same:\n'
"['x']\n"
"\n"
"in first but not second:\n"
"'y': 2\n"
'\n'
"in second but not first:\n"
"'z': 3\n"
'\n'
"values differ:\n"
"'a': 4 != 5"
)
def test_dict_consistent_ordering(self):
self.check_raises(
dict(xa=1, xb=2, ya=1, yb=2, aa=3, ab=4),
dict(xa=1, xb=2, za=3, zb=4, aa=5, ab=5),
"dict not as expected:\n"
"\n"
'same:\n'
"['xa', 'xb']\n"
"\n"
"in first but not second:\n"
"'ya': 1\n"
"'yb': 2\n"
'\n'
"in second but not first:\n"
"'za': 3\n"
"'zb': 4\n"
'\n'
"values differ:\n"
"'aa': 3 != 5\n"
"'ab': 4 != 5"
)
def test_dict_consistent_ordering_types_same(self):
if PY3:
same = "[6, None]\n"
else:
same = "[None, 6]\n"
self.check_raises(
{None: 1, 6: 2, 1: 3},
{None: 1, 6: 2, 1: 4},
"dict not as expected:\n"
"\n"+
'same:\n'+
same+
"\n"
"values differ:\n"
"1: 3 != 4"
)
def test_dict_consistent_ordering_types_x_not_y(self):
self.check_raises(
{None: 1, 3: 2},
{},
"dict not as expected:\n"
"\n"
"in first but not second:\n"
"3: 2\n"
"None: 1"
)
def test_dict_consistent_ordering_types_y_not_x(self):
self.check_raises(
{},
{None: 1, 3: 2},
"dict not as expected:\n"
"\n"
"in second but not first:\n"
"3: 2\n"
"None: 1"
)
def test_dict_consistent_ordering_types_value(self):
self.check_raises(
{None: 1, 6: 2},
{None: 3, 6: 4},
"dict not as expected:\n"
"\n"
"values differ:\n"
"6: 2 != 4\n"
"None: 1 != 3"
)
def test_set_same(self):
compare(set([1]), set([1]))
def test_set_first_missing_keys(self):
self.check_raises(
set(), set([3]),
"set not as expected:\n"
"\n"
"in second but not first:\n"
"[3]\n"
'\n'
)
def test_set_second_missing_keys(self):
self.check_raises(
set([3]), set(),
"set not as expected:\n"
"\n"
"in first but not second:\n"
"[3]\n"
'\n'
)
def test_set_full_diff(self):
self.check_raises(
set([1, 2, 4]), set([1, 3, 5]),
"set not as expected:\n"
"\n"
"in first but not second:\n"
"[2, 4]\n"
'\n'
"in second but not first:\n"
"[3, 5]\n"
'\n'
)
def test_set_type_ordering(self):
self.check_raises(
{None, 1}, {'', 2},
"set not as expected:\n"
"\n"
"in first but not second:\n"
"[1, None]\n"
'\n'
"in second but not first:\n"
"['', 2]\n"
'\n'
)
def test_set_labels(self):
self.check_raises(
set([1, 2, 4]), set([1, 3, 5]),
"set not as expected:\n"
"\n"
"in expected but not actual:\n"
"[2, 4]\n"
'\n'
"in actual but not expected:\n"
"[3, 5]\n"
'\n',
x_label='expected',
y_label='actual',
)
def test_tuple_same(self):
compare((1, 2, 3), (1, 2, 3))
def test_tuple_different(self):
self.check_raises(
(1, 2, 3), (1, 2, 4),
"sequence not as expected:\n\n"
"same:\n(1, 2)\n\n"
"first:\n(3,)\n\n"
"second:\n(4,)"
)
def test_tuple_totally_different(self):
self.check_raises(
(1, ), (2, ),
"sequence not as expected:\n\n"
"same:\n()\n\n"
"first:\n(1,)\n\n"
"second:\n(2,)"
)
def test_tuple_first_shorter(self):
self.check_raises(
(1, 2), (1, 2, 3),
"sequence not as expected:\n\n"
"same:\n(1, 2)\n\n"
"first:\n()\n\n"
"second:\n(3,)"
)
def test_tuple_second_shorter(self):
self.check_raises(
(1, 2, 3), (1, 2),
"sequence not as expected:\n\n"
"same:\n(1, 2)\n\n"
"first:\n(3,)\n\n"
"second:\n()"
)
def test_generator_same(self):
compare(generator(1, 2, 3), generator(1, 2, 3))
def test_generator_different(self):
self.check_raises(
generator(1, 2, 3), generator(1, 2, 4),
"sequence not as expected:\n\n"
"same:\n(1, 2)\n\n"
"first:\n(3,)\n\n"
"second:\n(4,)"
)
def test_generator_totally_different(self):
self.check_raises(
generator(1, ), generator(2, ),
"sequence not as expected:\n\n"
"same:\n()\n\n"
"first:\n(1,)\n\n"
"second:\n(2,)"
)
def test_generator_first_shorter(self):
self.check_raises(
generator(1, 2), generator(1, 2, 3),
"sequence not as expected:\n\n"
"same:\n(1, 2)\n\n"
"first:\n()\n\n"
"second:\n(3,)"
)
def test_generator_second_shorter(self):
self.check_raises(
generator(1, 2, 3), generator(1, 2),
"sequence not as expected:\n\n"
"same:\n(1, 2)\n\n"
"first:\n(3,)\n\n"
"second:\n()"
)
def test_nested_generator_different(self):
self.check_raises(
generator(1, 2, generator(3), 4),
generator(1, 2, generator(3), 5),
"sequence not as expected:\n"
"\n"
"same:\n"
"(1, 2, <generator object generator at ...>)\n"
"\n"
"first:\n"
"(4,)\n"
"\n"
"second:\n"
"(5,)"
)
def test_nested_generator_tuple_left(self):
compare(
generator(1, 2, (3, ), 4),
generator(1, 2, generator(3), 4),
)
def test_nested_generator_tuple_right(self):
compare(
generator(1, 2, generator(3), 4),
generator(1, 2, (3, ), 4),
)
def test_sequence_and_generator(self):
compare((1, 2, 3), generator(1, 2, 3))
def test_sequence_and_generator_strict(self):
expected = compile(
r"\(1, 2, 3\) \(<(class|type) 'tuple'>\) \(expected\) != "
r"<generator object (generator )?at... "
r"\(<(class|type) 'generator'>\) \(actual\)"
)
self.check_raises(
(1, 2, 3), generator(1, 2, 3),
regex=expected,
strict=True,
x_label='expected',
y_label='actual',
)
def test_generator_and_sequence(self):
compare(generator(1, 2, 3), (1, 2, 3))
def test_iterable_with_iterable_same(self):
compare(xrange(1, 4), xrange(1, 4))
def test_iterable_with_iterable_different(self):
self.check_raises(
xrange(1, 4), xrange(1, 3),
"sequence not as expected:\n"
"\n"
"same:\n"
"(1, 2)\n"
"\n"
"first:\n"
"(3,)\n"
"\n"
"second:\n"
"()"
)
def test_iterable_and_generator(self):
compare(xrange(1, 4), generator(1, 2, 3))
def test_iterable_and_generator_strict(self):
expected = compile(
r"x?range\(1, 4\) \(<(class|type) 'x?range'>\) != "
r"<generator object (generator )?at... "
r"\(<(class|type) 'generator'>\)"
)
self.check_raises(
xrange(1, 4), generator(1, 2, 3),
regex=expected,
strict=True,
)
def test_generator_and_iterable(self):
compare(generator(1, 2, 3), xrange(1, 4))
def test_tuple_and_list(self):
compare((1, 2, 3), [1, 2, 3])
def test_tuple_and_list_strict(self):
if PY2:
expected = ("(1, 2, 3) (<type 'tuple'>) != "
"[1, 2, 3] (<type 'list'>)")
else:
expected = ("(1, 2, 3) (<class 'tuple'>) != "
"[1, 2, 3] (<class 'list'>)")
self.check_raises(
(1, 2, 3), [1, 2, 3],
expected,
strict=True
)
def test_float_subclass_strict(self):
class TestFloat(float):
pass
compare(TestFloat(0.75), TestFloat(0.75), strict=True)
def test_old_style_classes_same(self):
class X:
pass
compare(X, X)
def test_old_style_classes_different(self):
if PY3:
expected = (
"<class 'testfixtures.tests.test_compare.TestCompare."
"test_old_style_classes_different.<locals>.X'>"
" != "
"<class 'testfixtures.tests.test_compare.TestCompare."
"test_old_style_classes_different.<locals>.Y'>"
)
else:
expected = (
"<class testfixtures.tests.test_compare.X at ...>"
" != "
"<class testfixtures.tests.test_compare.Y at ...>"
)
class X:
pass
class Y:
pass
self.check_raises(X, Y, expected)
def test_show_whitespace(self):
# does nothing! ;-)
self.check_raises(
' x \n\r', ' x \n \t',
"' x \\n\\r' != ' x \\n \\t'",
show_whitespace=True
)
def test_show_whitespace_long(self):
self.check_raises(
"\t \n '", '\r \n ',
'\n--- first\n'
'+++ second\n'
'@@ -1,2 +1,2 @@\n'
'-\'\\t \\n\'\n'
'-" \'"\n'
'+\'\\r \\n\'\n'
'+\' \'',
show_whitespace=True
)
def test_show_whitespace_equal(self):
compare('x', 'x', show_whitespace=True)
def test_show_whitespace_not_used_because_of_other_difference(self):
self.check_raises(
(1, 'a'),
(2, 'b'),
"sequence not as expected:\n"
"\n"
"same:\n"
"()\n"
"\n"
"first:\n"
"(1, 'a')\n"
"\n"
"second:\n"
"(2, 'b')",
show_whitespace=False
)
def test_include_trailing_whitespace(self):
self.check_raises(
' x \n', ' x \n',
"' x \\n' != ' x \\n'"
)
def test_ignore_trailing_whitespace(self):
compare(' x \t\n', ' x\t \n', trailing_whitespace=False)
def test_ignore_trailing_whitespace_non_string(self):
self.check_raises(
1, '',
"1 != ''",
trailing_whitespace=False
)
def test_ignore_trailing_whitespace_but_respect_leading_whitespace(self):
# NB: careful: this strips off the last newline too
# DON'T use if you care about that!
self.check_raises(
'a\n b\n c\n',
'a\nb\nc\n',
"'a\\n b\\n c' != 'a\\nb\\nc'",
trailing_whitespace=False
)
def test_include_blank_lines(self):
self.check_raises(
'\n \n', '\n ',
"'\\n \\n' != '\\n '"
)
def test_ignore_blank_lines(self):
compare("""
a
\t
b
""",
' a\nb', blanklines=False)
def test_ignore_blank_lines_non_string(self):
self.check_raises(
1, '',
"1 != ''",
blanklines=False
)
def test_supply_comparer(self):
def compare_dict(x, y, context):
self.assertEqual(x, {1: 1})
self.assertEqual(y, {2: 2})
self.assertEqual(context.get_option('foo'), 'bar')
return 'not equal'
with ShouldAssert('not equal'):
compare({1: 1}, {2: 2},
foo='bar',
comparers={dict: compare_dict})
def test_register_more_specific(self):
class_ = namedtuple('Test', 'x')
with ShouldAssert('compare class_'):
compare(class_(1), class_(2),
comparers={
tuple: Mock(return_value='compare tuple'),
class_: Mock(return_value='compare class_')
})
def test_extra_comparers_leave_existing(self):
class MyObject(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'MyObject instance'
def compare_my_object(x, y, context):
return '%s != %s' % (x.name, y.name)
with Replacer() as r:
r.replace('testfixtures.comparison._registry', {
list: compare_sequence,
})
self.check_raises(
[1, MyObject('foo')], [1, MyObject('bar')],
"sequence not as expected:\n"
"\n"
"same:\n"
"[1]\n"
"\n"
"first:\n"
"[MyObject instance]\n"
"\n"
"second:\n"
"[MyObject instance]\n"
"\n"
"While comparing [1]: foo != bar",
comparers={MyObject: compare_my_object}
)
def test_list_subclass(self):
class MyList(list): pass
a_list = MyList([1])
b_list = MyList([2])
self.check_raises(
a_list, b_list,
"sequence not as expected:\n\n"
"same:\n[]\n\n"
"first:\n[1]\n\n"
"second:\n[2]"
)
def test_strict_okay(self):
m = object()
compare(m, m, strict=True)
def test_strict_comparer_supplied(self):
compare_obj = Mock()
compare_obj.return_value = 'not equal'
self.check_raises(
object(), object(),
"not equal",
strict=True,
comparers={object: compare_obj},
)
def test_strict_default_comparer(self):
class MyList(list):
pass
# default comparer used!
self.check_raises(
MyList((1, 2, 3)), MyList((1, 2, 4)),
"sequence not as expected:\n"
"\n"
"same:\n"
"[1, 2]\n"
"\n"
"first:\n"
"[3]\n"
"\n"
"second:\n"
"[4]",
strict=True,
)
def test_list_subclass_strict(self):
m = Mock()
m.aCall()
self.check_raises(
[call.aCall()], m.method_calls,
("[call.aCall()] (<{0} 'list'>) != [call.aCall()] "
"({1})").format(class_type_name, call_list_repr),
strict=True,
)
def test_list_subclass_long_strict(self):
m = Mock()
m.call('X'*20)
self.check_raises(
[call.call('Y'*20)], m.method_calls,
("[call.call('YYYYYYYYYYYYYYYYYY... "
"(<{0} 'list'>) != "
"[call.call('XXXXXXXXXXXXXXXXXX... "
"({1})").format(class_type_name, call_list_repr),
strict=True,
)
def test_prefix(self):
self.check_raises(1, 2, 'wrong number of orders: 1 != 2',
prefix='wrong number of orders')
def test_prefix_multiline(self):
self.check_raises(
'x'*5+'\n'+'y'*5, 'x'*5+'\n'+'z'*5,
"file content: \n--- first\n+++ second\n"
"@@ -1,2 +1,2 @@\n xxxxx\n-yyyyy\n+zzzzz",
prefix='file content'
)
def test_suffix(self):
self.check_raises(
1, 2,
'1 != 2\n'
'additional context',
suffix='additional context',
)
def test_labels_multiline(self):
self.check_raises(
'x'*5+'\n'+'y'*5, 'x'*5+'\n'+'z'*5,
"\n--- expected\n+++ actual\n"
"@@ -1,2 +1,2 @@\n xxxxx\n-yyyyy\n+zzzzz",
x_label='expected',
y_label='actual'
)
def test_generator_with_non_generator(self):
self.check_raises(
generator(1, 2, 3), None,
'<generator object generator at ...> != None',
)
def test_generator_with_buggy_generator(self):
def bad_gen():
yield 1
# raising a TypeError here is important :-/
raise TypeError('foo')
with ShouldRaise(TypeError('foo')):
compare(generator(1, 2, 3), bad_gen())
def test_nested_dict_tuple_values_different(self):
self.check_raises(
dict(x=(1, 2, 3)), dict(x=(1, 2, 4)),
"dict not as expected:\n"
"\n"
"values differ:\n"
"'x': (1, 2, 3) != (1, 2, 4)\n"
'\n'
"While comparing ['x']: sequence not as expected:\n"
"\n"
"same:\n"
"(1, 2)\n"
"\n"
"first:\n"
"(3,)\n"
"\n"
"second:\n"
"(4,)"
)
def test_nested_dict_different(self):
self.check_raises(
dict(x=dict(y=1)), dict(x=dict(y=2)),
"dict not as expected:\n"
"\n"
"values differ:\n"
"'x': {'y': 1} != {'y': 2}\n"
'\n'
"While comparing ['x']: dict not as expected:\n"
"\n"
"values differ:\n"
"'y': 1 != 2"
)
def test_nested_dict_empty_but_same(self):
compare(dict(x=dict()), dict(x=dict()), ignore_eq=True)
def test_nested_dict_empty_with_keys(self):
compare(dict(x=dict(x=1)), dict(x=dict(x=1)), ignore_eq=True)
def test_tuple_list_different(self):
self.check_raises(
(1, [2, 3, 5]), (1, [2, 4, 5]),
"sequence not as expected:\n"
"\n"
"same:\n"
"(1,)\n"
"\n"
"first:\n"
"([2, 3, 5],)\n"
"\n"
"second:\n"
"([2, 4, 5],)\n"
"\n"
"While comparing [1]: sequence not as expected:\n"
"\n"
"same:\n"
"[2]\n"
"\n"
"first:\n"
"[3, 5]\n"
"\n"
"second:\n"
"[4, 5]"
)
def test_tuple_long_strings_different(self):
self.check_raises(
(1, 2, "foo\nbar\nbaz\n", 4),
(1, 2, "foo\nbob\nbaz\n", 4),
"sequence not as expected:\n"
"\n"
"same:\n"
"(1, 2)\n"
"\n"
"first:\n"
"('foo\\nbar\\nbaz\\n', 4)\n"
"\n"
"second:\n"
"('foo\\nbob\\nbaz\\n', 4)\n"
"\n"
"While comparing [2]: \n"
"--- first\n"
"+++ second\n"
"@@ -1,4 +1,4 @@\n"
# check that show_whitespace bubbles down
" 'foo\\n'\n"
"-'bar\\n'\n"
"+'bob\\n'\n"
" 'baz\\n'\n"
" ''",
show_whitespace=True
)
def test_dict_multiple_differences(self):
self.check_raises(
dict(x=(1, 2, 3), y=(4, 5, 6, )),
dict(x=(1, 2, 4), y=(4, 5, 7, )),
"dict not as expected:\n"
"\n"
"values differ:\n"
"'x': (1, 2, 3) != (1, 2, 4)\n"
"'y': (4, 5, 6) != (4, 5, 7)\n"
"\n"
"While comparing ['x']: sequence not as expected:\n"
"\n"
"same:\n"
"(1, 2)\n"
"\n"
"first:\n"
"(3,)\n"
"\n"
"second:\n"
"(4,)\n"
"\n"
"While comparing ['y']: sequence not as expected:\n"
"\n"
"same:\n"
"(4, 5)\n"
"\n"
"first:\n"
"(6,)\n"
"\n"
"second:\n"
"(7,)"
)
def test_deep_breadcrumbs(self):
obj1 = singleton('obj1')
obj2 = singleton('obj2')
gen1 = generator(obj1, obj2)
gen2 = generator(obj1, )
# dict -> list -> tuple -> generator
self.check_raises(
dict(x=[1, ('a', 'b', gen1), 3], y=[3, 4]),
dict(x=[1, ('a', 'b', gen2), 3], y=[3, 4]), (
"dict not as expected:\n"
"\n"
"same:\n"
"['y']\n"
"\n"
"values differ:\n"
"'x': [1, ('a', 'b', {gen1}), 3] != [1, ('a', 'b', {gen2}), 3]"
"\n\n"
"While comparing ['x']: sequence not as expected:\n"
"\n"
"same:\n"
"[1]\n"
"\n"
"first:\n"
"[('a', 'b', {gen1}), 3]\n"
"\n"
"second:\n"
"[('a', 'b', {gen2}), 3]\n"
"\n"
"While comparing ['x'][1]: sequence not as expected:\n"
"\n"
"same:\n"
"('a', 'b')\n"
"\n"
"first:\n"
"({gen1},)\n"
"\n"
"second:\n"
"({gen2},)\n"
"\n"
"While comparing ['x'][1][2]: sequence not as expected:\n"
"\n"
"same:\n"
"(<obj1>,)\n"
"\n"
"first:\n"
"(<obj2>,)\n"
"\n"
"second:\n"
"()"
).format(gen1=hexsub(repr(gen1)),
gen2=hexsub(repr(gen2)))
)
def test_nested_labels(self):
obj1 = singleton('obj1')
obj2 = singleton('obj2')
gen1 = generator(obj1, obj2)
gen2 = generator(obj1, )
# dict -> list -> tuple -> generator
self.check_raises(
dict(x=[1, ('a', 'b', gen1), 3], y=[3, 4]),
dict(x=[1, ('a', 'b', gen2), 3], y=[3, 4]), (
"dict not as expected:\n"
"\n"
"same:\n"
"['y']\n"
"\n"
"values differ:\n"
"'x': [1, ('a', 'b', {gen1}), 3] (expected) != "
"[1, ('a', 'b', {gen2}), 3] (actual)\n"
"\n"
"While comparing ['x']: sequence not as expected:\n"
"\n"
"same:\n"
"[1]\n"
"\n"
"expected:\n"
"[('a', 'b', {gen1}), 3]\n"
"\n"
"actual:\n"
"[('a', 'b', {gen2}), 3]\n"
"\n"
"While comparing ['x'][1]: sequence not as expected:\n"
"\n"
"same:\n"
"('a', 'b')\n"
"\n"
"expected:\n"
"({gen1},)\n"
"\n"
"actual:\n"
"({gen2},)\n"
"\n"
"While comparing ['x'][1][2]: sequence not as expected:\n"
"\n"
"same:\n"
"(<obj1>,)\n"
"\n"
"expected:\n"
"(<obj2>,)\n"
"\n"
"actual:\n"
"()"
).format(gen1=hexsub(repr(gen1)),
gen2=hexsub(repr(gen2))),
x_label='expected',
y_label='actual',
)
def test_nested_strict_only_type_difference(self):
MyTuple = namedtuple('MyTuple', 'x y z')
type_repr = repr(MyTuple)
tuple_repr = repr(tuple)
self.check_raises(
[MyTuple(1, 2, 3)],
[(1, 2, 3)],
("sequence not as expected:\n"
"\n"
"same:\n"
"[]\n"
"\n"
"first:\n"
"[MyTuple(x=1, y=2, z=3)]\n"
"\n"
"second:\n"
"[(1, 2, 3)]\n"
"\n"
"While comparing [0]: MyTuple(x=1, y=2, z=3) "
"(%s) "
"!= (1, 2, 3) "
"(%s)") % (type_repr, tuple_repr),
strict=True
)
def test_strict_nested_different(self):
if PY2:
expected = "[1, 2] (<type 'list'>) != (1, 3) (<type 'tuple'>)"
else:
expected = "[1, 2] (<class 'list'>) != (1, 3) (<class 'tuple'>)"
self.check_raises(
(1, 2, [1, 2]), (1, 2, (1, 3)),
"sequence not as expected:\n"
"\n"
"same:\n"
"(1, 2)\n"
"\n"
"first:\n"
"([1, 2],)\n"
"\n"
"second:\n"
"((1, 3),)"
"\n\n"
"While comparing [2]: " + expected,
strict=True,
)
def test_namedtuple_equal(self):
class_ = namedtuple('Foo', 'x')
compare(class_(1), class_(1))
def test_namedtuple_same_type(self):
class_ = namedtuple('Foo', 'x y')
self.check_raises(
class_(1, 2), class_(1, 3),
"Foo not as expected:\n\n"
"same:\n"
"['x']\n\n"
"values differ:\n"
"'y': 2 != 3"
)
def test_namedtuple_different_type(self):
class_a = namedtuple('Foo', 'x y')
class_b = namedtuple('Bar', 'x y z')
self.check_raises(
class_a(1, 2), class_b(1, 2, 3),
"Foo(x=1, y=2) (<class 'testfixtures.tests.test_compare.Foo'>) != "
"Bar(x=1, y=2, z=3) "
"(<class 'testfixtures.tests.test_compare.Bar'>)"
)
def test_dict_with_list(self):
self.check_raises(
{1: 'one', 2: 'two'}, [1, 2],
"{1: 'one', 2: 'two'} != [1, 2]"
)
def test_explicit_expected(self):
self.check_raises('x', expected='y',
message="'y' (expected) != 'x' (actual)")
def test_explicit_actual(self):
self.check_raises('x', actual='y',
message="'x' (expected) != 'y' (actual)")
def test_explicit_both(self):
self.check_raises(message="'x' (expected) != 'y' (actual)",
expected='x', actual='y')
def test_explicit_and_labels(self):
self.check_raises(message="'x' (x_label) != 'y' (y_label)",
expected='x', actual='y',
x_label='x_label', y_label='y_label')
def test_invalid_two_args_expected(self):
with ShouldRaise(TypeError(
"Exactly two objects needed, you supplied: ['z', 'x', 'y']"
)):
compare('x', 'y', expected='z')
def test_invalid_two_args_actual(self):
with ShouldRaise(TypeError(
"Exactly two objects needed, you supplied: ['x', 'y', 'z']"
)):
compare('x', 'y', actual='z')
def test_invalid_zero_args(self):
with ShouldRaise(TypeError(
'Exactly two objects needed, you supplied: []'
)):
compare()
def test_invalid_one_args(self):
with ShouldRaise(TypeError(
"Exactly two objects needed, you supplied: ['x']"
)):
compare('x')
def test_invalid_three_args(self):
with ShouldRaise(TypeError(
"Exactly two objects needed, you supplied: ['x', 'y', 'z']"
)):
compare('x', 'y', 'z')
def test_dont_raise(self):
self.assertEqual(compare('x', 'y', raises=False), "'x' != 'y'")
class OrmObj(object):
def __init__(self, a):
self.a = a
def __eq__(self, other):
return True
def __repr__(self):
return 'OrmObj: '+str(self.a)
def test_django_orm_is_horrible(self):
self.assertTrue(self.OrmObj(1) == self.OrmObj(2))
def query_set():
yield self.OrmObj(1)
yield self.OrmObj(2)
self.check_raises(
message=(
"sequence not as expected:\n"
"\n"
"same:\n"
"(OrmObj: 1,)\n"
"\n"
"expected:\n"
"(OrmObj: 3,)\n"
"\n"
"actual:\n"
"(OrmObj: 2,)\n"
'\n'
'While comparing [1]: OrmObj not as expected:\n'
'\n'
'attributes differ:\n'
"'a': 3 (expected) != 2 (actual)"
),
expected=[self.OrmObj(1), self.OrmObj(3)],
actual=query_set(),
ignore_eq=True
)
def test_django_orm_is_horrible_part_2(self):
t_compare = partial(compare, ignore_eq=True)
t_compare(self.OrmObj(1), self.OrmObj(1))
t_compare(self.OrmObj('some longish string'),
self.OrmObj('some longish string'))
t_compare(self.OrmObj(date(2016, 1, 1)),
self.OrmObj(date(2016, 1, 1)))
def test_django_orm_is_horrible_part_3(self):
compare(
expected=self.OrmObj(1),
actual=self.OrmObj(1),
ignore_eq=True
)
def test_django_orm_is_horrible_part_4(self):
self.check_raises(
message='[1] (expected) != 2 (actual)',
expected=[1],
actual=2,
ignore_eq=True
)
def test_mock_call_same(self):
m = Mock()
m.foo(1, 2, x=3)
compare(m.mock_calls, m.mock_calls)
def test_mock_call_same_strict(self):
m = Mock()
m.foo(1, 2, x=3)
compare(m.mock_calls, m.mock_calls, strict=True)
def test_calls_different(self):
m1 = Mock()
m2 = Mock()
m1.foo(1, 2, x=3, y=4)
m2.bar(1, 3, x=7, y=4)
self.check_raises(
m1.mock_calls,
m2.mock_calls,
"sequence not as expected:\n"
"\n"
"same:\n"
"[]\n"
"\n"
"first:\n"
"[call.foo(1, 2, x=3, y=4)]\n"
"\n"
"second:\n"
"[call.bar(1, 3, x=7, y=4)]"
"\n\n"
'While comparing [0]: mock.call not as expected:\n'
'\n'
"While comparing [0] function name: 'foo' != 'bar'\n"
'\n'
'While comparing [0] args: sequence not as expected:\n'
'\n'
'same:\n'
'(1,)\n'
'\n'
'first:\n'
'(2,)\n'
'\n'
'second:\n'
'(3,)\n'
'\n'
'While comparing [0] kw: dict not as expected:\n'
'\n'
'same:\n'
"['y']\n"
'\n'
'values differ:\n'
"'x': 3 != 7"
)
def test_compare_arbitrary_nested_same(self):
compare(SampleClassA([SampleClassB()]),
SampleClassA([SampleClassB()]))
def test_compare_different_vars(self):
obj1 = SampleClassB(1)
obj1.same = 42
obj1.foo = '1'
obj2 = SampleClassB(2)
obj2.same = 42
obj2.bar = '2'
self.check_raises(
obj1, obj2,
"SampleClassB not as expected:\n"
"\n"
"attributes same:\n"
"['same']\n"
"\n"
'attributes in first but not second:\n'
"'foo': '1'\n"
"\n"
'attributes in second but not first:\n'
"'bar': '2'\n"
'\n'
'attributes differ:\n'
"'args': (1,) != (2,)\n"
'\n'
"While comparing .args: sequence not as expected:\n"
'\n'
'same:\n'
'()\n'
'\n'
'first:\n'
'(1,)\n'
'\n'
'second:\n'
'(2,)'
)
def test_compare_arbitrary_nested_diff(self):
class OurClass:
def __init__(self, *args):
self.args = args
def __repr__(self):
return '<OurClass obj>'
self.check_raises(
OurClass(OurClass(1)),
OurClass(OurClass(2)),
"OurClass not as expected:\n"
"\n"
'attributes differ:\n'
"'args': (<OurClass obj>,) != (<OurClass obj>,)\n"
'\n'
'While comparing .args: sequence not as expected:\n'
'\n'
'same:\n'
'()\n'
'\n'
'first:\n'
'(<OurClass obj>,)\n'
'\n'
'second:\n'
'(<OurClass obj>,)\n'
'\n'
'While comparing .args[0]: OurClass not as expected:\n'
'\n'
'attributes differ:\n'
"'args': (1,) != (2,)\n"
'\n'
'While comparing .args[0].args: sequence not as expected:\n'
'\n'
'same:\n'
'()\n'
'\n'
'first:\n'
'(1,)\n'
'\n'
'second:\n'
'(2,)'
)
def test_compare_slotted_same(self):
compare(Slotted(1, 2), Slotted(1, 2))
def test_compare_slotted_diff(self):
self.check_raises(
Slotted(1, 2),
Slotted(1, 3),
"Slotted not as expected:\n"
"\n"
"attributes same:\n"
"['x']\n"
"\n"
'attributes differ:\n'
"'y': 2 != 3"
)
def test_empty_sets(self):
compare(set(), set())
def test_empty_sets_strict(self):
compare(set(), set(), strict=True)
def test_datetime_not_equal(self):
self.check_raises(
datetime(2001, 1, 1),
datetime(2001, 1, 2),
"datetime.datetime(2001, 1, 1, 0, 0) != "
"datetime.datetime(2001, 1, 2, 0, 0)"
)
| |
from itertools import product as cartes
from sympy import (limit, exp, oo, log, sqrt, Limit, sin, floor, cos, ceiling,
atan, gamma, Symbol, S, pi, Integral, cot, Rational, I, zoo,
tan, integrate, Sum, sign)
from sympy.series.limits import heuristics
from sympy.series.order import Order
from sympy.abc import x, y, z
from sympy.utilities.pytest import XFAIL, raises
def test_basic1():
assert limit(x, x, oo) == oo
assert limit(x, x, -oo) == -oo
assert limit(-x, x, oo) == -oo
assert limit(x**2, x, -oo) == oo
assert limit(-x**2, x, oo) == -oo
assert limit(x*log(x), x, 0, dir="+") == 0
assert limit(1/x, x, oo) == 0
assert limit(exp(x), x, oo) == oo
assert limit(-exp(x), x, oo) == -oo
assert limit(exp(x)/x, x, oo) == oo
assert limit(1/x - exp(-x), x, oo) == 0
assert limit(x + 1/x, x, oo) == oo
assert limit(x - x**2, x, oo) == -oo
assert limit((1 + x)**(1 + sqrt(2)), x, 0) == 1
assert limit((1 + x)**oo, x, 0) == oo
assert limit((1 + x)**oo, x, 0, dir='-') == 0
assert limit((1 + x + y)**oo, x, 0, dir='-') == (1 + y)**(oo)
assert limit(y/x/log(x), x, 0) == -oo*sign(y)
assert limit(cos(x + y)/x, x, 0) == sign(cos(y))*oo
raises(NotImplementedError, lambda: limit(Sum(1/x, (x, 1, y)) -
log(y), y, oo))
raises(NotImplementedError, lambda: limit(Sum(1/x, (x, 1, y)) - 1/y, y, oo))
assert limit(gamma(1/x + 3), x, oo) == 2
assert limit(S.NaN, x, -oo) == S.NaN
assert limit(Order(2)*x, x, S.NaN) == S.NaN
assert limit(1/(x - 1), x, 1, dir="+") == oo
assert limit(1/(x - 1), x, 1, dir="-") == -oo
assert limit(1/(5 - x)**3, x, 5, dir="+") == -oo
assert limit(1/(5 - x)**3, x, 5, dir="-") == oo
assert limit(1/sin(x), x, pi, dir="+") == -oo
assert limit(1/sin(x), x, pi, dir="-") == oo
assert limit(1/cos(x), x, pi/2, dir="+") == -oo
assert limit(1/cos(x), x, pi/2, dir="-") == oo
assert limit(1/tan(x**3), x, (2*pi)**(S(1)/3), dir="+") == oo
assert limit(1/tan(x**3), x, (2*pi)**(S(1)/3), dir="-") == -oo
assert limit(1/cot(x)**3, x, (3*pi/2), dir="+") == -oo
assert limit(1/cot(x)**3, x, (3*pi/2), dir="-") == oo
# approaching 0
# from dir="+"
assert limit(1 + 1/x, x, 0) == oo
# from dir='-'
# Add
assert limit(1 + 1/x, x, 0, dir='-') == -oo
# Pow
assert limit(x**(-2), x, 0, dir='-') == oo
assert limit(x**(-3), x, 0, dir='-') == -oo
assert limit(1/sqrt(x), x, 0, dir='-') == (-oo)*I
assert limit(x**2, x, 0, dir='-') == 0
assert limit(sqrt(x), x, 0, dir='-') == 0
assert limit((1 + cos(x))**oo, x, 0) == oo
@XFAIL
def test_basic1_xfail():
assert limit(x**-pi, x, 0, dir='-') == zoo
def test_basic2():
assert limit(x**x, x, 0, dir="+") == 1
assert limit((exp(x) - 1)/x, x, 0) == 1
assert limit(1 + 1/x, x, oo) == 1
assert limit(-exp(1/x), x, oo) == -1
assert limit(x + exp(-x), x, oo) == oo
assert limit(x + exp(-x**2), x, oo) == oo
assert limit(x + exp(-exp(x)), x, oo) == oo
assert limit(13 + 1/x - exp(-x), x, oo) == 13
def test_basic3():
assert limit(1/x, x, 0, dir="+") == oo
assert limit(1/x, x, 0, dir="-") == -oo
def test_basic4():
assert limit(2*x + y*x, x, 0) == 0
assert limit(2*x + y*x, x, 1) == 2 + y
assert limit(2*x**8 + y*x**(-3), x, -2) == 512 - y/8
assert limit(sqrt(x + 1) - sqrt(x), x, oo) == 0
assert integrate(1/(x**3 + 1), (x, 0, oo)) == 2*pi*sqrt(3)/9
def test_issue786():
assert limit(x*y + x*z, z, 2) == x*y + 2*x
def test_Limit():
assert Limit(sin(x)/x, x, 0) != 1
assert Limit(sin(x)/x, x, 0).doit() == 1
def test_floor():
assert limit(floor(x), x, -2, "+") == -2
assert limit(floor(x), x, -2, "-") == -3
assert limit(floor(x), x, -1, "+") == -1
assert limit(floor(x), x, -1, "-") == -2
assert limit(floor(x), x, 0, "+") == 0
assert limit(floor(x), x, 0, "-") == -1
assert limit(floor(x), x, 1, "+") == 1
assert limit(floor(x), x, 1, "-") == 0
assert limit(floor(x), x, 2, "+") == 2
assert limit(floor(x), x, 2, "-") == 1
assert limit(floor(x), x, 248, "+") == 248
assert limit(floor(x), x, 248, "-") == 247
def test_floor_requires_robust_assumptions():
assert limit(floor(sin(x)), x, 0, "+") == 0
assert limit(floor(sin(x)), x, 0, "-") == -1
assert limit(floor(cos(x)), x, 0, "+") == 0
assert limit(floor(cos(x)), x, 0, "-") == 0
assert limit(floor(5 + sin(x)), x, 0, "+") == 5
assert limit(floor(5 + sin(x)), x, 0, "-") == 4
assert limit(floor(5 + cos(x)), x, 0, "+") == 5
assert limit(floor(5 + cos(x)), x, 0, "-") == 5
def test_ceiling():
assert limit(ceiling(x), x, -2, "+") == -1
assert limit(ceiling(x), x, -2, "-") == -2
assert limit(ceiling(x), x, -1, "+") == 0
assert limit(ceiling(x), x, -1, "-") == -1
assert limit(ceiling(x), x, 0, "+") == 1
assert limit(ceiling(x), x, 0, "-") == 0
assert limit(ceiling(x), x, 1, "+") == 2
assert limit(ceiling(x), x, 1, "-") == 1
assert limit(ceiling(x), x, 2, "+") == 3
assert limit(ceiling(x), x, 2, "-") == 2
assert limit(ceiling(x), x, 248, "+") == 249
assert limit(ceiling(x), x, 248, "-") == 248
def test_ceiling_requires_robust_assumptions():
assert limit(ceiling(sin(x)), x, 0, "+") == 1
assert limit(ceiling(sin(x)), x, 0, "-") == 0
assert limit(ceiling(cos(x)), x, 0, "+") == 1
assert limit(ceiling(cos(x)), x, 0, "-") == 1
assert limit(ceiling(5 + sin(x)), x, 0, "+") == 6
assert limit(ceiling(5 + sin(x)), x, 0, "-") == 5
assert limit(ceiling(5 + cos(x)), x, 0, "+") == 6
assert limit(ceiling(5 + cos(x)), x, 0, "-") == 6
def test_atan():
x = Symbol("x", real=True)
assert limit(atan(x)*sin(1/x), x, 0) == 0
assert limit(atan(x) + sqrt(x + 1) - sqrt(x), x, oo) == pi/2
def test_abs():
assert limit(abs(x), x, 0) == 0
assert limit(abs(sin(x)), x, 0) == 0
assert limit(abs(cos(x)), x, 0) == 1
assert limit(abs(sin(x + 1)), x, 0) == sin(1)
def test_heuristic():
x = Symbol("x", real=True)
assert heuristics(sin(1/x) + atan(x), x, 0, '+') == sin(oo)
assert limit(log(2 + sqrt(atan(x))*sqrt(sin(1/x))), x, 0) == log(2)
def test_issue772():
z = Symbol("z", positive=True)
f = -1/z*exp(-z*x)
assert limit(f, x, oo) == 0
assert f.limit(x, oo) == 0
def test_exponential():
n = Symbol('n')
x = Symbol('x', real=True)
assert limit((1 + x/n)**n, n, oo) == exp(x)
assert limit((1 + x/(2*n))**n, n, oo) == exp(x/2)
assert limit((1 + x/(2*n + 1))**n, n, oo) == exp(x/2)
assert limit(((x - 1)/(x + 1))**x, x, oo) == exp(-2)
assert limit(1 + (1 + 1/x)**x, x, oo) == 1 + S.Exp1
@XFAIL
def test_exponential2():
n = Symbol('n')
assert limit((1 + x/(n + sin(n)))**n, n, oo) == exp(x)
def test_doit():
f = Integral(2 * x, x)
l = Limit(f, x, oo)
assert l.doit() == oo
@XFAIL
def test_doit2():
f = Integral(2 * x, x)
l = Limit(f, x, oo)
# limit() breaks on the contained Integral.
assert l.doit(deep=False) == l
def test_bug693a():
assert sin(sin(x + 1) + 1).limit(x, 0) == sin(sin(1) + 1)
def test_issue693():
assert limit((1 - cos(x))/x**2, x, S(1)/2) == 4 - 4*cos(S(1)/2)
assert limit(sin(sin(x + 1) + 1), x, 0) == sin(1 + sin(1))
assert limit(abs(sin(x + 1) + 1), x, 0) == 1 + sin(1)
def test_issue991():
assert limit(1/(x + 3), x, 2) == S(1)/5
assert limit(1/(x + pi), x, 2) == S(1)/(2 + pi)
assert limit(log(x)/(x**2 + 3), x, 2) == log(2)/7
assert limit(log(x)/(x**2 + pi), x, 2) == log(2)/(4 + pi)
def test_issue1448():
assert limit(cot(x), x, 0, dir='+') == oo
assert limit(cot(x), x, pi/2, dir='+') == 0
def test_issue2065():
assert limit(x**0.5, x, oo) == oo**0.5 == oo
assert limit(x**0.5, x, 16) == S(16)**0.5
assert limit(x**0.5, x, 0) == 0
assert limit(x**(-0.5), x, oo) == 0
assert limit(x**(-0.5), x, 4) == S(4)**(-0.5)
def test_issue2084():
# using list(...) so py.test can recalculate values
tests = list(cartes([x, -x],
[-1, 1],
[2, 3, Rational(1, 2), Rational(2, 3)],
['-', '+']))
results = (oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3), oo,
0, 0, 0, 0, 0, 0, 0, 0,
oo, oo, oo, -oo, oo, -oo*I, oo, -oo*(-1)**Rational(1, 3),
0, 0, 0, 0, 0, 0, 0, 0)
assert len(tests) == len(results)
for i, (args, res) in enumerate(zip(tests, results)):
y, s, e, d = args
eq = y**(s*e)
try:
assert limit(eq, x, 0, dir=d) == res
except AssertionError:
if 0: # change to 1 if you want to see the failing tests
print()
print(i, res, eq, d, limit(eq, x, 0, dir=d))
else:
assert None
def test_issue2085():
assert limit(sin(x)/x, x, oo) == 0
assert limit(atan(x), x, oo) == pi/2
assert limit(gamma(x), x, oo) == oo
assert limit(cos(x)/x, x, oo) == 0
assert limit(gamma(x), x, Rational(1, 2)) == sqrt(pi)
def test_issue2130():
assert limit((1 + y)**(1/y) - S.Exp1, y, 0) == 0
def test_issue1447():
# using list(...) so py.test can recalculate values
tests = list(cartes([cot, tan],
[-pi/2, 0, pi/2, pi, 3*pi/2],
['-', '+']))
results = (0, 0, -oo, oo, 0, 0, -oo, oo, 0, 0,
oo, -oo, 0, 0, oo, -oo, 0, 0, oo, -oo)
assert len(tests) == len(results)
for i, (args, res) in enumerate(zip(tests, results)):
f, l, d = args
eq = f(x)
try:
assert limit(eq, x, l, dir=d) == res
except AssertionError:
if 0: # change to 1 if you want to see the failing tests
print()
print(i, res, eq, l, d, limit(eq, x, l, dir=d))
else:
assert None
def test_issue835():
assert limit((1 + x**log(3))**(1/x), x, 0) == 1
assert limit((5**(1/x) + 3**(1/x))**x, x, 0) == 5
def test_calculate_series():
# needs gruntz calculate_series to go to n = 32
assert limit(x**(S(77)/3)/(1 + x**(S(77)/3)), x, oo) == 1
# needs gruntz calculate_series to go to n = 128
assert limit(x**101.1/(1 + x**101.1), x, oo) == 1
def test_issue2856():
assert limit((x**16)/(1 + x**16), x, oo) == 1
assert limit((x**100)/(1 + x**100), x, oo) == 1
assert limit((x**1885)/(1 + x**1885), x, oo) == 1
assert limit((x**1000/((x + 1)**1000 + exp(-x))), x, oo) == 1
def test_newissue():
assert limit(exp(1/sin(x))/exp(cot(x)), x, 0) == 1
def test_extended_real_line():
assert limit(x - oo, x, oo) == -oo
assert limit(oo - x, x, -oo) == oo
assert limit(x**2/(x - 5) - oo, x, oo) == -oo
assert limit(1/(x + sin(x)) - oo, x, 0) == -oo
assert limit(oo/x, x, oo) == oo
assert limit(x - oo + 1/x, x, oo) == -oo
assert limit(x - oo + 1/x, x, 0) == -oo
@XFAIL
def test_order_oo():
from sympy import C
x = Symbol('x', positive=True, bounded=True)
assert C.Order(x)*oo != C.Order(1, x)
assert limit(oo/(x**2 - 4), x, oo) == oo
def test_issue2337():
raises(NotImplementedError, lambda: limit(exp(x*y), x, oo))
raises(NotImplementedError, lambda: limit(exp(-x*y), x, oo))
def test_Limit_dir():
raises(TypeError, lambda: Limit(x, x, 0, dir=0))
raises(ValueError, lambda: Limit(x, x, 0, dir='0'))
def test_polynomial():
assert limit((x + 1)**1000/((x + 1)**1000 + 1), x, oo) == 1
assert limit((x + 1)**1000/((x + 1)**1000 + 1), x, -oo) == 1
def test_rational():
assert limit(1/y - (1/(y + x) + x/(y + x)/y)/z, x, oo) == 1/y - 1/(y*z)
assert limit(1/y - (1/(y + x) + x/(y + x)/y)/z, x, -oo) == 1/y - 1/(y*z)
def test_issue_2641():
assert limit(log(x)/z - log(2*x)/z, x, 0) == -log(2)/z
def test_issue_3267():
n = Symbol('n', integer=True, positive=True)
r = (n + 1)*x**(n + 1)/(x**(n + 1) - 1) - x/(x - 1)
assert limit(r, x, 1).simplify() == n/2
def test_factorial():
from sympy import factorial, E
f = factorial(x)
assert limit(f, x, oo) == oo
assert limit(x/f, x, oo) == 0
# see Stirling's approximation:
# http://en.wikipedia.org/wiki/Stirling's_approximation
assert limit(f/(sqrt(2*pi*x)*(x/E)**x), x, oo) == 1
assert limit(f, x, -oo) == factorial(-oo)
assert limit(f, x, x**2) == factorial(x**2)
assert limit(f, x, -x**2) == factorial(-x**2)
def test_issue_3461():
e = 5*x**3/4 - 3*x/4 + (y*(3*x**2/2 - S(1)/2) + \
35*x**4/8 - 15*x**2/4 + S(3)/8)/(2*(y + 1))
assert limit(e, y, oo) == (5*x**3 + 3*x**2 - 3*x - 1)/4
def test_issue_2641_2():
assert limit(log(x)*z - log(2*x)*y, x, 0) == oo*sign(y - z)
def test_issue_2073():
n = Symbol('n')
r = Symbol('r', positive=True)
c = Symbol('c')
p = Symbol('p', positive=True)
m = Symbol('m', negative=True)
expr = ((2*n*(n - r + 1)/(n + r*(n - r + 1)))**c + \
(r - 1)*(n*(n - r + 2)/(n + r*(n - r + 1)))**c - n)/(n**c - n)
expr = expr.subs(c, c + 1)
raises(NotImplementedError, lambda: limit(expr, n, oo))
assert limit(expr.subs(c, m), n, oo) == 1
assert limit(expr.subs(c, p), n, oo).simplify() == \
(2**(p + 1) + r - 1)/(r + 1)**(p + 1)
def test_issue_3989():
a = Symbol('a')
assert limit(sqrt(x/(x + a)), x, oo) == 1
| |
# =================================================================
#
# Authors: Seth Girvin
#
# Copyright (c) 2021 Seth Girvin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
from __future__ import unicode_literals
import sys
import codecs
import warnings
import functools
from collections import OrderedDict
from mappyfile.parser import Parser
from mappyfile.transformer import MapfileToDict
from mappyfile.pprint import PrettyPrinter
from mappyfile.validator import Validator
try:
from itertools import izip_longest as zip_longest # py2
except ImportError:
from itertools import zip_longest # py3
def deprecated(func):
"""
From https://stackoverflow.com/questions/2536307/how-do-i-deprecate-python-functions/30253848#30253848
This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used.
"""
@functools.wraps(func)
def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
warnings.warn("Call to deprecated function {}.".format(func.__name__),
category=DeprecationWarning,
stacklevel=2)
warnings.simplefilter('default', DeprecationWarning) # reset filter
return func(*args, **kwargs)
return new_func
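# A minimal usage sketch (``old_loads`` is a hypothetical function):
#
#   @deprecated
#   def old_loads(s):
#       return loads(s)
#
#   old_loads('MAP NAME "TEST" END')  # warns: Call to deprecated function old_loads.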
def open(fn, expand_includes=True, include_comments=False, include_position=False, **kwargs):
"""
Load a Mapfile from the supplied filename into a Python dictionary.
Parameters
----------
fn: string
The path to the Mapfile, or partial Mapfile
expand_includes: boolean
Load any ``INCLUDE`` files in the MapFile
include_comments: boolean
Include or discard comment strings from the Mapfile - *experimental*
include_position: boolean
Include the position of the Mapfile tokens in the output
Returns
-------
dict
A Python dictionary representing the Mapfile in the mappyfile format
Example
-------
To open a Mapfile from a filename and return it as a dictionary object::
d = mappyfile.open('mymap.map')
Notes
-----
Partial Mapfiles can also be opened, for example a file containing a ``LAYER`` object.
"""
p = Parser(expand_includes=expand_includes,
include_comments=include_comments, **kwargs)
ast = p.parse_file(fn)
m = MapfileToDict(include_position=include_position,
include_comments=include_comments, **kwargs)
d = m.transform(ast)
return d
def load(fp, expand_includes=True, include_position=False, include_comments=False, **kwargs):
"""
Load a Mapfile from an open file or file-like object.
Parameters
----------
fp: file
A file-like object - as with all Mapfiles this should be encoded in "utf-8"
expand_includes: boolean
Load any ``INCLUDE`` files in the MapFile
include_comments: boolean
Include or discard comment strings from the Mapfile - *experimental*
include_position: boolean
Include the position of the Mapfile tokens in the output
Returns
-------
dict
A Python dictionary representing the Mapfile in the mappyfile format
Example
-------
To open a Mapfile from a file and return it as a dictionary object::
with open('mymap.map') as fp:
d = mappyfile.load(fp)
Notes
-----
Partial Mapfiles can also be opened, for example a file containing a ``LAYER`` object.
"""
p = Parser(expand_includes=expand_includes,
include_comments=include_comments, **kwargs)
ast = p.load(fp)
m = MapfileToDict(include_position=include_position,
include_comments=include_comments, **kwargs)
d = m.transform(ast)
return d
def loads(s, expand_includes=True, include_position=False, include_comments=False, **kwargs):
"""
Load a Mapfile from a string
Parameters
----------
s: string
The Mapfile, or partial Mapfile, text
expand_includes: boolean
Load any ``INCLUDE`` files in the MapFile
include_comments: boolean
Include or discard comment strings from the Mapfile - *experimental*
include_position: boolean
Include the position of the Mapfile tokens in the output
Returns
-------
dict
A Python dictionary representing the Mapfile in the mappyfile format
Example
-------
To open a Mapfile from a string and return it as a dictionary object::
s = '''MAP NAME "TEST" END'''
d = mappyfile.loads(s)
assert d["name"] == "TEST"
"""
p = Parser(expand_includes=expand_includes,
include_comments=include_comments, **kwargs)
ast = p.parse(s)
m = MapfileToDict(include_position=include_position,
include_comments=include_comments, **kwargs)
d = m.transform(ast)
return d
def dump(d, fp, indent=4, spacer=" ", quote='"', newlinechar="\n", end_comment=False,
align_values=False, separate_complex_types=False):
"""
Write d (the Mapfile dictionary) as a formatted stream to fp
Parameters
----------
d: dict
A Python dictionary based on the mappyfile schema
fp: file
A file-like object
indent: int
The number of ``spacer`` characters to indent structures in the Mapfile
spacer: string
The character to use for indenting structures in the Mapfile. Typically
spaces or tab characters (``\\t``)
quote: string
The quote character to use in the Mapfile (double or single quotes)
newlinechar: string
The character used to insert newlines in the Mapfile
end_comment: bool
Add a comment with the block type at each closing END
statement e.g. END # MAP
align_values: bool
Aligns the values in the same column for better readability. The column is
a multiple of ``indent`` and is determined by the longest key
separate_complex_types: bool
Groups composites (complex MapServer definitions with "END") together at the end.
Keeps the given order except that all simple key-value pairs appear before composites.
Example
-------
To open a Mapfile from a string, and then dump it back out to an open file,
using 2 spaces for indentation, and single-quotes for properties::
s = '''MAP NAME "TEST" END'''
d = mappyfile.loads(s)
with open(fn, "w") as f:
mappyfile.dump(d, f, indent=2, quote="'")
"""
map_string = _pprint(d, indent, spacer, quote, newlinechar, end_comment, align_values, separate_complex_types)
fp.write(map_string)
def save(d, output_file, indent=4, spacer=" ", quote='"', newlinechar="\n", end_comment=False,
align_values=False, separate_complex_types=False, **kwargs):
"""
Write a dictionary to an output Mapfile on disk
Parameters
----------
d: dict
A Python dictionary based on the mappyfile schema
output_file: string
The output filename
indent: int
The number of ``spacer`` characters to indent structures in the Mapfile
spacer: string
The character to use for indenting structures in the Mapfile. Typically
spaces or tab characters (``\\t``)
quote: string
The quote character to use in the Mapfile (double or single quotes)
newlinechar: string
The character used to insert newlines in the Mapfile
end_comment: bool
Add a comment with the block type at each closing END
statement e.g. END # MAP
align_values: bool
Aligns the values in the same column for better readability. The column is
a multiple of ``indent`` and is determined by the longest key.
separate_complex_types: bool
Groups composites (complex MapServer definitions with "END") together at the end.
Keeps the given order except that all simple key-value pairs appear before composites.
Returns
-------
string
The output_file passed into the function
Example
-------
To open a Mapfile from a string, and then save it to a file::
s = '''MAP NAME "TEST" END'''
d = mappyfile.loads(s)
fn = "C:/Data/mymap.map"
mappyfile.save(d, fn)
"""
map_string = _pprint(d, indent, spacer, quote, newlinechar, end_comment, align_values, separate_complex_types)
_save(output_file, map_string)
return output_file
def dumps(d, indent=4, spacer=" ", quote='"', newlinechar="\n", end_comment=False,
align_values=False, separate_complex_types=False, **kwargs):
"""
Output a Mapfile dictionary as a string
Parameters
----------
d: dict
A Python dictionary based on the the mappyfile schema
indent: int
The number of ``spacer`` characters to indent structures in the Mapfile
spacer: string
The character to use for indenting structures in the Mapfile. Typically
spaces or tab characters (``\\t``)
quote: string
The quote character to use in the Mapfile (double or single quotes)
newlinechar: string
The character used to insert newlines in the Mapfile
end_comment: bool
Add a comment with the block type at each closing END
statement e.g. END # MAP
align_values: bool
Aligns the values in the same column for better readability. The column is
a multiple of ``indent`` and is determined by the longest key
separate_complex_types: bool
Groups composites (complex MapServer definitions with "END") together at the end.
Keeps the given order except that all simple key-value pairs appear before composites.
Returns
-------
string
The Mapfile as a string
Example
-------
To open a Mapfile from a string, and then print it back out
as a string using tabs::
s = '''MAP NAME "TEST" END'''
d = mappyfile.loads(s)
print(mappyfile.dumps(d, indent=1, spacer="\\t"))
"""
return _pprint(d, indent, spacer, quote, newlinechar, end_comment, align_values, separate_complex_types, **kwargs)
def find(lst, key, value):
"""
Find an item in a list of dicts using a key and a value
Parameters
----------
lst: list
A list of composite dictionaries e.g. ``layers``, ``classes``
key: string
The key name to search each dictionary in the list
value: string
The value to search for
Returns
-------
dict
The first composite dictionary object with a key that matches the value
Example
-------
To find the ``LAYER`` in a list of layers with ``NAME`` set to ``Layer2``::
s = '''
MAP
LAYER
NAME "Layer1"
TYPE POLYGON
END
LAYER
NAME "Layer2"
TYPE POLYGON
CLASS
NAME "Class1"
COLOR 0 0 -8
END
END
END
'''
d = mappyfile.loads(s)
cmp = mappyfile.find(d["layers"], "name", "Layer2")
assert cmp["name"] == "Layer2"
"""
return next((item for item in lst if item[key.lower()] == value), None)
def findall(lst, key, value):
"""
Find all items in lst where key matches value.
For example, find all ``LAYER`` objects in a ``MAP`` where ``GROUP`` equals ``VALUE``
Parameters
----------
lst: list
A list of composite dictionaries e.g. ``layers``, ``classes``
key: string
The key name to search each dictionary in the list
value: string or iterable
The value (or collection of values) to match against
Returns
-------
list
A Python list containing the matching composite dictionaries
Example
-------
To find all ``LAYER`` objects with ``GROUP`` set to ``test``::
s = '''
MAP
LAYER
NAME "Layer1"
TYPE POLYGON
GROUP "test"
END
LAYER
NAME "Layer2"
TYPE POLYGON
GROUP "test1"
END
LAYER
NAME "Layer3"
TYPE POLYGON
GROUP "test2"
END
LAYER
NAME "Layer4"
TYPE POLYGON
GROUP "test"
END
END
'''
d = mappyfile.loads(s)
layers = mappyfile.findall(d["layers"], "group", "test")
assert len(layers) == 2
"""
return [item for item in lst if item[key.lower()] in value]
def findunique(lst, key):
"""
Find all unique key values for items in lst.
Parameters
----------
lst: list
A list of composite dictionaries e.g. ``layers``, ``classes``
key: string
The key name to search each dictionary in the list
Returns
-------
list
A sorted Python list of unique keys in the list
Example
-------
To find all ``GROUP`` values for ``CLASS`` in a ``LAYER``::
s = '''
LAYER
CLASS
GROUP "group1"
NAME "Class1"
COLOR 0 0 0
END
CLASS
GROUP "group2"
NAME "Class2"
COLOR 0 0 0
END
CLASS
GROUP "group1"
NAME "Class3"
COLOR 0 0 0
END
END
'''
d = mappyfile.loads(s)
groups = mappyfile.findunique(d["classes"], "group")
assert groups == ["group1", "group2"]
"""
return sorted(set([item[key.lower()] for item in lst]))
def findkey(d, *keys):
"""
Get a value from a dictionary based on a list of keys and/or list indexes.
Parameters
----------
d: dict
A Python dictionary
keys: list
A list of key names, or list indexes
Returns
-------
dict
The composite dictionary object at the path specified by the keys
Example
-------
To return the value of the first class of the first layer in a Mapfile::
s = '''
MAP
LAYER
NAME "Layer1"
TYPE POLYGON
CLASS
NAME "Class1"
COLOR 0 0 255
END
END
END
'''
d = mappyfile.loads(s)
pth = ["layers", 0, "classes", 0]
cls1 = mappyfile.findkey(d, *pth)
assert cls1["name"] == "Class1"
"""
if keys:
keys = list(keys)
key = keys.pop(0)
return findkey(d[key], *keys)
else:
return d
def update(d1, d2):
"""
Update dict d1 with properties from d2
Note
----
Allows deletion of objects with a special ``__delete__`` key.
For any list of dicts, new items can be added when updating
(a usage sketch follows this function).
Parameters
----------
d1: dict
A Python dictionary
d2: dict
A Python dictionary that will be used to update any keys with the same name in d1
Returns
-------
dict
The updated dictionary
"""
NoneType = type(None)
if d2.get("__delete__", False):
return {}
for k, v in d2.items():
if isinstance(v, dict):
if v.get("__delete__", False):
# allow a __delete__ property to be set to delete objects
del d1[k]
else:
d1[k] = update(d1.get(k, {}), v)
elif isinstance(v, (tuple, list)) and all(isinstance(li, (NoneType, dict)) for li in v):
# a list of dicts and/or NoneType
orig_list = d1.get(k, [])
new_list = []
pairs = list(zip_longest(orig_list, v, fillvalue=None))
for orig_item, new_item in pairs:
if orig_item is None:
orig_item = {} # can't use {} for fillvalue as only one dict created/modified!
if new_item is None:
new_item = {}
if new_item.get("__delete__", False):
d = None  # mark for removal; it is skipped when building new_list below
else:
d = update(orig_item, new_item)
if d is not None:
new_list.append(d)
d1[k] = new_list
else:
if k in d1 and v == "__delete__":
del d1[k]
else:
d1[k] = v
return d1
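# An illustrative sketch of update() semantics (the dicts are hypothetical,
# not tied to the mappyfile schema):
#
#   d1 = {"name": "Layer1", "classes": [{"name": "c1"}, {"name": "c2"}]}
#   d2 = {"name": "Renamed", "classes": [{}, {"__delete__": True}]}
#   update(d1, d2)
#   # -> {"name": "Renamed", "classes": [{"name": "c1"}]}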
def validate(d, version=None):
"""
Validate a mappyfile dictionary by using the Mapfile schema.
An optional version number can be used to check that
the Mapfile is valid for a specific MapServer version.
Parameters
----------
d: dict
A Python dictionary based on the mappyfile schema
version: float
The MapServer version number used to validate the Mapfile
Returns
-------
list
A list containing validation errors
"""
v = Validator()
return v.validate(d, version=version)
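# Typical use, assuming ``d`` came from mappyfile.loads() and 7.6 is the
# target MapServer version:
#
#   errors = validate(d, version=7.6)
#   assert errors == []  # an empty list means the Mapfile validated cleanly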
def _save(output_file, string):
with codecs.open(output_file, "w", encoding="utf-8") as f:
f.write(string)
def _pprint(d, indent, spacer, quote, newlinechar, end_comment, align_values, separate_complex_types, **kwargs):
pp = PrettyPrinter(indent=indent, spacer=spacer,
quote=quote, newlinechar=newlinechar,
end_comment=end_comment, align_values=align_values,
separate_complex_types=separate_complex_types, **kwargs)
return pp.pprint(d)
def create(type, version=None):
"""
Create a new mappyfile object, using MapServer defaults (if any).
Parameters
----------
type: string
The mappyfile type to be stored in the ``__type__`` property
version: float
The MapServer version number used to select the schema (optional)
Returns
-------
dict
A Python dictionary representing the Mapfile object in the mappyfile format
"""
# get the schema for this type
v = Validator()
try:
schema = v.get_versioned_schema(version=version, schema_name=type)
except IOError:
raise SyntaxError("The mappyfile type '{}' does not exist!".format(type))
d = OrderedDict()
d["__type__"] = type
properties = sorted(schema["properties"].items())
for k, v in properties:
if "default" in v:
d[k] = v["default"]
return d
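# Illustrative sketch: ``create`` builds a new object pre-populated with
# any defaults found in the schema for that type.
def _example_create():
    layer = create("layer")
    assert layer["__type__"] == "layer"
    return layer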
def dict_move_to_end(ordered_dict, key):
if sys.version_info[0] < 3:
# mappyfile requires Python >= 2.7,
# so this should be safe
val = ordered_dict[key]
del ordered_dict[key]
ordered_dict[key] = val
else:
ordered_dict.move_to_end(key)
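# Illustrative sketch: ``dict_move_to_end`` behaves like
# ``OrderedDict.move_to_end`` on both Python 2.7 and Python 3.
def _example_dict_move_to_end():
    d = OrderedDict([("a", 1), ("b", 2)])
    dict_move_to_end(d, "a")
    assert list(d.keys()) == ["b", "a"]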
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from hordak.models import Account
from hordak.tests.utils import DataProvider
from swiftwind.housemates.forms import HousemateUpdateForm
from swiftwind.housemates.models import Housemate
class HousemateCreateViewTestCase(DataProvider, TestCase):
def setUp(self):
self.login()
self.view_url = reverse('housemates:create')
self.user = User.objects.create(
username='testuser',
email='test@example.com',
first_name='Joe',
last_name='Bloggs',
)
self.parent_account = self.account(
name='Housemate Income',
type=Account.TYPES.income,
)
def test_get(self):
response = self.client.get(self.view_url)
self.assertIn('form', response.context)
def test_post_success_existing_user_existing_account(self):
account = self.account(
name='Existing Account',
parent=self.parent_account,
)
response = self.client.post(self.view_url, data=dict(
existing_user=self.user.username,
account=account.uuid,
))
if response.context and 'form' in response.context:
self.assertFalse(response.context['form'].errors)
self.assertEqual(Housemate.objects.count(), 1)
housemate = Housemate.objects.get()
self.assertEqual(housemate.user, self.user)
self.assertEqual(housemate.account, account)
self.assertEqual(housemate.account.type, Account.TYPES.income)
def test_post_success_existing_user_new_account(self):
response = self.client.post(self.view_url, data=dict(
existing_user=self.user.username,
))
if response.context and 'form' in response.context:
self.assertFalse(response.context['form'].errors)
self.assertEqual(Housemate.objects.count(), 1)
housemate = Housemate.objects.get()
self.assertEqual(housemate.user, self.user)
self.assertEqual(housemate.account.parent, self.parent_account)
self.assertEqual(housemate.account.name, 'Joe Bloggs')
self.assertEqual(housemate.account.type, Account.TYPES.income)
    @override_settings(DEFAULT_CURRENCY='EUR')
    def test_post_success_new_user_existing_account(self):
account = self.account(
name='Existing Account',
parent=self.parent_account,
)
response = self.client.post(self.view_url, data=dict(
new_username='newuser',
new_email='new@example.com',
new_first_name='New',
new_last_name='User',
account=account.uuid,
))
if response.context and 'form' in response.context:
self.assertFalse(response.context['form'].errors)
account.refresh_from_db()
self.assertEqual(Housemate.objects.count(), 1)
housemate = Housemate.objects.get()
self.assertEqual(housemate.user.username, 'newuser')
self.assertEqual(housemate.user.email, 'new@example.com')
self.assertEqual(housemate.user.first_name, 'New')
self.assertEqual(housemate.user.last_name, 'User')
self.assertEqual(housemate.account, account)
self.assertEqual(housemate.account.name, 'Existing Account')
self.assertEqual(housemate.account.currencies, ['EUR'])
self.assertEqual(housemate.account.type, Account.TYPES.income)
@override_settings(DEFAULT_CURRENCY='EUR')
def test_post_success_new_user_new_account(self):
account = self.account(
name='Existing Account',
parent=self.parent_account,
)
response = self.client.post(self.view_url, data=dict(
new_username='newuser',
new_email='new@example.com',
new_first_name='New',
new_last_name='User',
))
if response.context and 'form' in response.context:
self.assertFalse(response.context['form'].errors)
account.refresh_from_db()
self.assertEqual(Housemate.objects.count(), 1)
housemate = Housemate.objects.get()
self.assertEqual(housemate.user.username, 'newuser')
self.assertEqual(housemate.user.email, 'new@example.com')
self.assertEqual(housemate.user.first_name, 'New')
self.assertEqual(housemate.user.last_name, 'User')
self.assertEqual(housemate.account.parent, self.parent_account)
self.assertEqual(housemate.account.name, 'New User')
self.assertEqual(housemate.account.currencies, ['EUR'])
self.assertEqual(housemate.account.type, Account.TYPES.income)
def test_post_success_existing_user_no_name(self):
self.user.first_name = ''
self.user.last_name = ''
self.user.save()
response = self.client.post(self.view_url, data=dict(
existing_user=self.user.username,
))
if response.context and 'form' in response.context:
self.assertFalse(response.context['form'].errors)
housemate = Housemate.objects.get()
# Check the account name is set to the username, rather than just being blank
self.assertEqual(housemate.account.name, 'testuser')
class HousemateUpdateViewTestCase(DataProvider, TestCase):
def setUp(self):
self.login()
self.user = User.objects.create(
username='testuser',
email='test@example.com',
first_name='Joe',
last_name='Bloggs',
)
self.parent_account = self.account(
name='Housemate Income',
type=Account.TYPES.income,
)
self.housemate = Housemate.objects.create(
user=self.user,
account=self.account('Joe Blogs', parent=self.parent_account)
)
self.view_url = reverse('housemates:update', args=[self.housemate.uuid])
def test_get(self):
response = self.client.get(self.view_url)
self.assertIn('form', response.context)
def test_simple(self):
response = self.client.post(self.view_url, data=dict(
username='newusername',
email='new@email.com',
first_name='Jim',
last_name='Smith',
))
if response.context and 'form' in response.context:
self.assertFalse(response.context['form'].errors)
self.assertEqual(Housemate.objects.count(), 1)
housemate = Housemate.objects.get()
user = housemate.user
account = housemate.account
self.assertEqual(user.username, 'newusername')
self.assertEqual(user.email, 'new@email.com')
self.assertEqual(user.first_name, 'Jim')
self.assertEqual(user.last_name, 'Smith')
self.assertEqual(account.name, 'Jim Smith')
class HousemateUpdateFormTestCase(DataProvider, TestCase):
def setUp(self):
self.other_user = User.objects.create(
username='otheruser',
email='other@user.com',
first_name='Other',
last_name='User',
)
self.user = User.objects.create(
username='testuser',
email='test@example.com',
first_name='Joe',
last_name='Bloggs',
)
self.parent_account = self.account(
name='Housemate Income',
type=Account.TYPES.income,
)
self.housemate = Housemate.objects.create(
user=self.user,
account=self.account('Joe Blogs', parent=self.parent_account)
)
def test_valid(self):
form = HousemateUpdateForm(data=dict(
username='testuser',
email='test@example.com',
first_name='Joe',
last_name='Bloggs',
), instance=self.housemate)
self.assertTrue(form.is_valid(), form.errors)
def test_username_in_use(self):
form = HousemateUpdateForm(data=dict(
username='otheruser',
email='test@example.com',
first_name='Joe',
last_name='Bloggs',
), instance=self.housemate)
self.assertFalse(form.is_valid())
def test_email_in_use(self):
form = HousemateUpdateForm(data=dict(
username='testuser',
email='other@user.com',
first_name='Joe',
last_name='Bloggs',
), instance=self.housemate)
self.assertFalse(form.is_valid())
"""
This module performs all basic DFA operations.
It is an interface for pyfst.
"""
from operator import attrgetter
import fst
from alphabet import createalphabet
EPSILON = fst.EPSILON
def TropicalWeight(param):
"""
Returns fst TropicalWeight
Args:
param (str): The input
    Returns:
        fst.TropicalWeight: The arc weight
"""
return fst.TropicalWeight(param)
class FstDFA(fst.StdAcceptor):
"""
    Contains extra methods to consume input and produce output.
    The underlying library is pyfst, the Python bindings of the OpenFST library.
"""
    def __init__(self, alphabet=None):
        """
        Args:
            alphabet (list): pyfst input symbol list
        Returns:
            None
        """
        # avoid a mutable default argument shared between instances
        if alphabet is None:
            alphabet = createalphabet()
        isyms = None
        self.alphabet = alphabet
        fst.StdAcceptor.__init__(self, isyms)
        for num, char in enumerate(self.alphabet, 1):
            self.isyms[char] = num
def fixminimized(self, alphabet):
"""
        pyfst minimization removes all unused arcs and sink states,
        which may break compatibility with operations that expect a
        complete DFA. This method restores a sink state and the
        missing transitions.
Args:
alphabet (list): The input alphabet
Returns:
None
"""
endstate = len(list(self.states))
for state in self.states:
for char in alphabet:
found = 0
for arc in state.arcs:
if self.isyms.find(arc.ilabel) == char:
found = 1
break
if found == 0:
self.add_arc(state.stateid, endstate, char)
        # an infinite tropical weight marks the sink state as non-final
        self[endstate].final = TropicalWeight(float('inf'))
for char in alphabet:
self.add_arc(endstate, endstate, char)
def _addsink(self, alphabet):
"""
Adds a sink state
Args:
alphabet (list): The input alphabet
Returns:
None
"""
endstate = len(list(self.states))
for state in self.states:
for char in alphabet:
found = 0
for arc in state.arcs:
if self.isyms.find(arc.ilabel) == char:
found = 1
break
if found == 0:
self.add_arc(state.stateid, endstate, char)
self[endstate].final = TropicalWeight(float('inf'))
for char in alphabet:
self.add_arc(endstate, endstate, char)
def _path_to_str(self, path):
"""
Convert a path to the string representing the path
Args:
path (tuple): A tuple of arcs
Returns:
            inp (str): The path concatenated as a string
"""
inp = ''
for arc in path:
i = self.isyms.find(arc.ilabel)
            # Ignore epsilon transitions on the input
if i != fst.EPSILON:
inp += i
return inp
def init_from_acceptor(self, acceptor):
"""
Adds a sink state
Args:
alphabet (list): The input alphabet
Returns:
None
"""
states = sorted(
acceptor.states,
key=attrgetter('initial'),
reverse=True)
for state in states:
for arc in state.arcs:
itext = acceptor.isyms.find(arc.ilabel)
if itext in self.alphabet:
self.add_arc(state.stateid, arc.nextstate, itext)
if state.final:
self[state.stateid].final = True
if state.initial:
self[state.stateid].initial = True
def consume_input(self, inp):
"""
        Return True/False if the machine accepts/rejects the input.
Args:
inp (str): input string to be consumed
Returns:
bool: A true or false value depending on if the DFA
accepts the provided input
"""
cur_state = sorted(
self.states,
key=attrgetter('initial'),
reverse=True)[0]
while len(inp) > 0:
found = False
for arc in cur_state.arcs:
if self.isyms.find(arc.ilabel) == inp[0]:
cur_state = self[arc.nextstate]
inp = inp[1:]
found = True
break
if not found:
return False
return cur_state.final != TropicalWeight(float('inf'))
def empty(self):
"""""
Return True if the DFA accepts the empty language.
"""
return len(list(self.states)) == 0
def random_strings(self, string_length=1):
"""
Generate string_length random strings that belong to the automaton.
Args:
string_length (integer): The size of the random string
        Returns:
            list: The generated strings
"""
str_list = []
for path in self.uniform_generate(string_length):
str_list.append(self._path_to_str(path))
return str_list
def complement(self, alphabet):
"""
Generate the complement of a DFA automaton
Args:
alphabet (list): The input alphabet
Returns:
None
"""
self._addsink(alphabet)
states = sorted(self.states, key=attrgetter('initial'), reverse=True)
for state in states:
if state.final:
state.final = False
else:
state.final = True
def save(self, txt_fst_filename):
"""
Save the machine in the openFST format in the file denoted by
txt_fst_filename.
Args:
txt_fst_filename (str): The name of the file
Returns:
None
"""
txt_fst = open(txt_fst_filename, 'w+')
states = sorted(self.states, key=attrgetter('initial'), reverse=True)
for state in states:
for arc in state.arcs:
itext = self.isyms.find(arc.ilabel)
otext = self.osyms.find(arc.ilabel)
txt_fst.write(
'{}\t{}\t{}\t{}\n'.format(
state.stateid,
arc.nextstate,
itext.encode('hex'),
otext.encode('hex')))
if state.final:
txt_fst.write('{}\n'.format(state.stateid))
txt_fst.close()
def load(self, txt_fst_filename):
"""
        Load the transducer from the text file format of OpenFST.
The format is specified as follows:
arc format: src dest ilabel olabel [weight]
final state format: state [weight]
lines may occur in any order except initial state must be first line
Args:
txt_fst_filename (string): The name of the file
Returns:
None
"""
with open(txt_fst_filename, 'r') as txt_fst:
for line in txt_fst:
line = line.strip()
splitted_line = line.split()
if len(splitted_line) == 1:
self[int(splitted_line[0])].final = True
else:
self.add_arc(int(splitted_line[0]), int(
splitted_line[1]), splitted_line[2].decode('hex'))
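# Illustrative sketch (Python 2, requires pyfst): build a two-state DFA
# over {'a', 'b'} that accepts strings ending in 'a', then query it. The
# state and arc calls follow the conventions used in this module.
def _example_fstdfa():
    dfa = FstDFA(alphabet=['a', 'b'])
    dfa.add_arc(0, 1, 'a')
    dfa.add_arc(0, 0, 'b')
    dfa.add_arc(1, 1, 'a')
    dfa.add_arc(1, 0, 'b')
    dfa[0].initial = True
    dfa[1].final = True
    return dfa.consume_input('bba')  # True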
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import Group, Permission
from django.forms.models import inlineformset_factory
from wagtail.wagtailcore import hooks
from wagtail.wagtailadmin.widgets import AdminPageChooser
from wagtail.wagtailusers.models import UserProfile
from wagtail.wagtailcore.models import UserPagePermissionsProxy, GroupPagePermission
User = get_user_model()
# The standard fields each user model is expected to have, as a minimum.
standard_fields = set(['email', 'first_name', 'last_name', 'is_superuser', 'groups'])
class UsernameForm(forms.ModelForm):
"""
    Intelligently sets up the username field if it is in fact a username.
    If the User model has been swapped out and the username field is an
    email address or something else, don't touch it.
"""
def __init__(self, *args, **kwargs):
super(UsernameForm, self).__init__(*args, **kwargs)
if User.USERNAME_FIELD == 'username':
field = self.fields['username']
field.regex = r"^[\w.@+-]+$"
field.help_text = _("Required. 30 characters or fewer. Letters, "
"digits and @/./+/-/_ only.")
field.error_messages = field.error_messages.copy()
field.error_messages.update({
'invalid': _("This value may contain only letters, numbers "
"and @/./+/-/_ characters.")})
@property
def username_field(self):
return self[User.USERNAME_FIELD]
def separate_username_field(self):
return User.USERNAME_FIELD not in standard_fields
class UserCreationForm(UsernameForm):
required_css_class = "required"
is_superuser = forms.BooleanField(
label=_("Administrator"),
required=False,
help_text=_("If ticked, this user has the ability to manage user accounts.")
)
password1 = forms.CharField(
label=_("Password"),
required=False,
widget=forms.PasswordInput,
help_text=_("Leave blank if not changing."))
password2 = forms.CharField(
label=_("Password confirmation"), required=False,
widget=forms.PasswordInput,
help_text=_("Enter the same password as above, for verification."))
email = forms.EmailField(required=True, label=_("Email"))
first_name = forms.CharField(required=True, label=_("First Name"))
last_name = forms.CharField(required=True, label=_("Last Name"))
class Meta:
model = User
fields = set([User.USERNAME_FIELD]) | standard_fields
widgets = {
'groups': forms.CheckboxSelectMultiple
}
def clean_username(self):
username_field = User.USERNAME_FIELD
username = self.cleaned_data[username_field]
try:
User._default_manager.get(**{username_field: username})
except User.DoesNotExist:
return username
raise forms.ValidationError(
self.error_messages['duplicate_username'],
code='duplicate_username',
)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
# users can access django-admin iff they are a superuser
user.is_staff = user.is_superuser
if commit:
user.save()
self.save_m2m()
return user
# Largely the same as django.contrib.auth.forms.UserCreationForm, but with enough subtle changes
# (to make password non-required) that it isn't worth inheriting...
class UserEditForm(UsernameForm):
required_css_class = "required"
error_messages = {
'duplicate_username': _("A user with that username already exists."),
'password_mismatch': _("The two password fields didn't match."),
}
email = forms.EmailField(required=True, label=_("Email"))
first_name = forms.CharField(required=True, label=_("First Name"))
last_name = forms.CharField(required=True, label=_("Last Name"))
password1 = forms.CharField(
label=_("Password"),
required=False,
widget=forms.PasswordInput,
help_text=_("Leave blank if not changing."))
password2 = forms.CharField(
label=_("Password confirmation"), required=False,
widget=forms.PasswordInput,
help_text=_("Enter the same password as above, for verification."))
is_superuser = forms.BooleanField(
label=_("Administrator"),
required=False,
help_text=_("Administrators have the ability to manage user accounts.")
)
class Meta:
model = User
fields = set([User.USERNAME_FIELD, "is_active"]) | standard_fields
widgets = {
'groups': forms.CheckboxSelectMultiple
}
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
username_field = User.USERNAME_FIELD
try:
User._default_manager.exclude(id=self.instance.id).get(**{
username_field: username})
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
user = super(UserEditForm, self).save(commit=False)
# users can access django-admin iff they are a superuser
user.is_staff = user.is_superuser
if self.cleaned_data["password1"]:
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
self.save_m2m()
return user
class GroupForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(GroupForm, self).__init__(*args, **kwargs)
self.registered_permissions = Permission.objects.none()
for fn in hooks.get_hooks('register_permissions'):
self.registered_permissions = self.registered_permissions | fn()
self.fields['permissions'].queryset = self.registered_permissions
required_css_class = "required"
error_messages = {
'duplicate_name': _("A group with that name already exists."),
}
is_superuser = forms.BooleanField(
label=_("Administrator"),
required=False,
help_text=_("Administrators have the ability to manage user accounts.")
)
class Meta:
model = Group
fields = ("name", "permissions", )
def clean_name(self):
# Since Group.name is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
name = self.cleaned_data["name"]
try:
Group._default_manager.exclude(id=self.instance.id).get(name=name)
except Group.DoesNotExist:
return name
raise forms.ValidationError(self.error_messages['duplicate_name'])
def save(self):
# We go back to the object to read (in order to reapply) the
# permissions which were set on this group, but which are not
# accessible in the wagtail admin interface, as otherwise these would
# be clobbered by this form.
try:
untouchable_permissions = self.instance.permissions.exclude(pk__in=self.registered_permissions)
bool(untouchable_permissions) # force this to be evaluated, as it's about to change
except ValueError:
# this form is not bound; we're probably creating a new group
untouchable_permissions = []
group = super(GroupForm, self).save()
group.permissions.add(*untouchable_permissions)
return group
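# Illustrative sketch: the shape of a ``register_permissions`` hook, as
# consumed in GroupForm.__init__ above. The app label is a hypothetical
# example; a real hook would live in an app's wagtail_hooks.py and be
# registered with ``hooks.register('register_permissions')``.
def _example_register_permissions():
    return Permission.objects.filter(content_type__app_label='myapp')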
class GroupPagePermissionForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(GroupPagePermissionForm, self).__init__(*args, **kwargs)
self.fields['page'].widget = AdminPageChooser()
class Meta:
model = GroupPagePermission
fields = ('page', 'permission_type')
class BaseGroupPagePermissionFormSet(forms.models.BaseInlineFormSet):
def __init__(self, *args, **kwargs):
super(BaseGroupPagePermissionFormSet, self).__init__(*args, **kwargs)
self.form = GroupPagePermissionForm
for form in self.forms:
form.fields['DELETE'].widget = forms.HiddenInput()
@property
def empty_form(self):
empty_form = super(BaseGroupPagePermissionFormSet, self).empty_form
empty_form.fields['DELETE'].widget = forms.HiddenInput()
return empty_form
GroupPagePermissionFormSet = inlineformset_factory(
Group,
GroupPagePermission,
formset=BaseGroupPagePermissionFormSet,
extra=0,
fields=('page', 'permission_type'),
)
class NotificationPreferencesForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(NotificationPreferencesForm, self).__init__(*args, **kwargs)
user_perms = UserPagePermissionsProxy(self.instance.user)
if not user_perms.can_publish_pages():
del self.fields['submitted_notifications']
if not user_perms.can_edit_pages():
del self.fields['approved_notifications']
del self.fields['rejected_notifications']
class Meta:
model = UserProfile
fields = ("submitted_notifications", "approved_notifications", "rejected_notifications")
#
# PythonImage.py -- Abstraction of a generic data image.
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys, time
import numpy
import mimetypes
import os
import hashlib
try:
# do we have Python Imaging Library available?
import PIL.Image as Image
from PIL.ExifTags import TAGS
have_pil = True
except ImportError:
have_pil = False
# We only need one of { have_pilutil, have_qtimage }, but both have
# their strengths
try:
from scipy.misc import pilutil, imsave
have_pilutil = True
except ImportError:
have_pilutil = False
try:
from ginga.qtw.QtHelp import QtCore, QtGui
QImage, QColor = QtGui.QImage, QtGui.QColor
have_qtimage = True
except ImportError:
have_qtimage = False
try:
# How about color management (ICC profile) support?
import ImageCms
have_cms = True
except ImportError:
have_cms = False
# For testing...
#have_qtimage = False
#have_pilutil = False
#have_pil = False
#have_cms = False
from ginga.misc import Bunch
from ginga.BaseImage import BaseImage, ImageError
try:
basedir = os.environ['GINGA_HOME']
except KeyError:
basedir = os.path.join(os.environ['HOME'], '.ginga')
working_profile = os.path.join(basedir, "profiles", "working.icc")
class PythonImage(BaseImage):
def load_file(self, filepath):
kwds = {}
metadata = { 'exif': {}, 'path': filepath }
data_np = self._imload(filepath, kwds)
self.set_data(data_np, metadata=metadata)
self.set(exif=kwds)
def save_file_as(self, filepath):
if not have_pil:
raise ImageError("Install PIL to be able to save images")
data = self.get_data()
imsave(filepath, data)
def copy(self, astype=None):
other = PythonImage()
self.transfer(other, astype=astype)
return other
def get_scaled_cutout_wdht(self, x1, y1, x2, y2, new_wd, new_ht,
method='bicubic'):
# calculate dimensions of NON-scaled cutout
old_wd = x2 - x1 + 1
old_ht = y2 - y1 + 1
self.logger.debug("old=%dx%d new=%dx%d" % (
old_wd, old_ht, new_wd, new_ht))
data = self.get_data()
newdata = data[y1:y2+1, x1:x2+1]
newdata = self._imresize(newdata, new_wd, new_ht, method=method)
ht, wd = newdata.shape[:2]
scale_x = float(wd) / old_wd
scale_y = float(ht) / old_ht
res = Bunch.Bunch(data=newdata, org_fac=1,
scale_x=scale_x, scale_y=scale_y)
return res
def get_scaled_cutout_pil(self, x1, y1, x2, y2, scale_x, scale_y,
method='bicubic'):
# calculate dimensions of NON-scaled cutout
old_wd = x2 - x1 + 1
old_ht = y2 - y1 + 1
new_wd = int(round(scale_x * old_wd))
new_ht = int(round(scale_y * old_ht))
self.logger.debug("old=%dx%d new=%dx%d" % (
old_wd, old_ht, new_wd, new_ht))
data = self.get_data()
newdata = data[y1:y2+1, x1:x2+1]
newdata = self._imresize(newdata, new_wd, new_ht, method=method)
ht, wd = newdata.shape[:2]
scale_x = float(wd) / old_wd
scale_y = float(ht) / old_ht
res = Bunch.Bunch(data=newdata, org_fac=1,
scale_x=scale_x, scale_y=scale_y)
return res
def get_scaled_cutout(self, x1, y1, x2, y2, scale_x, scale_y,
method=None):
        if method is None:
if (have_pilutil or have_qtimage):
method = 'bicubic'
else:
method = 'basic'
if method == 'basic':
return self.get_scaled_cutout_basic(x1, y1, x2, y2,
scale_x, scale_y)
return self.get_scaled_cutout_pil(x1, y1, x2, y2,
scale_x, scale_y,
method=method)
def _imload(self, filepath, kwds):
"""Load an image file, guessing the format, and return a numpy
array containing an RGB image. If EXIF keywords can be read
they are returned in the dict _kwds_.
"""
start_time = time.time()
typ, enc = mimetypes.guess_type(filepath)
if not typ:
typ = 'image/jpeg'
typ, subtyp = typ.split('/')
self.logger.debug("MIME type is %s/%s" % (typ, subtyp))
if (typ == 'image') and (subtyp in ('x-portable-pixmap',
'x-portable-greymap')):
# Special opener for PPM files, preserves high bit depth
means = 'built-in'
data_np = open_ppm(filepath)
elif have_pil:
# PIL seems to be the faster loader than QImage, and can
# return EXIF info, where QImage will not.
means = 'PIL'
image = Image.open(filepath)
try:
info = image._getexif()
for tag, value in info.items():
kwd = TAGS.get(tag, tag)
kwds[kwd] = value
            except Exception as e:
self.logger.warn("Failed to get image metadata: %s" % (str(e)))
# If we have a working color profile then handle any embedded
# profile or color space information, if possible
if have_cms and os.path.exists(working_profile):
# Assume sRGB image, unless we learn to the contrary
in_profile = os.path.join(basedir, "profiles", "sRGB.icc")
try:
                    if 'icc_profile' in image.info:
self.logger.debug("image has embedded color profile")
buf_profile = image.info['icc_profile']
# Write out embedded profile (if needed)
prof_md5 = hashlib.md5(buf_profile).hexdigest()
in_profile = "/tmp/_image_%d_%s.icc" % (
os.getpid(), prof_md5)
if not os.path.exists(in_profile):
with open(in_profile, 'w') as icc_f:
icc_f.write(buf_profile)
# see if there is any EXIF tag about the colorspace
                    elif 'ColorSpace' in kwds:
csp = kwds['ColorSpace']
iop = kwds.get('InteroperabilityIndex', None)
if (csp == 0x2) or (csp == 0xffff):
# NOTE: 0xffff is really "undefined" and should be
# combined with a test of EXIF tag 0x0001
# ('InteropIndex') == 'R03', but PIL _getexif()
# does not return the InteropIndex
in_profile = os.path.join(basedir, "profiles",
"AdobeRGB.icc")
self.logger.debug("hmm..this looks like an AdobeRGB image")
elif csp == 0x1:
self.logger.debug("hmm..this looks like a sRGB image")
in_profile = os.path.join(basedir, "profiles",
"sRGB.icc")
else:
self.logger.debug("no color space metadata, assuming this is an sRGB image")
# if we have a valid input profile, try the conversion
if in_profile:
image = convert_profile_pil(image, in_profile,
working_profile)
self.logger.info("converted from profile (%s) to profile (%s)" % (
in_profile, working_profile))
                except Exception as e:
self.logger.error("Error converting from embedded color profile: %s" % (str(e)))
self.logger.warn("Leaving image unprofiled.")
data_np = numpy.array(image)
elif have_qtimage:
means = 'QImage'
qimage = QImage()
qimage.load(filepath)
data_np = qimage2numpy(qimage)
else:
raise ImageError("No way to load image format '%s/%s'" % (
typ, subtyp))
end_time = time.time()
self.logger.debug("loading (%s) time %.4f sec" % (
means, end_time - start_time))
return data_np
def imload(self, filepath, kwds):
return self._imload(filepath, kwds)
def _imresize(self, data, new_wd, new_ht, method='bilinear'):
"""Scale an image in numpy array _data_ to the specified width and
height. A smooth scaling is preferred.
"""
old_ht, old_wd = data.shape[:2]
start_time = time.time()
if have_qtimage:
# QImage method is slightly faster and gives a smoother looking
# result than PIL
means = 'QImage'
qimage = numpy2qimage(data)
qimage = qimage.scaled(new_wd, new_ht,
transformMode=QtCore.Qt.SmoothTransformation)
newdata = qimage2numpy(qimage)
elif have_pilutil:
means = 'PIL'
zoom_x = float(new_wd) / float(old_wd)
zoom_y = float(new_ht) / float(old_ht)
if (old_wd >= new_wd) or (old_ht >= new_ht):
# data size is bigger, skip pixels
zoom = max(zoom_x, zoom_y)
else:
zoom = min(zoom_x, zoom_y)
newdata = pilutil.imresize(data, zoom, interp=method)
else:
raise ImageError("No way to scale image smoothly")
end_time = time.time()
self.logger.debug("scaling (%s) time %.4f sec" % (
means, end_time - start_time))
return newdata
# UTILITY FUNCTIONS
def open_ppm(filepath):
    infile = open(filepath, 'r')
    # Get type: PPM or PGM
    header = infile.readline()
    ptype = header.strip().upper()
    if ptype == 'P5':
        depth = 1
    elif ptype == 'P6':
        depth = 3
    else:
        raise ValueError("Unsupported PNM type '%s'" % ptype)
    # Get image dimensions, skipping comment and blank lines
    header = infile.readline().strip()
    while header.startswith('#') or len(header) == 0:
        header = infile.readline().strip()
    width, height = map(int, header.split())
    header = infile.readline()
    # Get unit size
    maxval = int(header)
    if maxval <= 255:
        dtype = numpy.uint8
    elif maxval <= 65535:
        dtype = numpy.uint16
    else:
        raise ValueError("Unsupported maximum value %d" % maxval)
# read image
if depth > 1:
arr = numpy.fromfile(infile, dtype=dtype).reshape((height, width,
depth))
else:
arr = numpy.fromfile(infile, dtype=dtype).reshape((height, width))
if sys.byteorder == 'little':
arr = arr.byteswap()
return arr
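# Illustrative sketch (Python 2): round-trip a tiny binary PGM (P5) file
# through open_ppm(). The temporary file path is an example only.
def _example_open_ppm():
    import tempfile
    data = numpy.arange(6, dtype=numpy.uint8).reshape((2, 3))
    path = tempfile.mktemp(suffix='.pgm')
    with open(path, 'wb') as pgm_f:
        pgm_f.write('P5\n3 2\n255\n')
        pgm_f.write(data.tostring())
    return open_ppm(path)  # a (2, 3) uint8 array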
# --- Credit ---
# the following function set by Hans Meine was found here:
# http://kogs-www.informatik.uni-hamburg.de/~meine/software/vigraqt/qimage2ndarray.py
#
# see also a newer version at
# http://kogs-www.informatik.uni-hamburg.de/~meine/software/qimage2ndarray/
#
def qimage2numpy(qimage):
"""Convert QImage to numpy.ndarray."""
#print "FORMAT IS %s" % str(qimage.format())
result_shape = (qimage.height(), qimage.width())
temp_shape = (qimage.height(),
qimage.bytesPerLine() * 8 / qimage.depth())
if qimage.format() in (QImage.Format_ARGB32_Premultiplied,
QImage.Format_ARGB32,
QImage.Format_RGB32):
dtype = numpy.uint8
result_shape += (4, )
temp_shape += (4, )
else:
raise ValueError("qimage2numpy only supports 32bit and 8bit images")
# FIXME: raise error if alignment does not match
buf = qimage.bits()
if hasattr(buf, 'asstring'):
# Qt4
buf = buf.asstring(qimage.numBytes())
else:
# PySide
buf = bytes(buf)
result = numpy.frombuffer(buf, dtype).reshape(temp_shape)
if result_shape != temp_shape:
result = result[:,:result_shape[1]]
# QImage loads the image as BGRA, we want RGB
#res = numpy.dstack((result[:, :, 2], result[:, :, 1], result[:, :, 0]))
res = numpy.empty((qimage.height(), qimage.width(), 3))
res[:, :, 0] = result[:, :, 2]
res[:, :, 1] = result[:, :, 1]
res[:, :, 2] = result[:, :, 0]
return res
def numpy2qimage(array):
if numpy.ndim(array) == 2:
return gray2qimage(array)
elif numpy.ndim(array) == 3:
return rgb2qimage(array)
raise ValueError("can only convert 2D or 3D arrays")
def gray2qimage(gray):
"""Convert the 2D numpy array `gray` into a 8-bit QImage with a gray
colormap. The first dimension represents the vertical image axis.
    ATTENTION: This QImage carries an attribute `ndarray` with a
reference to the underlying numpy array that holds the data. On
Windows, the conversion into a QPixmap does not copy the data, so
that you have to take care that the QImage does not get garbage
collected (otherwise PyQt will throw away the wrapper, effectively
freeing the underlying memory - boom!)."""
if len(gray.shape) != 2:
raise ValueError("gray2QImage can only convert 2D arrays")
gray = numpy.require(gray, numpy.uint8, 'C')
h, w = gray.shape
result = QImage(gray.data, w, h, QImage.Format_Indexed8)
result.ndarray = gray
for i in range(256):
result.setColor(i, QColor(i, i, i).rgb())
return result
def rgb2qimage(rgb):
"""Convert the 3D numpy array `rgb` into a 32-bit QImage. `rgb` must
have three dimensions with the vertical, horizontal and RGB image axes.
    ATTENTION: This QImage carries an attribute `ndarray` with a
reference to the underlying numpy array that holds the data. On
Windows, the conversion into a QPixmap does not copy the data, so
that you have to take care that the QImage does not get garbage
collected (otherwise PyQt will throw away the wrapper, effectively
freeing the underlying memory - boom!)."""
if len(rgb.shape) != 3:
raise ValueError("rgb2QImage can only convert 3D arrays")
    if rgb.shape[2] not in (3, 4):
        raise ValueError("rgb2QImage expects the last dimension to contain exactly three (R,G,B) or four (R,G,B,A) channels")
h, w, channels = rgb.shape
# Qt expects 32bit BGRA data for color images:
bgra = numpy.empty((h, w, 4), numpy.uint8, 'C')
bgra[...,0] = rgb[...,2]
bgra[...,1] = rgb[...,1]
bgra[...,2] = rgb[...,0]
if rgb.shape[2] == 3:
bgra[...,3].fill(255)
fmt = QImage.Format_RGB32
else:
bgra[...,3] = rgb[...,3]
fmt = QImage.Format_ARGB32
result = QImage(bgra.data, w, h, fmt)
result.ndarray = bgra
return result
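# Illustrative sketch (requires a Qt binding): convert a small RGB array
# to a QImage and back again with the helpers above.
def _example_qimage_roundtrip():
    rgb = numpy.zeros((4, 4, 3), numpy.uint8)
    rgb[..., 0] = 255  # a solid red image
    qimage = rgb2qimage(rgb)
    return qimage2numpy(qimage)  # an array of shape (4, 4, 3)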
# --- end QImage to numpy conversion functions ---
def convert_profile_pil(image_pil, inprof_path, outprof_path):
image_out = ImageCms.profileToProfile(image_pil, inprof_path,
outprof_path)
return image_out
def convert_profile_numpy(image_np, inprof_path, outprof_path):
if not have_pilutil:
return image_np
in_image_pil = pilutil.toimage(image_np)
    out_image_pil = convert_profile_pil(in_image_pil,
                                        inprof_path, outprof_path)
    image_np = pilutil.fromimage(out_image_pil)
    return image_np
#END
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
from collections.abc import Iterable
import json
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request, PreparedRequest
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.region_instances import RegionInstancesClient
from google.cloud.compute_v1.services.region_instances import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert RegionInstancesClient._get_default_mtls_endpoint(None) is None
assert (
RegionInstancesClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
RegionInstancesClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
RegionInstancesClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
RegionInstancesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
RegionInstancesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize(
"client_class,transport_name", [(RegionInstancesClient, "rest"),]
)
def test_region_instances_client_from_service_account_info(
client_class, transport_name
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info, transport=transport_name)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
@pytest.mark.parametrize(
"transport_class,transport_name",
[(transports.RegionInstancesRestTransport, "rest"),],
)
def test_region_instances_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize(
"client_class,transport_name", [(RegionInstancesClient, "rest"),]
)
def test_region_instances_client_from_service_account_file(
client_class, transport_name
):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json(
"dummy/file/path.json", transport=transport_name
)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == (
"compute.googleapis.com{}".format(":443")
if transport_name in ["grpc", "grpc_asyncio"]
else "https://{}".format("compute.googleapis.com")
)
def test_region_instances_client_get_transport_class():
transport = RegionInstancesClient.get_transport_class()
available_transports = [
transports.RegionInstancesRestTransport,
]
assert transport in available_transports
transport = RegionInstancesClient.get_transport_class("rest")
assert transport == transports.RegionInstancesRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(RegionInstancesClient, transports.RegionInstancesRestTransport, "rest"),],
)
@mock.patch.object(
RegionInstancesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(RegionInstancesClient),
)
def test_region_instances_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(RegionInstancesClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(RegionInstancesClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
RegionInstancesClient,
transports.RegionInstancesRestTransport,
"rest",
"true",
),
(
RegionInstancesClient,
transports.RegionInstancesRestTransport,
"rest",
"false",
),
],
)
@mock.patch.object(
RegionInstancesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(RegionInstancesClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_region_instances_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [RegionInstancesClient])
@mock.patch.object(
RegionInstancesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(RegionInstancesClient),
)
def test_region_instances_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(RegionInstancesClient, transports.RegionInstancesRestTransport, "rest"),],
)
def test_region_instances_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[(RegionInstancesClient, transports.RegionInstancesRestTransport, "rest", None),],
)
def test_region_instances_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"request_type", [compute.BulkInsertRegionInstanceRequest, dict,]
)
def test_bulk_insert_unary_rest(request_type):
client = RegionInstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request_init["bulk_insert_instance_resource_resource"] = {
"count": 553,
"instance_properties": {
"advanced_machine_features": {
"enable_nested_virtualization": True,
"enable_uefi_networking": True,
"threads_per_core": 1689,
},
"can_ip_forward": True,
"confidential_instance_config": {"enable_confidential_compute": True},
"description": "description_value",
"disks": [
{
"auto_delete": True,
"boot": True,
"device_name": "device_name_value",
"disk_encryption_key": {
"kms_key_name": "kms_key_name_value",
"kms_key_service_account": "kms_key_service_account_value",
"raw_key": "raw_key_value",
"rsa_encrypted_key": "rsa_encrypted_key_value",
"sha256": "sha256_value",
},
"disk_size_gb": 1261,
"guest_os_features": [{"type_": "type__value"}],
"index": 536,
"initialize_params": {
"description": "description_value",
"disk_name": "disk_name_value",
"disk_size_gb": 1261,
"disk_type": "disk_type_value",
"labels": {},
"licenses": ["licenses_value_1", "licenses_value_2"],
"on_update_action": "on_update_action_value",
"provisioned_iops": 1740,
"resource_policies": [
"resource_policies_value_1",
"resource_policies_value_2",
],
"source_image": "source_image_value",
"source_image_encryption_key": {},
"source_snapshot": "source_snapshot_value",
"source_snapshot_encryption_key": {},
},
"interface": "interface_value",
"kind": "kind_value",
"licenses": ["licenses_value_1", "licenses_value_2"],
"mode": "mode_value",
"shielded_instance_initial_state": {
"dbs": [
{"content": "content_value", "file_type": "file_type_value"}
],
"dbxs": {},
"keks": {},
"pk": {},
},
"source": "source_value",
"type_": "type__value",
}
],
"guest_accelerators": [
{
"accelerator_count": 1805,
"accelerator_type": "accelerator_type_value",
}
],
"labels": {},
"machine_type": "machine_type_value",
"metadata": {
"fingerprint": "fingerprint_value",
"items": [{"key": "key_value", "value": "value_value"}],
"kind": "kind_value",
},
"min_cpu_platform": "min_cpu_platform_value",
"network_interfaces": [
{
"access_configs": [
{
"external_ipv6": "external_ipv6_value",
"external_ipv6_prefix_length": 2837,
"kind": "kind_value",
"name": "name_value",
"nat_i_p": "nat_i_p_value",
"network_tier": "network_tier_value",
"public_ptr_domain_name": "public_ptr_domain_name_value",
"set_public_ptr": True,
"type_": "type__value",
}
],
"alias_ip_ranges": [
{
"ip_cidr_range": "ip_cidr_range_value",
"subnetwork_range_name": "subnetwork_range_name_value",
}
],
"fingerprint": "fingerprint_value",
"ipv6_access_configs": {},
"ipv6_access_type": "ipv6_access_type_value",
"ipv6_address": "ipv6_address_value",
"kind": "kind_value",
"name": "name_value",
"network": "network_value",
"network_i_p": "network_i_p_value",
"nic_type": "nic_type_value",
"queue_count": 1197,
"stack_type": "stack_type_value",
"subnetwork": "subnetwork_value",
}
],
"network_performance_config": {
"total_egress_bandwidth_tier": "total_egress_bandwidth_tier_value"
},
"private_ipv6_google_access": "private_ipv6_google_access_value",
"reservation_affinity": {
"consume_reservation_type": "consume_reservation_type_value",
"key": "key_value",
"values": ["values_value_1", "values_value_2"],
},
"resource_manager_tags": {},
"resource_policies": [
"resource_policies_value_1",
"resource_policies_value_2",
],
"scheduling": {
"automatic_restart": True,
"instance_termination_action": "instance_termination_action_value",
"location_hint": "location_hint_value",
"min_node_cpus": 1379,
"node_affinities": [
{
"key": "key_value",
"operator": "operator_value",
"values": ["values_value_1", "values_value_2"],
}
],
"on_host_maintenance": "on_host_maintenance_value",
"preemptible": True,
"provisioning_model": "provisioning_model_value",
},
"service_accounts": [
{"email": "email_value", "scopes": ["scopes_value_1", "scopes_value_2"]}
],
"shielded_instance_config": {
"enable_integrity_monitoring": True,
"enable_secure_boot": True,
"enable_vtpm": True,
},
"tags": {
"fingerprint": "fingerprint_value",
"items": ["items_value_1", "items_value_2"],
},
},
"location_policy": {"locations": {}},
"min_count": 972,
"name_pattern": "name_pattern_value",
"per_instance_properties": {},
"source_instance_template": "source_instance_template_value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.bulk_insert_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_bulk_insert_unary_rest_required_fields(
request_type=compute.BulkInsertRegionInstanceRequest,
):
transport_class = transports.RegionInstancesRestTransport
request_init = {}
request_init["project"] = ""
request_init["region"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).bulk_insert._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["region"] = "region_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).bulk_insert._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixing in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "region" in jsonified_request
assert jsonified_request["region"] == "region_value"
client = RegionInstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will fail the real version if the http_options
# expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.bulk_insert_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_bulk_insert_unary_rest_unset_required_fields():
transport = transports.RegionInstancesRestTransport(
        credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.bulk_insert._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",))
& set(("bulkInsertInstanceResourceResource", "project", "region",))
)
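# Illustrative note (added): the assertion above is plain set algebra. The
# intersection of the optional query parameters ({"requestId"}) with the
# required fields ({"bulkInsertInstanceResourceResource", "project", "region"})
# is empty, i.e. no field is both optional and required.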
@pytest.mark.parametrize("null_interceptor", [True, False])
def test_bulk_insert_unary_rest_interceptors(null_interceptor):
transport = transports.RegionInstancesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
interceptor=None
if null_interceptor
else transports.RegionInstancesRestInterceptor(),
)
client = RegionInstancesClient(transport=transport)
with mock.patch.object(
type(client.transport._session), "request"
) as req, mock.patch.object(
path_template, "transcode"
) as transcode, mock.patch.object(
transports.RegionInstancesRestInterceptor, "post_bulk_insert"
) as post, mock.patch.object(
transports.RegionInstancesRestInterceptor, "pre_bulk_insert"
) as pre:
pre.assert_not_called()
post.assert_not_called()
transcode.return_value = {
"method": "post",
"uri": "my_uri",
"body": None,
"query_params": {},
}
req.return_value = Response()
req.return_value.status_code = 200
req.return_value.request = PreparedRequest()
req.return_value._content = compute.Operation.to_json(compute.Operation())
request = compute.BulkInsertRegionInstanceRequest()
metadata = [
("key", "val"),
("cephalopod", "squid"),
]
pre.return_value = request, metadata
        post.return_value = compute.Operation()
client.bulk_insert_unary(
request, metadata=[("key", "val"), ("cephalopod", "squid"),]
)
pre.assert_called_once()
post.assert_called_once()
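# Added commentary: the interceptor test above only verifies call order; in a
# real RegionInstancesRestInterceptor, pre_bulk_insert may rewrite the request
# and metadata before transcoding, and post_bulk_insert may rewrite the
# Operation before it is returned to the caller.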
def test_bulk_insert_unary_rest_bad_request(
transport: str = "rest", request_type=compute.BulkInsertRegionInstanceRequest
):
client = RegionInstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "region": "sample2"}
request_init["bulk_insert_instance_resource_resource"] = {
"count": 553,
"instance_properties": {
"advanced_machine_features": {
"enable_nested_virtualization": True,
"enable_uefi_networking": True,
"threads_per_core": 1689,
},
"can_ip_forward": True,
"confidential_instance_config": {"enable_confidential_compute": True},
"description": "description_value",
"disks": [
{
"auto_delete": True,
"boot": True,
"device_name": "device_name_value",
"disk_encryption_key": {
"kms_key_name": "kms_key_name_value",
"kms_key_service_account": "kms_key_service_account_value",
"raw_key": "raw_key_value",
"rsa_encrypted_key": "rsa_encrypted_key_value",
"sha256": "sha256_value",
},
"disk_size_gb": 1261,
"guest_os_features": [{"type_": "type__value"}],
"index": 536,
"initialize_params": {
"description": "description_value",
"disk_name": "disk_name_value",
"disk_size_gb": 1261,
"disk_type": "disk_type_value",
"labels": {},
"licenses": ["licenses_value_1", "licenses_value_2"],
"on_update_action": "on_update_action_value",
"provisioned_iops": 1740,
"resource_policies": [
"resource_policies_value_1",
"resource_policies_value_2",
],
"source_image": "source_image_value",
"source_image_encryption_key": {},
"source_snapshot": "source_snapshot_value",
"source_snapshot_encryption_key": {},
},
"interface": "interface_value",
"kind": "kind_value",
"licenses": ["licenses_value_1", "licenses_value_2"],
"mode": "mode_value",
"shielded_instance_initial_state": {
"dbs": [
{"content": "content_value", "file_type": "file_type_value"}
],
"dbxs": {},
"keks": {},
"pk": {},
},
"source": "source_value",
"type_": "type__value",
}
],
"guest_accelerators": [
{
"accelerator_count": 1805,
"accelerator_type": "accelerator_type_value",
}
],
"labels": {},
"machine_type": "machine_type_value",
"metadata": {
"fingerprint": "fingerprint_value",
"items": [{"key": "key_value", "value": "value_value"}],
"kind": "kind_value",
},
"min_cpu_platform": "min_cpu_platform_value",
"network_interfaces": [
{
"access_configs": [
{
"external_ipv6": "external_ipv6_value",
"external_ipv6_prefix_length": 2837,
"kind": "kind_value",
"name": "name_value",
"nat_i_p": "nat_i_p_value",
"network_tier": "network_tier_value",
"public_ptr_domain_name": "public_ptr_domain_name_value",
"set_public_ptr": True,
"type_": "type__value",
}
],
"alias_ip_ranges": [
{
"ip_cidr_range": "ip_cidr_range_value",
"subnetwork_range_name": "subnetwork_range_name_value",
}
],
"fingerprint": "fingerprint_value",
"ipv6_access_configs": {},
"ipv6_access_type": "ipv6_access_type_value",
"ipv6_address": "ipv6_address_value",
"kind": "kind_value",
"name": "name_value",
"network": "network_value",
"network_i_p": "network_i_p_value",
"nic_type": "nic_type_value",
"queue_count": 1197,
"stack_type": "stack_type_value",
"subnetwork": "subnetwork_value",
}
],
"network_performance_config": {
"total_egress_bandwidth_tier": "total_egress_bandwidth_tier_value"
},
"private_ipv6_google_access": "private_ipv6_google_access_value",
"reservation_affinity": {
"consume_reservation_type": "consume_reservation_type_value",
"key": "key_value",
"values": ["values_value_1", "values_value_2"],
},
"resource_manager_tags": {},
"resource_policies": [
"resource_policies_value_1",
"resource_policies_value_2",
],
"scheduling": {
"automatic_restart": True,
"instance_termination_action": "instance_termination_action_value",
"location_hint": "location_hint_value",
"min_node_cpus": 1379,
"node_affinities": [
{
"key": "key_value",
"operator": "operator_value",
"values": ["values_value_1", "values_value_2"],
}
],
"on_host_maintenance": "on_host_maintenance_value",
"preemptible": True,
"provisioning_model": "provisioning_model_value",
},
"service_accounts": [
{"email": "email_value", "scopes": ["scopes_value_1", "scopes_value_2"]}
],
"shielded_instance_config": {
"enable_integrity_monitoring": True,
"enable_secure_boot": True,
"enable_vtpm": True,
},
"tags": {
"fingerprint": "fingerprint_value",
"items": ["items_value_1", "items_value_2"],
},
},
"location_policy": {"locations": {}},
"min_count": 972,
"name_pattern": "name_pattern_value",
"per_instance_properties": {},
"source_instance_template": "source_instance_template_value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.bulk_insert_unary(request)
def test_bulk_insert_unary_rest_flattened():
client = RegionInstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "region": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
region="region_value",
bulk_insert_instance_resource_resource=compute.BulkInsertInstanceResource(
count=553
),
)
mock_args.update(sample_request)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
client.bulk_insert_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"%s/compute/v1/projects/{project}/regions/{region}/instances/bulkInsert"
% client.transport._host,
args[1],
)
def test_bulk_insert_unary_rest_flattened_error(transport: str = "rest"):
client = RegionInstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.bulk_insert_unary(
compute.BulkInsertRegionInstanceRequest(),
project="project_value",
region="region_value",
bulk_insert_instance_resource_resource=compute.BulkInsertInstanceResource(
count=553
),
)
def test_bulk_insert_unary_rest_error():
client = RegionInstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.RegionInstancesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionInstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.RegionInstancesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionInstancesClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.RegionInstancesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = RegionInstancesClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = RegionInstancesClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.RegionInstancesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = RegionInstancesClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.RegionInstancesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = RegionInstancesClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize("transport_class", [transports.RegionInstancesRestTransport,])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_region_instances_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.RegionInstancesTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_region_instances_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.region_instances.transports.RegionInstancesTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.RegionInstancesTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = ("bulk_insert",)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_region_instances_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.region_instances.transports.RegionInstancesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RegionInstancesTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_region_instances_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.region_instances.transports.RegionInstancesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.RegionInstancesTransport()
adc.assert_called_once()
def test_region_instances_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
RegionInstancesClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_region_instances_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.RegionInstancesRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_region_instances_host_no_port(transport_name):
client = RegionInstancesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:443"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com"
)
@pytest.mark.parametrize("transport_name", ["rest",])
def test_region_instances_host_with_port(transport_name):
client = RegionInstancesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
transport=transport_name,
)
assert client.transport._host == (
"compute.googleapis.com:8000"
if transport_name in ["grpc", "grpc_asyncio"]
else "https://compute.googleapis.com:8000"
)
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = RegionInstancesClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = RegionInstancesClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = RegionInstancesClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = RegionInstancesClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = RegionInstancesClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = RegionInstancesClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = RegionInstancesClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = RegionInstancesClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = RegionInstancesClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = RegionInstancesClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = RegionInstancesClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = RegionInstancesClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = RegionInstancesClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = RegionInstancesClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = RegionInstancesClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.RegionInstancesTransport, "_prep_wrapped_messages"
) as prep:
client = RegionInstancesClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.RegionInstancesTransport, "_prep_wrapped_messages"
) as prep:
transport_class = RegionInstancesClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_transport_close():
transports = {
"rest": "_session",
}
for transport, close_name in transports.items():
client = RegionInstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
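# Added note: the REST transport owns a requests Session (its "_session"
# attribute); entering and leaving the client as a context manager is what
# closes that session, which is exactly what the mock above verifies.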
def test_client_ctx():
transports = [
"rest",
]
for transport in transports:
client = RegionInstancesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[(RegionInstancesClient, transports.RegionInstancesRestTransport),],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
from mpi4py import MPI
import mpiunittest as unittest
import arrayimpl
import sys, os, tempfile
def subTestIO(case, *args, **kwargs):
for array, typecode in arrayimpl.subTest(case, *args, **kwargs):
if unittest.is_mpi_gpu('mvapich2', array): continue
yield array, typecode
class BaseTestIO(object):
COMM = MPI.COMM_NULL
FILE = MPI.FILE_NULL
prefix = 'mpi4py-'
def setUp(self):
comm = self.COMM
fname = None
if comm.Get_rank() == 0:
fd, fname = tempfile.mkstemp(prefix=self.prefix)
os.close(fd)
fname = comm.bcast(fname, 0)
amode = MPI.MODE_RDWR | MPI.MODE_CREATE
amode |= MPI.MODE_DELETE_ON_CLOSE
amode |= MPI.MODE_UNIQUE_OPEN
info = MPI.INFO_NULL
try:
self.FILE = MPI.File.Open(comm, fname, amode, info)
except Exception:
if comm.Get_rank() == 0:
os.remove(fname)
raise
def tearDown(self):
if self.FILE:
self.FILE.Close()
self.COMM.Barrier()
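    # Added note: modules like this one are normally launched under an MPI
    # runner, e.g. (assuming a standard mpiexec on PATH):
    #     mpiexec -n 2 python test_io.py
    # Each test below writes with one access pattern, then reads the data back
    # into a buffer padded by one sentinel element (-1) to verify that no
    # extra bytes were transferred.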
# non-collective
def testReadWriteAt(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(42, typecode, count)
fh.Write_at(count*rank, wbuf.as_raw())
fh.Sync()
comm.Barrier()
fh.Sync()
rbuf = array(-1, typecode, count+1)
fh.Read_at(count*rank, rbuf.as_mpi_c(count))
for value in rbuf[:-1]:
self.assertEqual(value, 42)
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
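    # Added note: the Sync / Barrier / Sync sequence used throughout follows
    # the MPI I/O consistency rules: flush local writes, synchronize all
    # ranks, then refresh each rank's view of the file before reading it back.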
def testIReadIWriteAt(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(42, typecode, count)
fh.Iwrite_at(count*rank, wbuf.as_raw()).Wait()
fh.Sync()
comm.Barrier()
fh.Sync()
rbuf = array(-1, typecode, count+1)
fh.Iread_at(count*rank, rbuf.as_mpi_c(count)).Wait()
for value in rbuf[:-1]:
self.assertEqual(value, 42)
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
def testReadWrite(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(42, typecode, count)
for r in range(size):
if r == rank:
fh.Seek(0, MPI.SEEK_SET)
fh.Write(wbuf.as_raw())
fh.Sync()
comm.Barrier()
fh.Sync()
for n in range(0, len(wbuf)):
rbuf = array(-1, typecode, n+1)
fh.Seek(0, MPI.SEEK_SET)
fh.Read(rbuf.as_mpi_c(n))
for value in rbuf[:-1]:
self.assertEqual(value, 42)
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
def testIReadIWrite(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(42, typecode, count)
for r in range(size):
if r == rank:
fh.Seek(0, MPI.SEEK_SET)
fh.Iwrite(wbuf.as_raw()).Wait()
fh.Sync()
comm.Barrier()
fh.Sync()
for n in range(0, len(wbuf)):
rbuf = array(-1, typecode, n+1)
fh.Seek(0, MPI.SEEK_SET)
fh.Iread(rbuf.as_mpi_c(n)).Wait()
for value in rbuf[:-1]:
self.assertEqual(value, 42)
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
def testReadWriteShared(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(rank%42, typecode, count)
fh.Seek_shared(0, MPI.SEEK_SET)
fh.Write_shared(wbuf.as_raw())
fh.Sync()
comm.Barrier()
fh.Sync()
rbuf = array(-1, typecode, count+1)
fh.Seek_shared(0, MPI.SEEK_SET)
fh.Read_shared(rbuf.as_mpi_c(count))
for value in rbuf[:-1]:
                self.assertTrue(0 <= value < 42)
self.assertEqual(value, rbuf[0])
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
def testIReadIWriteShared(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(rank%42, typecode, count)
fh.Seek_shared(0, MPI.SEEK_SET)
fh.Iwrite_shared(wbuf.as_raw()).Wait()
fh.Sync()
comm.Barrier()
fh.Sync()
rbuf = array(-1, typecode, count+1)
fh.Seek_shared(0, MPI.SEEK_SET)
fh.Iread_shared(rbuf.as_mpi_c(count)).Wait()
for value in rbuf[:-1]:
                self.assertTrue(0 <= value < 42)
self.assertEqual(value, rbuf[0])
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
# collective
def testReadWriteAtAll(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(42, typecode, count)
fh.Write_at_all(count*rank, wbuf.as_raw())
fh.Sync()
comm.Barrier()
fh.Sync()
rbuf = array(-1, typecode, count+1)
fh.Read_at_all(count*rank, rbuf.as_mpi_c(count))
for value in rbuf[:-1]:
self.assertEqual(value, 42)
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
@unittest.skipMPI('SpectrumMPI')
def testIReadIWriteAtAll(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
try: # MPI 3.1
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(42, typecode, count)
fh.Iwrite_at_all(count*rank, wbuf.as_raw()).Wait()
fh.Sync()
comm.Barrier()
fh.Sync()
rbuf = array(-1, typecode, count+1)
fh.Iread_at_all(count*rank, rbuf.as_mpi_c(count)).Wait()
for value in rbuf[:-1]:
self.assertEqual(value, 42)
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
except NotImplementedError:
if MPI.Get_version() >= (3, 1): raise
self.skipTest('mpi-iwrite_at_all')
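    # Added note: Iwrite_at_all/Iread_at_all are MPI 3.1 additions; the
    # NotImplementedError handling above skips them on pre-3.1 libraries but
    # re-raises if an MPI that reports version >= 3.1 fails to provide them.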
def testReadWriteAtAllBeginEnd(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(42, typecode, count)
fh.Write_at_all_begin(count*rank, wbuf.as_raw())
fh.Write_at_all_end(wbuf.as_raw())
fh.Sync()
comm.Barrier()
fh.Sync()
rbuf = array(-1, typecode, count+1)
fh.Read_at_all_begin(count*rank, rbuf.as_mpi_c(count))
fh.Read_at_all_end(rbuf.as_raw())
for value in rbuf[:-1]:
self.assertEqual(value, 42)
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
def testReadWriteAll(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(42, typecode, count)
fh.Seek(count*rank, MPI.SEEK_SET)
fh.Write_all(wbuf.as_raw())
fh.Sync()
comm.Barrier()
fh.Sync()
rbuf = array(-1, typecode, count+1)
fh.Seek(count*rank, MPI.SEEK_SET)
fh.Read_all(rbuf.as_mpi_c(count))
for value in rbuf[:-1]:
self.assertEqual(value, 42)
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
@unittest.skipMPI('SpectrumMPI')
def testIReadIWriteAll(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
try: # MPI 3.1
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(42, typecode, count)
fh.Seek(count*rank, MPI.SEEK_SET)
fh.Iwrite_all(wbuf.as_raw()).Wait()
fh.Sync()
comm.Barrier()
fh.Sync()
rbuf = array(-1, typecode, count+1)
fh.Seek(count*rank, MPI.SEEK_SET)
fh.Iread_all(rbuf.as_mpi_c(count)).Wait()
for value in rbuf[:-1]:
self.assertEqual(value, 42)
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
except NotImplementedError:
if MPI.Get_version() >= (3, 1): raise
self.skipTest('mpi-iwrite_all')
def testReadWriteAllBeginEnd(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(42, typecode, count)
fh.Seek(count*rank, MPI.SEEK_SET)
fh.Write_all_begin(wbuf.as_raw())
fh.Write_all_end(wbuf.as_raw())
fh.Sync()
comm.Barrier()
fh.Sync()
rbuf = array(-1, typecode, count+1)
fh.Seek(count*rank, MPI.SEEK_SET)
fh.Read_all_begin(rbuf.as_mpi_c(count))
fh.Read_all_end(rbuf.as_raw())
for value in rbuf[:-1]:
self.assertEqual(value, 42)
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
def testReadWriteOrdered(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(rank%42, typecode, count)
fh.Seek_shared(0, MPI.SEEK_SET)
fh.Write_ordered(wbuf.as_raw())
fh.Sync()
comm.Barrier()
fh.Sync()
rbuf = array(-1, typecode, count+1)
fh.Seek_shared(0, MPI.SEEK_SET)
fh.Read_ordered(rbuf.as_mpi_c(count))
for value in rbuf[:-1]:
self.assertEqual(value, rank%42)
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
def testReadWriteOrderedBeginEnd(self):
comm = self.COMM
size = comm.Get_size()
rank = comm.Get_rank()
fh = self.FILE
for array, typecode in subTestIO(self):
etype = array.TypeMap[typecode]
fh.Set_size(0)
fh.Set_view(0, etype)
count = 13
wbuf = array(rank%42, typecode, count)
fh.Seek_shared(0, MPI.SEEK_SET)
fh.Write_ordered_begin(wbuf.as_raw())
fh.Write_ordered_end(wbuf.as_raw())
fh.Sync()
comm.Barrier()
fh.Sync()
rbuf = array(-1, typecode, count+1)
fh.Seek_shared(0, MPI.SEEK_SET)
fh.Read_ordered_begin(rbuf.as_mpi_c(count))
fh.Read_ordered_end(rbuf.as_raw())
for value in rbuf[:-1]:
self.assertEqual(value, rank%42)
self.assertEqual(rbuf[-1], -1)
comm.Barrier()
@unittest.skipMPI('MPICH1')
@unittest.skipMPI('LAM/MPI')
class TestIOSelf(BaseTestIO, unittest.TestCase):
COMM = MPI.COMM_SELF
prefix = BaseTestIO.prefix + ('%d-' % MPI.COMM_WORLD.Get_rank())
@unittest.skipMPI('openmpi(<2.2.0)')
@unittest.skipMPI('msmpi')
@unittest.skipMPI('MPICH2')
@unittest.skipMPI('MPICH1')
@unittest.skipMPI('LAM/MPI')
class TestIOWorld(BaseTestIO, unittest.TestCase):
COMM = MPI.COMM_WORLD
def have_feature():
case = BaseTestIO()
case.COMM = TestIOSelf.COMM
case.prefix = TestIOSelf.prefix
case.setUp()
case.tearDown()
try:
have_feature()
except NotImplementedError:
unittest.disable(BaseTestIO, 'mpi-io')
if __name__ == '__main__':
unittest.main()
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Semantic segmentation metric."""
import gin
import gin.tf
import numpy as np
import tensorflow as tf
from tf3d import standard_fields
from object_detection.utils import label_map_util
@gin.configurable
class SemanticSegmentationMetric(tf.keras.metrics.Metric):
"""Semantic segmentation mean intersection over union metric."""
def __init__(self,
multi_label=False,
num_classes=None,
label_map=None,
label_map_path=None,
eval_prefix='eval',
name='semantic_segmentation_metric'):
"""Semantic segmentation mean intersection over union metric.
Args:
multi_label: Boolean which denotes if pixels can be assigned multiple
labels; classes are treated separately, logit > 0 is positive
prediction.
num_classes: Number of classes.
label_map: A dictionary mapping label ids to label names.
label_map_path: path to labelmap (could be None).
eval_prefix: Prefix for eval name; separates scalar values in Tensorboard.
name: class name.
"""
super(SemanticSegmentationMetric, self).__init__(name=name)
self.multi_label = multi_label
self.num_classes = num_classes
if label_map:
self.label_map = label_map
elif label_map_path:
self.label_map = _get_label_map(label_map_path)
else:
self.label_map = None
self.eval_prefix = eval_prefix
if self.label_map is not None:
self.class_range = self.label_map.keys()
elif num_classes is not None:
self.class_range = range(num_classes)
else:
raise ValueError('Both num_classes and label_map are None.')
self.true_positive_metrics = {}
self.false_positive_metrics = {}
self.false_negative_metrics = {}
for c in self.class_range:
self.true_positive_metrics[c] = tf.keras.metrics.TruePositives(
name=('%s_true_positive_%d' % (name, c)))
self.false_positive_metrics[c] = tf.keras.metrics.FalsePositives(
name=('%s_false_positive_%d' % (name, c)))
self.false_negative_metrics[c] = tf.keras.metrics.FalseNegatives(
name=('%s_false_negative_%d' % (name, c)))
def update_state(self, inputs, outputs):
"""Function that updates the metric state at each example.
Args:
inputs: A dictionary containing input tensors.
outputs: A dictionary containing output tensors.
Returns:
Update op.
"""
# Prepare logits and labels
logits = outputs[
standard_fields.DetectionResultFields.object_semantic_points]
labels = inputs[standard_fields.InputDataFields.object_class_points]
weights = inputs[standard_fields.InputDataFields.point_loss_weights]
num_valid_points = inputs[standard_fields.InputDataFields.num_valid_points]
if len(logits.get_shape().as_list()) == 3:
batch_size = logits.get_shape().as_list()[0]
logits_list = []
labels_list = []
weights_list = []
for i in range(batch_size):
num_valid_points_i = num_valid_points[i]
logits_list.append(logits[i, 0:num_valid_points_i, :])
labels_list.append(labels[i, 0:num_valid_points_i, :])
weights_list.append(weights[i, 0:num_valid_points_i, :])
logits = tf.concat(logits_list, axis=0)
labels = tf.concat(labels_list, axis=0)
weights = tf.concat(weights_list, axis=0)
if self.num_classes is None:
num_classes = logits.get_shape().as_list()[-1]
else:
num_classes = self.num_classes
if num_classes != logits.get_shape().as_list()[-1]:
      raise ValueError('num_classes does not match the logits dimensions.')
class_labels, class_predictions = _get_class_labels_and_predictions(
labels=labels,
logits=logits,
        num_classes=num_classes,
multi_label=self.multi_label)
update_ops = []
for c in self.class_range:
update_op_tp_c = self.true_positive_metrics[c].update_state(
y_true=class_labels[c],
y_pred=class_predictions[c],
sample_weight=weights)
update_ops.append(update_op_tp_c)
update_op_fp_c = self.false_positive_metrics[c].update_state(
y_true=class_labels[c],
y_pred=class_predictions[c],
sample_weight=weights)
update_ops.append(update_op_fp_c)
update_op_fn_c = self.false_negative_metrics[c].update_state(
y_true=class_labels[c],
y_pred=class_predictions[c],
sample_weight=weights)
update_ops.append(update_op_fn_c)
return tf.group(update_ops)
def result(self):
metrics_dict = self.get_metric_dictionary()
return metrics_dict[self.eval_prefix + '_avg/mean_iou']
def get_metric_dictionary(self):
metrics_dict = {}
class_recall_list = [] # used for calculating mean pixel accuracy.
class_iou_list = [] # used for calculating mean iou.
for c in self.class_range:
tp = self.true_positive_metrics[c].result()
fp = self.false_positive_metrics[c].result()
fn = self.false_negative_metrics[c].result()
class_recall = tp / (tp + fn)
class_precision = tf.where(
tf.greater(tp + fn, 0.0), _safe_div(tp, (tp + fp)),
tf.constant(np.NaN))
class_iou = tf.where(
tf.greater(tp + fn, 0.0), tp / (tp + fn + fp), tf.constant(np.NaN))
class_recall_list.append(class_recall)
class_iou_list.append(class_iou)
class_name = _get_class_name(class_id=c, label_map=self.label_map)
metrics_dict[self.eval_prefix +
'_recall/{}'.format(class_name)] = class_recall
metrics_dict[self.eval_prefix +
'_precision/{}'.format(class_name)] = class_precision
metrics_dict[self.eval_prefix + '_iou/{}'.format(class_name)] = class_iou
mean_pixel_accuracy = _non_nan_mean(class_recall_list)
mean_iou = _non_nan_mean(class_iou_list)
metrics_dict[self.eval_prefix +
'_avg/mean_pixel_accuracy'] = mean_pixel_accuracy
metrics_dict[self.eval_prefix + '_avg/mean_iou'] = mean_iou
return metrics_dict
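  # Added commentary: for each class c the quantities derived above are
  #     recall_c    = TP_c / (TP_c + FN_c)
  #     precision_c = TP_c / (TP_c + FP_c)   (NaN when the class never occurs)
  #     IoU_c       = TP_c / (TP_c + FP_c + FN_c)
  # and mean pixel accuracy / mean IoU average these per-class values while
  # skipping NaN entries.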
def reset_states(self):
for _, value in self.true_positive_metrics.items():
value.reset_states()
for _, value in self.false_positive_metrics.items():
value.reset_states()
for _, value in self.false_negative_metrics.items():
value.reset_states()
def _get_class_labels_and_predictions(labels, logits, num_classes, multi_label):
"""Returns list of per-class-labels and list of per-class-predictions.
Args:
labels: A `Tensor` of size [n, k]. In the
multi-label case, values are either 0 or 1 and k = num_classes. Otherwise,
k = 1 and values are in [0, num_classes).
logits: A `Tensor` of size [n, `num_classes`]
representing the logits of each pixel and semantic class.
num_classes: Number of classes.
multi_label: Boolean which defines if we are in a multi_label setting, where
pixels can have multiple labels, or not.
Returns:
    class_labels: List of size num_classes, where each entry is a `Tensor` with
      leading dimension n, of type float with values of 0 or 1 representing the
      ground truth labels for that class.
    class_predictions: List of size num_classes, where each entry is a `Tensor`
      with leading dimension n, of type float with values of 0 or 1
      representing the predicted labels for that class.
"""
class_predictions = [None] * num_classes
if multi_label:
class_labels = tf.split(labels, num_or_size_splits=num_classes, axis=1)
class_logits = tf.split(logits, num_or_size_splits=num_classes, axis=1)
for c in range(num_classes):
class_predictions[c] = tf.cast(
tf.greater(class_logits[c], 0), dtype=tf.float32)
else:
class_predictions_flat = tf.argmax(logits, 1)
class_labels = [None] * num_classes
for c in range(num_classes):
class_labels[c] = tf.cast(tf.equal(labels, c), dtype=tf.float32)
class_predictions[c] = tf.cast(
tf.equal(class_predictions_flat, c), dtype=tf.float32)
return class_labels, class_predictions
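# Illustrative example (added): with num_classes=3, multi_label=False,
# labels=[[0], [2]] and logits=[[9., 1., 0.], [0., 1., 8.]], argmax yields
# predictions [0, 2], so class 0 gets labels/predictions [1, 0] and class 2
# gets [0, 1], while class 1 is all zeros.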
def _get_class_name(class_id, label_map):
"""Gets class name from label dictionary."""
if label_map and class_id in label_map:
return label_map[class_id]
else:
return str(class_id)
def _non_nan_mean(tensor_list):
"""Calculates the mean of a list of tensors while ignoring nans."""
tensor = tf.stack(tensor_list)
not_nan = tf.logical_not(tf.math.is_nan(tensor))
return tf.reduce_mean(tf.boolean_mask(tensor, not_nan))
def _safe_div(a, b):
"""Divides two numbers, returns 0 if denominator is (close to) 0."""
return tf.where(tf.less(tf.abs(b), 1e-10), 0.0, a / b)
def _get_label_map(label_map_path):
"""Returns dictionary mapping label IDs to class-names."""
if not label_map_path:
return None
label_map_proto = label_map_util.load_labelmap(label_map_path)
label_map = {}
for item in label_map_proto.item:
if item.HasField('display_name'):
label_map[item.id] = item.display_name
elif item.HasField('name'):
label_map[item.id] = item.name
return label_map
from common_fixtures import * # NOQA
TEST_HANDLER_PREFIX = 'test-handler-'
@pytest.fixture(scope='module', autouse=True)
def tear_down(request, admin_user_client):
request.addfinalizer(lambda: _disable_test_handlers(admin_user_client))
def _get_extension(admin_client, extension_point_name, impl_name,
format='Dynamic : {}'):
for ep in admin_client.list_extension_point():
if ep.name == extension_point_name:
for impl in ep.implementations:
try:
if impl.properties.name == impl_name:
return impl
except AttributeError:
pass
if impl.name == format.format(impl_name):
return impl
return None
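# Added note: an implementation may expose the handler either via its
# properties.name or via the server's display-name pattern for dynamic
# handlers ('Dynamic : <name>' by default), which is why _get_extension()
# checks both forms.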
def _disable_test_handlers(client):
name = TEST_HANDLER_PREFIX + '%'
for h in client.list_external_handler(state='active',
name_like=name):
client.wait_success(h.deactivate())
# None of these tests can run in parallel because they can
# interfere with other instance.start operations
@pytest.mark.nonparallel
def test_external_handler(admin_user_client):
name = '{}-{}'.format(TEST_HANDLER_PREFIX, random_str())
configs = [{'name': 'environment.create', 'onError': 'instance.stop'}]
h = admin_user_client.create_external_handler(name=name,
processConfigs=configs)
assert h.state == 'registering'
assert h.get('processConfigs') is None
assert h.data.fields.processConfigs == configs
h = admin_user_client.wait_success(h)
assert h.state == 'active'
assert h.data.fields.processConfigs is None
maps = h.externalHandlerExternalHandlerProcessMaps()
assert len(maps) == 1
assert maps[0].state == 'active'
assert maps[0].onError == 'instance.stop'
assert maps[0].eventName == 'environment.create'
process = maps[0].externalHandlerProcess()
assert process.state == 'active'
assert process.name == 'stack.create'
ep = _get_extension(admin_user_client, 'process.stack.create.handlers',
name)
assert ep is not None
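# Added note: the assertions above rely on the server mapping the legacy
# 'environment.create' event name onto the 'stack.create' process, so the
# handler ends up registered under process.stack.create.handlers.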
@pytest.mark.nonparallel
def test_defaults(admin_user_client):
name = '{}-{}'.format(TEST_HANDLER_PREFIX, random_str())
configs = [{'name': 'instance.start'}]
h = admin_user_client.create_external_handler(name=name,
processConfigs=configs)
h = admin_user_client.wait_success(h)
assert h.state == 'active'
ep = _get_extension(admin_user_client, 'process.instance.start.handlers',
name)
assert ep is not None
assert ep.properties.retry is None
assert ep.properties.timeoutMillis is None
assert ep.properties.name == name
assert ep.properties.priority == '1000'
assert ep.properties.eventName == 'instance.start;handler={}'.format(name)
@pytest.mark.nonparallel
def test_properties(admin_user_client):
name = '{}-{}'.format(TEST_HANDLER_PREFIX, random_str())
configs = [{'name': 'instance.start'}]
h = admin_user_client.create_external_handler(name=name,
processConfigs=configs,
timeoutMillis=2000,
retries=4,
priority=1234)
h = admin_user_client.wait_success(h)
assert h.state == 'active'
ep = _get_extension(admin_user_client, 'process.instance.start.handlers',
name)
assert ep is not None
assert ep.properties.retry == '4'
assert ep.properties.timeoutMillis == '2000'
assert ep.properties.priority == '1234'
assert ep.properties.name == name
assert ep.properties.eventName == 'instance.start;handler={}'.format(name)
@pytest.mark.nonparallel
def test_pre_handler(admin_user_client):
name = '{}-{}'.format(TEST_HANDLER_PREFIX, random_str())
configs = [{'name': 'pre.instance.start'}]
h = admin_user_client.create_external_handler(name=name,
processConfigs=configs,
timeoutMillis=2000,
retries=4,
priority=1234)
h = admin_user_client.wait_success(h)
assert h.state == 'active'
ep = _get_extension(admin_user_client,
'process.instance.start.pre.listeners',
name)
assert ep is not None
assert ep.properties.eventName == \
'pre.instance.start;handler={}'.format(name)
@pytest.mark.nonparallel
def test_post_handler(admin_user_client):
name = '{}-{}'.format(TEST_HANDLER_PREFIX, random_str())
configs = [{'name': 'post.instance.start'}]
h = admin_user_client.create_external_handler(name=name,
processConfigs=configs,
timeoutMillis=2000,
retries=4,
priority=1234)
h = admin_user_client.wait_success(h)
assert h.state == 'active'
ep = _get_extension(admin_user_client,
'process.instance.start.post.listeners',
name)
assert ep is not None
assert ep.properties.eventName == \
'post.instance.start;handler={}'.format(name)
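# Added note: taken together, these tests show the naming convention: a plain
# process name registers under process.<name>.handlers, while 'pre.<name>' and
# 'post.<name>' register under process.<name>.pre.listeners and
# process.<name>.post.listeners respectively.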
@pytest.mark.nonparallel
def test_enabled_disable(admin_user_client):
name = '{}-{}'.format(TEST_HANDLER_PREFIX, random_str())
configs = [{'name': 'instance.start'}]
h = admin_user_client.create_external_handler(name=name,
processConfigs=configs)
h = admin_user_client.wait_success(h)
ep = _get_extension(admin_user_client, 'process.instance.start.handlers',
name)
assert ep is not None
h = admin_user_client.wait_success(h.deactivate())
ep = _get_extension(admin_user_client, 'process.instance.start.handlers',
name)
assert ep is None
h = admin_user_client.wait_success(h.activate())
ep = _get_extension(admin_user_client, 'process.instance.start.handlers',
name)
assert ep is not None
admin_user_client.wait_success(
h.externalHandlerProcesses()[0].deactivate())
ep = _get_extension(admin_user_client, 'process.instance.start.handlers',
name)
assert ep is None
admin_user_client.wait_success(h.externalHandlerProcesses()[0].activate())
ep = _get_extension(admin_user_client, 'process.instance.start.handlers',
name)
assert ep is not None
admin_user_client.wait_success(
h.externalHandlerExternalHandlerProcessMaps()[0].deactivate())
ep = _get_extension(admin_user_client, 'process.instance.start.handlers',
name)
assert ep is None
admin_user_client.wait_success(
h.externalHandlerExternalHandlerProcessMaps()[0].activate())
ep = _get_extension(admin_user_client, 'process.instance.start.handlers',
name)
assert ep is not None
@pytest.mark.nonparallel
def test_event_name_comma(admin_user_client):
name = '{}-{}'.format(TEST_HANDLER_PREFIX, random_str())
configs = [{'name': 'pre.instance.start,instance.start'}]
h = admin_user_client.create_external_handler(name=name,
processConfigs=configs)
h = admin_user_client.wait_success(h)
processes = [x.name for x in h.externalHandlerProcesses()]
assert len(processes) == 2
assert 'pre.instance.start' in processes
assert 'instance.start' in processes
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Report.unitemized_contributions'
db.add_column(u'tx_tecreports_report', 'unitemized_contributions',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=12, decimal_places=2),
keep_default=False)
# Adding field 'Report.total_contributions'
db.add_column(u'tx_tecreports_report', 'total_contributions',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=12, decimal_places=2),
keep_default=False)
# Adding field 'Report.unitemized_expenditures'
db.add_column(u'tx_tecreports_report', 'unitemized_expenditures',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=12, decimal_places=2),
keep_default=False)
# Adding field 'Report.total_expenditures'
db.add_column(u'tx_tecreports_report', 'total_expenditures',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=12, decimal_places=2),
keep_default=False)
# Adding field 'Report.outstanding_loans'
db.add_column(u'tx_tecreports_report', 'outstanding_loans',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=12, decimal_places=2),
keep_default=False)
# Adding field 'Report.cash_on_hand'
db.add_column(u'tx_tecreports_report', 'cash_on_hand',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=12, decimal_places=2),
keep_default=False)
# Adding field 'Report.unitemized_pledges'
db.add_column(u'tx_tecreports_report', 'unitemized_pledges',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=12, decimal_places=2),
keep_default=False)
# Adding field 'Report.unitemized_loans'
db.add_column(u'tx_tecreports_report', 'unitemized_loans',
self.gf('django.db.models.fields.DecimalField')(default=0, max_digits=12, decimal_places=2),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Report.unitemized_contributions'
db.delete_column(u'tx_tecreports_report', 'unitemized_contributions')
# Deleting field 'Report.total_contributions'
db.delete_column(u'tx_tecreports_report', 'total_contributions')
# Deleting field 'Report.unitemized_expenditures'
db.delete_column(u'tx_tecreports_report', 'unitemized_expenditures')
# Deleting field 'Report.total_expenditures'
db.delete_column(u'tx_tecreports_report', 'total_expenditures')
# Deleting field 'Report.outstanding_loans'
db.delete_column(u'tx_tecreports_report', 'outstanding_loans')
# Deleting field 'Report.cash_on_hand'
db.delete_column(u'tx_tecreports_report', 'cash_on_hand')
# Deleting field 'Report.unitemized_pledges'
db.delete_column(u'tx_tecreports_report', 'unitemized_pledges')
# Deleting field 'Report.unitemized_loans'
db.delete_column(u'tx_tecreports_report', 'unitemized_loans')
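    # Added note: with South, this migration would typically be applied with
    #     python manage.py migrate tx_tecreports
    # and reverted by migrating back to the previous migration number
    # (assuming the standard manage.py layout).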
models = {
u'tx_tecreports.contributionsbyamount': {
'Meta': {'ordering': "['low']", 'object_name': 'ContributionsByAmount'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'high': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'low': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '2'}),
'name': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats_by_amount'", 'to': u"orm['tx_tecreports.Report']"}),
'total': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'tx_tecreports.contributionsbydate': {
'Meta': {'ordering': "['date']", 'object_name': 'ContributionsByDate'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats_by_date'", 'to': u"orm['tx_tecreports.Report']"}),
'total': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'tx_tecreports.contributionsbystate': {
'Meta': {'ordering': "['-amount']", 'object_name': 'ContributionsByState'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats_by_state'", 'to': u"orm['tx_tecreports.Report']"}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'total': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'tx_tecreports.contributionsbyzipcode': {
'Meta': {'ordering': "['-amount']", 'object_name': 'ContributionsByZipcode'},
'amount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stats_by_zipcode'", 'to': u"orm['tx_tecreports.Report']"}),
'total': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'tx_tecreports.contributor': {
'Meta': {'object_name': 'Contributor'},
'address_1': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'address_2': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'city': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'first_name': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_entity': ('django.db.models.fields.BooleanField', [], {}),
'is_individual': ('django.db.models.fields.BooleanField', [], {}),
'last_name': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'state': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'suffix': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'title': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'type_of': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contributors'", 'to': u"orm['tx_tecreports.ContributorType']"}),
'zipcode': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
u'tx_tecreports.contributortype': {
'Meta': {'object_name': 'ContributorType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'})
},
u'tx_tecreports.employer': {
'Meta': {'object_name': 'Employer'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'})
},
u'tx_tecreports.filer': {
'Meta': {'object_name': 'Filer'},
'filer_id': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'}),
'filer_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filers'", 'to': u"orm['tx_tecreports.FilerType']"}),
'first_name': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'name_prefix': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'name_suffix': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'nickname': ('tx_tecreports.fields.OptionalMaxCharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'})
},
u'tx_tecreports.filertype': {
'Meta': {'object_name': 'FilerType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'})
},
u'tx_tecreports.filing': {
'Meta': {'object_name': 'Filing'},
'filer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filings'", 'to': u"orm['tx_tecreports.Filer']"}),
'filing_method': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'filings'", 'to': u"orm['tx_tecreports.FilingMethod']"}),
'is_correction': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'report_due': ('django.db.models.fields.DateField', [], {}),
'report_filed': ('django.db.models.fields.DateField', [], {}),
'report_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250', 'primary_key': 'True'}),
'report_type': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'tx_tecreports.filingmethod': {
'Meta': {'object_name': 'FilingMethod'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'tx_tecreports.filingtype': {
'Meta': {'object_name': 'FilingType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'})
},
u'tx_tecreports.receipt': {
'Meta': {'ordering': "['date']", 'object_name': 'Receipt'},
'amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'contributor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'receipts'", 'to': u"orm['tx_tecreports.Contributor']"}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'employer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tx_tecreports.Employer']", 'null': 'True', 'blank': 'True'}),
'fec_id': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_out_of_state_pac': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'job_title': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'name_of_schedule': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['tx_tecreports.Receipt']"}),
'receipt_id': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'report': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'receipts'", 'to': u"orm['tx_tecreports.Report']"}),
'travel': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'receipt'", 'unique': 'True', 'null': 'True', 'to': u"orm['tx_tecreports.Travel']"})
},
u'tx_tecreports.report': {
'Meta': {'object_name': 'Report'},
'cash_on_hand': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'from_date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'outstanding_loans': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'report_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'report_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'through_date': ('django.db.models.fields.DateField', [], {}),
'total_contributions': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'total_expenditures': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'unitemized_contributions': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'unitemized_expenditures': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'unitemized_loans': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'}),
'unitemized_pledges': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '12', 'decimal_places': '2'})
},
u'tx_tecreports.travel': {
'Meta': {'object_name': 'Travel'},
'arrival_date': ('django.db.models.fields.DateField', [], {}),
'departure_date': ('django.db.models.fields.DateField', [], {}),
'departure_location': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'}),
'destination': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'}),
'first_name': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'}),
'means_of': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'}),
'purpose': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'}),
'suffix': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'}),
'title': ('tx_tecreports.fields.MaxCharField', [], {'max_length': '250'})
}
}
complete_apps = ['tx_tecreports']
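# Hedged note: this is a South schema migration (pre-Django-1.7 era, as the
# orm[...] references above indicate), so it is applied with South's command,
# e.g. `./manage.py migrate tx_tecreports`.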
| |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 22 17:58:48 2016
@author: Avinash
"""
import numpy
from math import *
def global_var(var_set=0,k1=3.3,N_veh=5):
if var_set==0:
global T,N,M,g,l,a
global Pl,Ph,C,E,ei,ef,s,f,R,B
T=24
M=0.05
N=N_veh
        data=numpy.genfromtxt('variable_sets/set1/param.csv', delimiter=',')  # forward slashes are portable across platforms
g=data[1]
l=data[2]
a=data[3]
Pl=(numpy.ones(shape=(1,N))*2.2)[0]
Ph=(numpy.ones(shape=(1,N))*k1)[0]
C=(numpy.ones(shape=(1,N))*16)[0]
E=(numpy.ones(shape=(1,N))*0.9)[0]
#ei=(numpy.ones(shape=(1,N))*0.1)[0]
#R=200
R=500
#B=8.5
#B=10
B=2.5
ef=(numpy.ones(shape=(1,N))*0.9)[0]
dataset=numpy.genfromtxt(str(N)+'data.csv', delimiter=',')
ei=dataset[0]
s=dataset[1]
f=dataset[2]
def variable_gen(N=50):
ei=numpy.zeros(shape=(1,N))[0]
s=[]
for i in range(N):
ei[i]=numpy.random.randint(1,4)*0.1
        # Vehicles can arrive up to the 14th hour (s is drawn from [0, 14))
s.append(numpy.random.random()*14)
s.sort()
s=numpy.array(s)
f=numpy.zeros(shape=(1,N))[0]
for i in range(N):
f[i]=s[i]+6+numpy.random.random()*(4)
dataset=numpy.zeros(shape=(3,N))
dataset[0]=ei
dataset[1]=s
dataset[2]=f
numpy.savetxt(str(N)+'data'+".csv",dataset,delimiter=",")
def static(p,mode=0):
# mode=0 for LP-R-SCSP
# mode=1 for LP-C-SCSP
#print '1'
p=p.reshape((T,N))
u1=10
u2=10
u3=10
h=numpy.zeros(shape=(T,N))
# Calculation of h[t][i]
for t in range(T):
for i in range(N):
if t>int(s[i]) and t<int(f[i]):
h[t][i]=1
elif t==int(s[i]) and s[i]==int(s[i]):
h[t][i]=0
elif t==int(s[i]) and s[i]!=int(s[i]):
h[t][i]=int(s[i]+1)-s[i]
elif t==int(f[i]) and f[i]!=int(f[i]):
h[t][i]=f[i]-int(f[i])
else:
h[t][i]=0
#print '2',h.shape
x=numpy.zeros(shape=(T,N))
    # Calculation of x
for t in range(T):
for i in range(N):
if t==0:
if t==int(s[i]):
                    x[t][i]=ei[i] # initial e
elif t==int(f[i]+1):
x[t][i]=ef[i] # final e
else:
x[t][i]=0
else:
if t==int(s[i]):
                    x[t][i]=ei[i] # initial e
elif t==int(f[i]+1):
x[t][i]=ef[i] # final e
else:
x[t][i]=x[t-1][i]+E[i]*h[t-1][i]*p[t-1][i]/C[i]
#print '3',x.shape
P1=numpy.zeros(shape=(T,N))
    # Calculation of P1 (maximum charging rate limit)
for t in range(T):
for i in range(N):
P1[t][i]=(ef[i]-x[t][i])*C[i]/E[i]
u=numpy.zeros(shape=(T,N))
d=numpy.zeros(shape=(T,N))
r=numpy.zeros(shape=(T,N))
# Calculation of u, d and r
for t in range(T):
for i in range(N):
if h[t][i]==1:
u[t][i]=p[t][i]-Pl[i] # Pl is lower charging rate limit
d[t][i]=min(P1[t][i],Ph[i])-p[t][i] # Ph is higher charging rate limit
else:
u[t][i]=0
d[t][i]=0
r[t][i]=u[t][i]+d[t][i]
#print '4'
if mode==0:
# Objective function value calculation
fval1=0
for t in range(T):
f1=0
for i in range(N):
f1+=r[t][i]
fval1+=a[t]*f1
fval2=0
for t in range(T):
for i in range(N):
fval2+=p[t][i]*h[t][i]
fval=fval1+M*fval2
elif mode==1:
fval=0
for t in range(T):
k1=0
for i in range(N):
k1+=p[t][i]*h[t][i]
fval+=(M+g[t])*k1
## Penalty calculations
# Constraint Number 11
penalty1=0
for t in range(T):
c1=0
psum=0
dsum=0
for i in range(N):
psum+=p[t][i]
dsum+=d[t][i]
c1=psum+l[t]+dsum-R
if c1>0:
penalty1+=u1*(c1**2)
penalty2=0
if mode==0:
# Constraint number 7
c2=0
for t in range(T):
k1=0
for i in range(N):
k1+=p[t][i]*h[t][i]
c2+=(M+g[t])*k1
c2=c2-B
if c2>0:
penalty2=u2*(c2**2)
# Constraint number 6 or boundary constraint
penalty3=0
for t in range(T):
for i in range(N):
if p[t][i]<Pl[i]:
penalty3+=u3*(p[t][i]-Pl[i])**2
if p[t][i]>Ph[i]:
penalty3+=u3*(Ph[i]-p[t][i])**2
if mode==0:
fval=fval-penalty1-penalty2-penalty3
return(-fval)
elif mode==1:
fval=fval+penalty1+penalty3
return(fval)
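def _demo_static_eval():
    # Hedged usage sketch (not part of the original script): assumes the
    # param CSV read by global_var() and str(N)+'data.csv' (which
    # variable_gen(N) can generate) exist on disk. Evaluates the LP-R-SCSP
    # objective (mode=0) on a flat profile pinned at the lower rate limit;
    # static() returns the negated, penalty-adjusted objective value.
    global_var()
    p0 = numpy.tile(Pl, (T, 1)).flatten()  # shape (T*N,), reshaped inside static()
    return static(p0, mode=0)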
def dynamic(p,x,i,d,mode=0,get_d=0):
# mode=0 for LP-R-DCSP
# mode=1 for LP-C-DCSP
p[i]=x
p=p.T
u1=10
u2=10
u3=10
h=numpy.zeros(shape=(T,N))
# Calculation of h[t][i]
for t in range(T):
if t>int(s[i]) and t<int(f[i]):
h[t][i]=1
# if i==1:
# print ('1',h[t][i])
elif t==int(s[i]) and s[i]==int(s[i]):
h[t][i]=0
# if i==1:
# print ('2',h[t][i])
elif t==int(s[i]) and s[i]!=int(s[i]):
h[t][i]=int(s[i]+1)-s[i]
# if i==1:
# print ('3',h[t][i])
elif t==int(f[i]) and f[i]!=int(f[i]):
h[t][i]=f[i]-int(f[i])
# if i==1:
# print ('4',h[t][i])
else:
h[t][i]=0
# if i==1:
# print ('5',h[t][i])
x=numpy.zeros(shape=(T,N))
    # Calculation of x
for t in range(T):
if t==0:
if t==int(s[i]):
                x[t][i]=ei[i] # initial e
elif t==int(f[i]+1):
x[t][i]=ef[i] # final e
else:
x[t][i]=0
else:
if t==int(s[i]):
                x[t][i]=ei[i] # initial e
elif t==int(f[i]):
x[t][i]=ef[i] # final e
else:
x[t][i]=x[t-1][i]+E[i]*h[t-1][i]*p[t-1][i]/C[i]
P1=numpy.zeros(shape=(T,N))
    # Calculation of P1 (maximum charging rate limit)
for t in range(T):
P1[t][i]=(ef[i]-x[t][i])*C[i]/E[i]
u=numpy.zeros(shape=(T,N))
#d=numpy.zeros(shape=(T,N))
r=numpy.zeros(shape=(T,N))
# Calculation of u, d and r
for t in range(T):
if h[t][i]==1:
u[t][i]=p[t][i]-Pl[i] # Pl is lower charging rate limit
d[t][i]=min(P1[t][i],Ph[i])-p[t][i] # Ph is higher charging rate limit
else:
u[t][i]=0
d[t][i]=0
r[t][i]=u[t][i]+d[t][i]
if mode==0:
# Objective function value calculation
fval1=0
fval2=0
# print h.T[i],r.T[i]
# print a[t],p.T[i]
for t in range(T):
fval1+=a[t]*r[t][i]
fval2+=p[t][i]*h[t][i]
# print 'h',h.T[i],'r',r.T[i]
# print 'a',a[t],'p',p.T[i]
# print ('1',fval1),('2',fval2)
# print '1'
# print h.T[i],r.T[i]
## print '2'
# print a[t],p.T[i]
# print '3',fval1
# print '4',fval2
#if i==1:
#print fval1,fval2,'a',a[t],'r',r[t][i],'p',p[t][i],'h',h[t][i]
fval=fval1+M*fval2
elif mode==1:
fval=0
for t in range(T):
fval+=(M+g[t])*p[t][i]*h[t][i]
#print h.T[i]
## Penalty calculations
# Constraint Number 11
penalty1=0
for t in range(T):
c1=0
psum=0
dsum=0
for i_temp in range(i+1):
psum+=p[t][i_temp]
dsum+=d[t][i_temp]
c1=psum+l[t]+dsum-R
if c1>0:
penalty1+=u1*(c1**2)
#print '1 temp'
penalty2=0
if mode==0:
# Constraint number 7
c2=0
for t in range(T):
c2+=(M+g[t])*p[t][i]*h[t][i]
c2=c2-B
if c2>0:
penalty2=u2*(c2**2)
# Constraint number 6 or boundary constraint
penalty3=0
for t in range(T):
if p[t][i]<Pl[i]:
penalty3+=u3*(p[t][i]-Pl[i])**2
if p[t][i]>Ph[i]:
penalty3+=u3*(Ph[i]-p[t][i])**2
if get_d==1:
return(d.T[i])
if mode==0:
#print penalty2
fval=fval-penalty1-penalty2-penalty3
return(-fval,fval+penalty1+penalty2+penalty3,penalty1,penalty2,penalty3)
elif mode==1:
fval=fval+penalty1+penalty3
return(fval+penalty1+penalty3,fval-penalty1-penalty3,penalty1,penalty2,penalty3)
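def _demo_dynamic_eval(i=0):
    # Hedged usage sketch (not part of the original script): dynamic() takes
    # the profile as an (N, T) array whose row i is overwritten with the
    # candidate schedule x before being transposed, plus the slack matrix d
    # accumulated from vehicles 0..i. For mode=0 it returns a 5-tuple of
    # objective and penalty terms.
    global_var()
    p = numpy.zeros(shape=(N, T))
    x = numpy.ones(T) * Pl[0]      # candidate schedule for vehicle i
    d = numpy.zeros(shape=(T, N))  # slack carried over from earlier vehicles
    return dynamic(p, x, i, d, mode=0)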
| |
def prepare_annot_pairs(ibs, qaids, daids, qconfig2_, dconfig2_):
# Prepare lazy attributes for annotations
qannot_cfg = ibs.depc.stacked_config(None, 'featweight', qconfig2_)
dannot_cfg = ibs.depc.stacked_config(None, 'featweight', dconfig2_)
unique_qaids = set(qaids)
unique_daids = set(daids)
# Determine a unique set of annots per config
configured_aids = ut.ddict(set)
configured_aids[qannot_cfg].update(unique_qaids)
configured_aids[dannot_cfg].update(unique_daids)
# Make efficient annot-object representation
configured_obj_annots = {}
for config, aids in configured_aids.items():
annots = ibs.annots(sorted(list(aids)), config=config)
configured_obj_annots[config] = annots.view()
# These annot views behave like annot objects
# but they use the same internal cache
annots1 = configured_obj_annots[qannot_cfg].view(qaids)
annots2 = configured_obj_annots[dannot_cfg].view(daids)
return annots1, annots2
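# Hedged usage sketch (hypothetical names; assumes an ibeis test database and
# that qcfg/dcfg are valid featweight config dicts):
#   import ibeis
#   ibs = ibeis.opendb('testdb1')
#   qaids = daids = ibs.get_valid_aids()
#   annots1, annots2 = prepare_annot_pairs(ibs, qaids, daids, qcfg, dcfg)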
# def old_vsone_parts():
# if False:
# matchesORIG = match_list
# matches_auc(truth_list, matchesORIG)
# matches_SV = [match.apply_sver(inplace=False)
# for match in ut.ProgIter(matchesORIG, label='sver')]
# matches_auc(truth_list, matches_SV)
# matches_RAT = [match.apply_ratio_test(inplace=False)
# for match in ut.ProgIter(matchesORIG, label='ratio')]
# matches_auc(truth_list, matches_RAT)
# matches_RAT_SV = [match.apply_sver(inplace=False)
# for match in ut.ProgIter(matches_RAT, label='sver')]
# matches_auc(truth_list, matches_RAT_SV)
# if True:
# matches_RAT = match_list
# matches_auc(truth_list, matches_RAT)
# matches_RAT_SV = [match.apply_sver(inplace=False)
# for match in ut.ProgIter(matches_RAT, label='sver')]
# matches_auc(truth_list, matches_RAT_SV)
# if False:
# # Visualize scores
# score_list = np.array([m.fs.sum() for m in matches_RAT_SV])
# encoder = vt.ScoreNormalizer()
# encoder.fit(score_list, truth_list, verbose=True)
# encoder.visualize()
# # Fix issue
# # for match in ut.ProgIter(matches_RAT_SV):
# # match.annot1['yaw'] = ibs.get_annot_yaws_asfloat(match.annot1['aid'])
# # match.annot2['yaw'] = ibs.get_annot_yaws_asfloat(match.annot2['aid'])
# # # Construct global measurements
# # global_keys = ['yaw', 'qual', 'gps', 'time']
# # for match in ut.ProgIter(match_list, lbl='setup globals'):
# # match.global_measures = {}
# # for key in global_keys:
# # match.global_measures[key] = (match.annot1[key], match.annot2[key])
# if False:
# # TEST LNBNN SCORE SEP
# infr.apply_match_edges()
# infr.apply_match_scores()
# edge_to_score = infr.get_edge_attrs('score')
# lnbnn_score_list = [
# edge_to_score.get(tup) if tup in edge_to_score
# else edge_to_score.get(tup[::-1], 0)
# for tup in ut.lmap(tuple, aid_pairs)
# ]
# auc = sklearn.metrics.roc_auc_score(truth_list, lnbnn_score_list)
# print('auc = %r' % (auc,))
# if False:
# nfeats = len(withnan_cols) # NOQA
# param_grid = {
# 'bootstrap': [True, False],
# # 'class_weight': ['balanced', None],
# # 'criterion': ['gini', 'entropy'],
# # 'max_depth': [2, 4],
# 'max_features': [int(np.log2(nfeats)), int(np.sqrt(nfeats)), int(np.sqrt(nfeats)) * 2, nfeats],
# # 'max_features': [1, 3, 10],
# # 'min_samples_split': [2, 3, 5, 10],
# # 'min_samples_leaf': [1, 3, 5, 10, 20],
# # 'n_estimators': [128, 256],
# }
# static_params = {
# 'max_depth': 4,
# # 'bootstrap': False,
# 'class_weight': None,
# 'max_features': 'sqrt',
# 'missing_values': np.nan,
# 'min_samples_leaf': 5,
# 'min_samples_split': 2,
# 'n_estimators': 256,
# 'criterion': 'entropy',
# }
# from sklearn.model_selection import GridSearchCV
# clf = RandomForestClassifier(**static_params)
# search = GridSearchCV(clf, param_grid=param_grid, n_jobs=4, cv=3,
# refit=False, verbose=5)
# with ut.Timer('GridSearch'):
# search.fit(X_withnan, y)
# def report(results, n_top=3):
# for i in range(1, n_top + 1):
# candidates = np.flatnonzero(results['rank_test_score'] == i)
# for candidate in candidates:
# print('Model with rank: {0}'.format(i))
# print('Mean validation score: {0:.3f} (std: {1:.3f})'.format(
# results['mean_test_score'][candidate],
# results['std_test_score'][candidate]))
# print('Parameters: {0}'.format(results['params'][candidate]))
# print('')
# results = search.cv_results_
# report(results, n_top=10)
# print(ut.sort_dict(search.cv_results_).keys())
# params = results['params']
# cols = sorted(param_grid.keys())
# zX_df = pd.DataFrame([ut.take(p, cols) for p in params], columns=cols)
# # zX_df['class_weight'][pd.isnull(zX_df['class_weight'])] = 'none'
# if 'max_depth' in zX_df.columns:
# zX_df['max_depth'][pd.isnull(zX_df['max_depth'])] = 10
# if 'criterion' in zX_df.columns:
# zX_df['criterion'][zX_df['criterion'] == 'entropy'] = 0
# zX_df['criterion'][zX_df['criterion'] == 'gini'] = 1
# if 'class_weight' in zX_df.columns:
# zX_df['class_weight'][pd.isnull(zX_df['class_weight'])] = 0
# zX_df['class_weight'][zX_df['class_weight'] == 'balanced'] = 1
# [(c, zX_df[c].dtype) for c in cols]
# # zX = pd.get_dummies(zX_df).values.astype(np.float32)
# zX = zX_df.values.astype(np.float32)
# zY = mean_test_score = results['mean_test_score']
# from scipy.stats import mode
# # from pgmpy.factors.discrete import TabularCPD
# # TabularCPD('feat', top_feats.shape[0])
# num_top = 5
# top_feats = zX.take(zY.argsort()[::-1], axis=0)[0:num_top]
# print('num_top = %r' % (num_top,))
# print('Marginalized probabilities over top feature values')
# uvals = [np.unique(f) for f in top_feats.T]
# marginal_probs = [[np.sum(f == u) / len(f) for u in us] for us, f in zip(uvals , top_feats.T)]
# for c, us, mprobs in zip(cols, uvals, marginal_probs):
# print(c + ' = ' + ut.repr3(ut.dzip(us, mprobs), precision=2))
# mode_top_zX_ = mode(top_feats, axis=0)
# mode_top_zX = mode_top_zX_.mode[0]
# flags = (mode_top_zX_.count == 1)[0]
# mode_top_zX[flags] = top_feats[0][flags]
# print('mode')
# print(ut.repr4(ut.dzip(cols, mode_top_zX)))
# mean_top_zX = np.mean(top_feats, axis=0)
# print('mean')
# print(ut.repr4(ut.dzip(cols, mean_top_zX)))
# import sklearn.ensemble
# clf = sklearn.ensemble.RandomForestRegressor(bootstrap=True, oob_score=True)
# clf.fit(zX, zY)
# importances = dict(zip(cols, clf.feature_importances_))
# importances = ut.sort_dict(importances, 'vals', reverse=True)
# print(ut.align(ut.repr4(importances, precision=4), ':'))
# mean_test_score
# # print(df.to_string())
# # print(df_results)
# # TODO: TSNE?
# # http://scikit-learn.org/stable/auto_examples/manifold/plot_manifold_sphere.html#sphx-glr-auto-examples-manifold-plot-manifold-sphere-py
# # Perform t-distributed stochastic neighbor embedding.
# # from sklearn import manifold
# # import matplotlib.pyplot as plt
# # tsne = manifold.TSNE(n_components=2, init='pca', random_state=0)
# # trans_data = tsne.fit_transform(feats).T
# # ax = fig.add_subplot(2, 5, 10)
# # plt.scatter(trans_data[0], trans_data[1], c=colors, cmap=plt.cm.rainbow)
# # plt.title("t-SNE (%.2g sec)" % (t1 - t0))
# # ax.xaxis.set_major_formatter(NullFormatter())
# # ax.yaxis.set_major_formatter(NullFormatter())
# # plt.axis('tight')
# print('--------')
def gridsearch_ratio_thresh(matches):
import sklearn
import sklearn.metrics
import vtool_ibeis as vt
# Param search for vsone
import plottool_ibeis as pt
pt.qt4ensure()
skf = sklearn.model_selection.StratifiedKFold(n_splits=10,
random_state=119372)
y = np.array([m.annot1['nid'] == m.annot2['nid'] for m in matches])
basis = {'ratio_thresh': np.linspace(.6, .7, 50).tolist()}
grid = ut.all_dict_combinations(basis)
xdata = np.array(ut.take_column(grid, 'ratio_thresh'))
def _ratio_thresh(y_true, match_list):
        # Try to find the optimal ratio threshold
auc_list = []
for cfgdict in ut.ProgIter(grid, lbl='gridsearch'):
y_score = [
match.fs.compress(match.ratio_test_flags(cfgdict)).sum()
for match in match_list
]
auc = sklearn.metrics.roc_auc_score(y_true, y_score)
auc_list.append(auc)
auc_list = np.array(auc_list)
return auc_list
auc_list = _ratio_thresh(y, matches)
pt.plot(xdata, auc_list)
subx, suby = vt.argsubmaxima(auc_list, xdata)
best_ratio_thresh = subx[suby.argmax()]
skf_results = []
y_true = y
for train_idx, test_idx in skf.split(matches, y):
match_list_ = ut.take(matches, train_idx)
y_true = y.take(train_idx)
auc_list = _ratio_thresh(y_true, match_list_)
subx, suby = vt.argsubmaxima(auc_list, xdata, maxima_thresh=.8)
best_ratio_thresh = subx[suby.argmax()]
skf_results.append(best_ratio_thresh)
    print('mean(skf_results) = %r' % (np.mean(skf_results),))
import utool
utool.embed()
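# Hedged note (assumption, not part of the original function): `matches` is
# expected to be a list of vtool_ibeis PairwiseMatch-like objects exposing
# `fs`, `ratio_test_flags(cfgdict)`, and `annot1`/`annot2` dicts carrying an
# 'nid' key; the grid sweeps ratio_thresh over [.6, .7] and reports the mean
# of the per-fold AUC-maximizing thresholds.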
| |
import os
import re
import argparse
import logging
import pandas as pd
from pandas.core.series import Series
import requests
from whoosh import writing
from yaml import load, Loader
from whoosh.analysis import StemmingAnalyzer
from whoosh.filedb.filestore import RamStorage
from whoosh.fields import *
from whoosh.qparser import QueryParser
from whoosh.query import Variations
def load_taxonomy(base_category, taxonomy_file, taxonomy_url, fetch_online=False):
if fetch_online:
r = requests.get(taxonomy_url)
taxonomy_content = r.text
else:
taxonomy_content = open(taxonomy_file).read()
lines = taxonomy_content.split('\n')
if base_category:
filtered_lines = []
for index, bc in enumerate(base_category):
base_category[index] = bc.strip().lower()
for bc in base_category:
filtered_lines += [line for line in lines if line.strip().lower().startswith(bc.strip().lower())]
return filtered_lines
else:
return lines
def index_product_info(product_dict):
schema = Schema(path=ID(stored=True, analyzer=StemmingAnalyzer()),
content=TEXT(stored=True, analyzer=StemmingAnalyzer()))
st = RamStorage()
st.create()
ix = st.create_index(schema)
writer = ix.writer()
for key in product_dict.keys():
writer.add_document(path=unicode(key, "utf-8"), content=unicode(product_dict[key], "utf-8"))
writer.commit(mergetype=writing.CLEAR)
return ix
def match(ix, category, weights=None):
# get the leaf of a category, e.g. only "Chairs" from Furniture > Chairs
index, c = get_category(category)
# adjust query
# replace comma and ampersand with OR
query = re.sub('[,&]', ' OR ', c)
with ix.searcher() as searcher:
parsed_query = QueryParser("content", schema=ix.schema, termclass=Variations).parse(query)
results = searcher.search(parsed_query, terms=True)
score = 0
if results:
logging.debug("Category: %s => Query: %s" % (category, query))
for r in results:
weight = 1
if weights:
weight = weights[r['path']]
logging.debug("Result: %s [score: %d weight: %d]" % (r, r.score, weight))
score += r.score * weight
return score
def get_category(string):
index = -1
name = None
if string:
for s in string.split(">"):
name = s.strip()
index += 1
return index, name
def get_best_match(matches):
if not matches:
return ''
# find most hits
best_score = 0
best_category = None
for match, score in matches.items():
if score > best_score:
best_score = score
best_category = match
# if equal score: choose the category with greater detail level
elif score == best_score:
index, name = get_category(best_category)
hit_index, hit_name = get_category(match)
if hit_index > index:
best_category = match
return best_category
def safe_get(row, column):
value = row.get(column)
if isinstance(value, basestring):
return value
return ''
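def _demo_category_helpers():
    # Hedged sketch (not part of the original script) exercising the pure
    # helpers above with made-up data: get_category() splits on '>' and
    # returns (depth, leaf); get_best_match() prefers deeper paths on ties.
    index, name = get_category("Furniture > Chairs")    # -> (1, "Chairs")
    best = get_best_match({"Furniture": 3.0,
                           "Furniture > Chairs": 3.0})  # deeper path wins
    return index, name, best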
if __name__ == "__main__":
# read command line arguments
parser = argparse.ArgumentParser(description='Finds category based on Google\'s taxonomy in a product description')
parser.add_argument('base_category', metavar='bc',
help='The base categories of the product. Can speed up execution a lot. Example: "Furniture", "Home & Garden"',
nargs="*")
parser.add_argument('-o', '--overwrite', const=True, nargs="?",
help='If set category column in product file will be overwritten')
parser.add_argument('--log', nargs="?", help="The log level")
args = parser.parse_args()
# logging
if args.log:
logging.basicConfig(level=args.log.upper())
# load settings
settings = {}
if os.path.exists("settings.yaml"):
settings = load(open("settings.yaml"), Loader=Loader)
taxonomy_file = settings.get("google_taxonomy_file", "taxonomy.en-US.txt")
taxonomy_url = settings.get("google_taxonomy_url", "http://www.google.com/basepages/producttype/taxonomy.en-GB.txt")
fetch_online = settings.get("fetch_taxonomy_online", True)
product_file = settings.get("product_file", "product.csv")
output_product_file = settings.get("output_product_file", "product.matched.csv")
product_columns = settings.get("product_columns", ["title", "product type", "description"])
product_column_weights = settings.get("product_column_weights", [3, 2, 1])
weights = {}
for index, pc in enumerate(product_columns):
weights[pc] = product_column_weights[index]
google_category_column = settings.get("google_category_column", "google product category")
if args.overwrite:
overwrite_category = True
else:
overwrite_category = settings.get("overwrite_category", False)
# load taxonomy
print "Loading taxonomy. Base categories: %s ..." % ", ".join(args.base_category)
categories = load_taxonomy(args.base_category, taxonomy_file=taxonomy_file, taxonomy_url=taxonomy_url,
fetch_online=fetch_online)
if not categories:
print "Error: base category %s not found in taxonomy" % args.base_category
if not args.base_category:
print "Warning: you did not specify a base category. This can take *very* long time to complete. See matcher -h for help."
# load product csv file
print "Parsing input file: %s" % product_file
product_data = pd.read_csv(product_file, sep='\t', usecols=product_columns + [google_category_column])
print "Processing %d rows ..." % product_data.shape[0]
    # if the target google category column doesn't exist in the file, add it
if not google_category_column in product_data.columns:
product_data[google_category_column] = Series()
# iterate through data row by row and match category
index = 1
replacements = 0
for row_index, row in product_data.iterrows():
index += 1
if index % 10 == 0:
print "Progress: %d rows finished" % index
p = {}
for col in product_columns:
value = safe_get(row, col)
if value:
p[col] = row.get(col)
gcat = safe_get(row, google_category_column)
# create index of product fields
ix = index_product_info(p)
# find all matches
matches = {}
for category in categories:
if not category:
continue
score = match(ix, category, weights)
if score:
if not matches.get(category):
matches[category] = score
else:
matches[category] += score
# select best match
best_match = get_best_match(matches)
logging.debug("MATCHES: %s" % str(matches))
logging.debug("======> best match: %s" % best_match)
if not gcat or overwrite_category:
if best_match:
                product_data.loc[row_index, google_category_column] = best_match  # .loc avoids the deprecated .ix and the index-offset arithmetic
# row[google_category_column] = best_match
replacements += 1
# write back result
# copy category column into original file
gcat_col = product_data[google_category_column]
original_data = pd.read_csv(product_file, sep='\t')
original_data[google_category_column] = gcat_col
original_data.to_csv(output_product_file, sep='\t', index=False)
print "processed %d rows of '%s', replaced %d, output written to '%s'" % (
(index - 1), product_file, replacements, output_product_file)
| |
# Default Django settings. Override these with settings in the module
# pointed-to by the DJANGO_SETTINGS_MODULE environment variable.
# This is defined here as a do-nothing function because we can't import
# django.utils.translation -- that module depends on the settings.
gettext_noop = lambda s: s
####################
# CORE #
####################
DEBUG = False
TEMPLATE_DEBUG = False
# Whether the framework should propagate raw exceptions rather than catching
# them. This is useful under some testing situations and should never be used
# on a live site.
DEBUG_PROPAGATE_EXCEPTIONS = False
# Whether to use the "Etag" header. This saves bandwidth but slows down performance.
USE_ETAGS = False
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'), ('Full Name', 'anotheremail@example.com'))
ADMINS = ()
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ()
# Local time zone for this installation. All choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name (although not all
# systems may support all possibilities). When USE_TZ is True, this is
# interpreted as the default user time zone.
TIME_ZONE = 'America/Chicago'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = False
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Languages we provide translations for, out of the box. The language name
# should be the utf-8 encoded local name for the language.
LANGUAGES = (
('ar', gettext_noop('Arabic')),
('az', gettext_noop('Azerbaijani')),
('bg', gettext_noop('Bulgarian')),
('bn', gettext_noop('Bengali')),
('bs', gettext_noop('Bosnian')),
('ca', gettext_noop('Catalan')),
('cs', gettext_noop('Czech')),
('cy', gettext_noop('Welsh')),
('da', gettext_noop('Danish')),
('de', gettext_noop('German')),
('el', gettext_noop('Greek')),
('en', gettext_noop('English')),
('en-gb', gettext_noop('British English')),
('eo', gettext_noop('Esperanto')),
('es', gettext_noop('Spanish')),
('es-ar', gettext_noop('Argentinian Spanish')),
('es-mx', gettext_noop('Mexican Spanish')),
('es-ni', gettext_noop('Nicaraguan Spanish')),
('et', gettext_noop('Estonian')),
('eu', gettext_noop('Basque')),
('fa', gettext_noop('Persian')),
('fi', gettext_noop('Finnish')),
('fr', gettext_noop('French')),
('fy-nl', gettext_noop('Frisian')),
('ga', gettext_noop('Irish')),
('gl', gettext_noop('Galician')),
('he', gettext_noop('Hebrew')),
('hi', gettext_noop('Hindi')),
('hr', gettext_noop('Croatian')),
('hu', gettext_noop('Hungarian')),
('id', gettext_noop('Indonesian')),
('is', gettext_noop('Icelandic')),
('it', gettext_noop('Italian')),
('ja', gettext_noop('Japanese')),
('ka', gettext_noop('Georgian')),
('kk', gettext_noop('Kazakh')),
('km', gettext_noop('Khmer')),
('kn', gettext_noop('Kannada')),
('ko', gettext_noop('Korean')),
('lt', gettext_noop('Lithuanian')),
('lv', gettext_noop('Latvian')),
('mk', gettext_noop('Macedonian')),
('ml', gettext_noop('Malayalam')),
('mn', gettext_noop('Mongolian')),
('nb', gettext_noop('Norwegian Bokmal')),
('ne', gettext_noop('Nepali')),
('nl', gettext_noop('Dutch')),
('nn', gettext_noop('Norwegian Nynorsk')),
('pa', gettext_noop('Punjabi')),
('pl', gettext_noop('Polish')),
('pt', gettext_noop('Portuguese')),
('pt-br', gettext_noop('Brazilian Portuguese')),
('ro', gettext_noop('Romanian')),
('ru', gettext_noop('Russian')),
('sk', gettext_noop('Slovak')),
('sl', gettext_noop('Slovenian')),
('sq', gettext_noop('Albanian')),
('sr', gettext_noop('Serbian')),
('sr-latn', gettext_noop('Serbian Latin')),
('sv', gettext_noop('Swedish')),
('sw', gettext_noop('Swahili')),
('ta', gettext_noop('Tamil')),
('te', gettext_noop('Telugu')),
('th', gettext_noop('Thai')),
('tr', gettext_noop('Turkish')),
('tt', gettext_noop('Tatar')),
('uk', gettext_noop('Ukrainian')),
('ur', gettext_noop('Urdu')),
('vi', gettext_noop('Vietnamese')),
('zh-cn', gettext_noop('Simplified Chinese')),
('zh-tw', gettext_noop('Traditional Chinese')),
)
# Languages using BiDi (right-to-left) layout
LANGUAGES_BIDI = ("he", "ar", "fa")
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
LOCALE_PATHS = ()
LANGUAGE_COOKIE_NAME = 'django_language'
# If you set this to True, Django will format dates, numbers and calendars
# according to the user's current locale.
USE_L10N = False
# Not-necessarily-technical managers of the site. They get broken link
# notifications and other various emails.
MANAGERS = ADMINS
# Default content type and charset to use for all HttpResponse objects, if a
# MIME type isn't manually specified. These are used to construct the
# Content-Type header.
DEFAULT_CONTENT_TYPE = 'text/html'
DEFAULT_CHARSET = 'utf-8'
# Encoding of files read from disk (template and initial SQL files).
FILE_CHARSET = 'utf-8'
# E-mail address that error messages come from.
SERVER_EMAIL = 'root@localhost'
# Whether to send broken-link emails.
SEND_BROKEN_LINK_EMAILS = False
# Database connection info.
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy',
},
}
# Classes used to implement DB routing behavior.
DATABASE_ROUTERS = []
# The email backend to use. For possible shortcuts see django.core.mail.
# The default is to use the SMTP backend.
# Third-party backends can be specified by providing a Python path
# to a module that defines an EmailBackend class.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# Host for sending email.
EMAIL_HOST = 'localhost'
# Port for sending email.
EMAIL_PORT = 25
# Optional SMTP authentication information for EMAIL_HOST.
EMAIL_HOST_USER = ''
EMAIL_HOST_PASSWORD = ''
EMAIL_USE_TLS = False
# List of strings representing installed apps.
INSTALLED_APPS = ()
# List of locations of the template source files, in search order.
TEMPLATE_DIRS = ()
# List of callables that know how to import templates from various sources.
# See the comments in django/core/template/loader.py for interface
# documentation.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
# 'django.core.context_processors.request',
'django.contrib.messages.context_processors.messages',
)
# Output to use in template system for invalid (e.g. misspelled) variables.
TEMPLATE_STRING_IF_INVALID = ''
# Default email address to use for various automated correspondence from
# the site managers.
DEFAULT_FROM_EMAIL = 'webmaster@localhost'
# Subject-line prefix for email messages sent with django.core.mail.mail_admins
# or ...mail_managers. Make sure to include the trailing space.
EMAIL_SUBJECT_PREFIX = '[Django] '
# Whether to append trailing slashes to URLs.
APPEND_SLASH = True
# Whether to prepend the "www." subdomain to URLs that don't have it.
PREPEND_WWW = False
# Override the server-derived value of SCRIPT_NAME
FORCE_SCRIPT_NAME = None
# List of compiled regular expression objects representing User-Agent strings
# that are not allowed to visit any page, systemwide. Use this for bad
# robots/crawlers. Here are a few examples:
# import re
# DISALLOWED_USER_AGENTS = (
# re.compile(r'^NaverBot.*'),
# re.compile(r'^EmailSiphon.*'),
# re.compile(r'^SiteSucker.*'),
# re.compile(r'^sohu-search')
# )
DISALLOWED_USER_AGENTS = ()
ABSOLUTE_URL_OVERRIDES = {}
# Tuple of strings representing allowed prefixes for the {% ssi %} tag.
# Example: ('/home/html', '/var/www')
ALLOWED_INCLUDE_ROOTS = ()
# If this is an admin settings module, this should be a list of
# settings modules (in the format 'foo.bar.baz') for which this admin
# is an admin.
ADMIN_FOR = ()
# List of compiled regular expression objects representing URLs that need not
# be reported when SEND_BROKEN_LINK_EMAILS is True. Here are a few examples:
# import re
# IGNORABLE_404_URLS = (
# re.compile(r'^/apple-touch-icon.*\.png$'),
#    re.compile(r'^/favicon.ico$'),
#    re.compile(r'^/robots.txt$'),
#    re.compile(r'^/phpmyadmin/'),
# re.compile(r'\.(cgi|php|pl)$'),
# )
IGNORABLE_404_URLS = ()
# A secret key for this particular Django installation. Used in secret-key
# hashing algorithms. Set this in your settings, or Django will complain
# loudly.
SECRET_KEY = ''
# Default file storage mechanism that holds media.
DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT.
# Example: "http://media.lawrence.com/media/"
MEDIA_URL = ''
# Absolute path to the directory that holds static files.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL that handles the static files served from STATIC_ROOT.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = None
# List of upload handler classes to be applied in order.
FILE_UPLOAD_HANDLERS = (
'django.core.files.uploadhandler.MemoryFileUploadHandler',
'django.core.files.uploadhandler.TemporaryFileUploadHandler',
)
# Maximum size, in bytes, of a request before it will be streamed to the
# file system instead of into memory.
FILE_UPLOAD_MAX_MEMORY_SIZE = 2621440 # i.e. 2.5 MB
# Directory in which upload streamed files will be temporarily saved. A value of
# `None` will make Django use the operating system's default temporary directory
# (i.e. "/tmp" on *nix systems).
FILE_UPLOAD_TEMP_DIR = None
# The numeric mode to set newly-uploaded files to. The value should be a mode
# you'd pass directly to os.chmod; see http://docs.python.org/lib/os-file-dir.html.
FILE_UPLOAD_PERMISSIONS = None
# Python module path where user will place custom format definition.
# The directory where this setting is pointing should contain subdirectories
# named as the locales, containing a formats.py file
# (i.e. "myproject.locale" for myproject/locale/en/formats.py etc. use)
FORMAT_MODULE_PATH = None
# Default formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
# Default formatting for datetime objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATETIME_FORMAT = 'N j, Y, P'
# Default formatting for time objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
TIME_FORMAT = 'P'
# Default formatting for date objects when only the year and month are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
YEAR_MONTH_FORMAT = 'F Y'
# Default formatting for date objects when only the month and day are relevant.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
MONTH_DAY_FORMAT = 'F j'
# Default short formatting for date objects. See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATE_FORMAT = 'm/d/Y'
# Default short formatting for datetime objects.
# See all available format strings here:
# http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
SHORT_DATETIME_FORMAT = 'm/d/Y P'
# Default formats to be used when parsing dates from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
'%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
'%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
'%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
'%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
# Default formats to be used when parsing times from input boxes, in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
# Default formats to be used when parsing dates and times from input boxes,
# in order
# See all available format string here:
# http://docs.python.org/library/datetime.html#strftime-behavior
# * Note that these format strings are different from the ones to display dates
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 0
# Decimal separator symbol
DECIMAL_SEPARATOR = '.'
# Boolean that sets whether to add thousand separator when formatting numbers
USE_THOUSAND_SEPARATOR = False
# Number of digits that will be together, when splitting them by
# THOUSAND_SEPARATOR. 0 means no grouping, 3 means splitting by thousands...
NUMBER_GROUPING = 0
# Thousand separator symbol
THOUSAND_SEPARATOR = ','
# Do you want to manage transactions manually?
# Hint: you really don't!
TRANSACTIONS_MANAGED = False
# The User-Agent string to use when checking for URL validity through the
# isExistingURL validator.
from django import get_version
URL_VALIDATOR_USER_AGENT = "Django/%s (https://www.djangoproject.com)" % get_version()
# The tablespaces to use for each model when not specified otherwise.
DEFAULT_TABLESPACE = ''
DEFAULT_INDEX_TABLESPACE = ''
# Default X-Frame-Options header value
X_FRAME_OPTIONS = 'SAMEORIGIN'
USE_X_FORWARDED_HOST = False
# The Python dotted path to the WSGI application that Django's internal servers
# (runserver, runfcgi) will use. If `None`, the return value of
# 'django.core.wsgi.get_wsgi_application' is used, thus preserving the same
# behavior as previous versions of Django. Otherwise this should point to an
# actual WSGI application object.
WSGI_APPLICATION = None
# If your Django app is behind a proxy that sets a header to specify secure
# connections, AND that proxy ensures that user-submitted headers with the
# same name are ignored (so that people can't spoof it), set this value to
# a tuple of (header_name, header_value). For any requests that come in with
# that header/value, request.is_secure() will return True.
# WARNING! Only set this if you fully understand what you're doing. Otherwise,
# you may be opening yourself up to a security risk.
SECURE_PROXY_SSL_HEADER = None
##############
# MIDDLEWARE #
##############
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# 'django.middleware.http.ConditionalGetMiddleware',
# 'django.middleware.gzip.GZipMiddleware',
)
############
# SESSIONS #
############
SESSION_COOKIE_NAME = 'sessionid' # Cookie name. This can be whatever you want.
SESSION_COOKIE_AGE = 60 * 60 * 24 * 7 * 2 # Age of cookie, in seconds (default: 2 weeks).
SESSION_COOKIE_DOMAIN = None # A string like ".lawrence.com", or None for standard domain cookie.
SESSION_COOKIE_SECURE = False # Whether the session cookie should be secure (https:// only).
SESSION_COOKIE_PATH = '/' # The path of the session cookie.
SESSION_COOKIE_HTTPONLY = True # Whether to use the non-RFC standard httpOnly flag (IE, FF3+, others)
SESSION_SAVE_EVERY_REQUEST = False # Whether to save the session data on every request.
SESSION_EXPIRE_AT_BROWSER_CLOSE = False # Whether a user's session cookie expires when the Web browser is closed.
SESSION_ENGINE = 'django.contrib.sessions.backends.db' # The module to store session data
SESSION_FILE_PATH = None # Directory to store session files if using the file session module. If None, the backend will use a sensible default.
#########
# CACHE #
#########
# New format
CACHES = {
}
# The cache backend to use. See the docstring in django.core.cache for the
# possible values.
CACHE_MIDDLEWARE_KEY_PREFIX = ''
CACHE_MIDDLEWARE_SECONDS = 600
CACHE_MIDDLEWARE_ALIAS = 'default'
####################
# COMMENTS #
####################
COMMENTS_ALLOW_PROFANITIES = False
# The profanities that will trigger a validation error in
# CommentDetailsForm.clean_comment. All of these should be in lowercase.
PROFANITIES_LIST = ()
##################
# AUTHENTICATION #
##################
AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
LOGIN_URL = '/accounts/login/'
LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/accounts/profile/'
# The number of days a password reset link is valid for
PASSWORD_RESET_TIMEOUT_DAYS = 3
# the first hasher in this list is the preferred algorithm. any
# password using different algorithms will be converted automatically
# upon login
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.UnsaltedMD5PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
###########
# SIGNING #
###########
SIGNING_BACKEND = 'django.core.signing.TimestampSigner'
########
# CSRF #
########
# Dotted path to callable to be used as view when a request is
# rejected by the CSRF middleware.
CSRF_FAILURE_VIEW = 'django.views.csrf.csrf_failure'
# Settings for CSRF cookie.
CSRF_COOKIE_NAME = 'csrftoken'
CSRF_COOKIE_DOMAIN = None
CSRF_COOKIE_PATH = '/'
CSRF_COOKIE_SECURE = False
############
# MESSAGES #
############
# Class to use as messages backend
MESSAGE_STORAGE = 'django.contrib.messages.storage.fallback.FallbackStorage'
# Default values of MESSAGE_LEVEL and MESSAGE_TAGS are defined within
# django.contrib.messages to avoid imports in this settings file.
###########
# LOGGING #
###########
# The callable to use to configure logging
LOGGING_CONFIG = 'django.utils.log.dictConfig'
# The default logging configuration. This sends an email to
# the site admins on every HTTP 500 error. All other log
# records are sent to the bit bucket.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Default exception reporter filter class used in case none has been
# specifically assigned to the HttpRequest instance.
DEFAULT_EXCEPTION_REPORTER_FILTER = 'django.views.debug.SafeExceptionReporterFilter'
###########
# TESTING #
###########
# The name of the class to use to run the test suite
TEST_RUNNER = 'django.test.simple.DjangoTestSuiteRunner'
############
# FIXTURES #
############
# The list of directories to search for fixtures
FIXTURE_DIRS = ()
###############
# STATICFILES #
###############
# A list of locations of additional static files
STATICFILES_DIRS = ()
# The default file storage backend used during the build process
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
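# Hedged usage note: Django layers the module named by DJANGO_SETTINGS_MODULE
# over these defaults, so a settings module only needs to declare what it
# changes. A minimal (hypothetical) example:
#     DEBUG = True
#     SECRET_KEY = 'dev-only-not-secret'
#     DATABASES = {'default': {'ENGINE': 'django.db.backends.sqlite3',
#                              'NAME': 'dev.db'}}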
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
"""
Created on Jun 9, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Jun 9, 2012"
import unittest
import os
from pymatgen.matproj.rest import MPRester, MPRestError
from pymatgen.core.periodic_table import Element
from pymatgen.core.structure import Structure, Composition
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.electronic_structure.bandstructure import BandStructureSymmLine
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
from pymatgen.phasediagram.pdmaker import PhaseDiagram
from pymatgen.phasediagram.pdanalyzer import PDAnalyzer
from pymatgen.io.cif import CifParser
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
@unittest.skipIf("MAPI_KEY" not in os.environ,
"MAPI_KEY environment variable not set.")
class MPResterTest(unittest.TestCase):
def setUp(self):
self.rester = MPRester()
def test_get_data(self):
props = ["energy", "energy_per_atom", "formation_energy_per_atom",
"nsites", "unit_cell_formula", "pretty_formula", "is_hubbard",
"elements", "nelements", "e_above_hull", "hubbards",
"is_compatible", "task_ids",
"density", "icsd_ids", "total_magnetization"]
# unicode literals have been reintroduced in py>3.2
expected_vals = [-191.33812137, -6.833504334642858, -2.551358929370749,
28, {k: v for k, v in {'P': 4, 'Fe': 4, 'O': 16, 'Li': 4}.items()},
"LiFePO4", True, ['Li', 'O', 'P', 'Fe'], 4, 0.0,
{k: v for k, v in {'Fe': 5.3, 'Li': 0.0, 'O': 0.0, 'P': 0.0}.items()}, True,
[u'mp-601412', u'mp-19017', u'mp-796535', u'mp-797820',
u'mp-540081', u'mp-797269'],
3.4662026991351147,
[159107, 154117, 160776, 99860, 181272, 166815,
260571, 92198, 165000, 155580, 38209, 161479, 153699,
260569, 260570, 200155, 260572, 181341, 181342,
72545, 56291, 97764, 162282, 155635],
16.0002716]
for (i, prop) in enumerate(props):
if prop not in ['hubbards', 'unit_cell_formula', 'elements',
'icsd_ids', 'task_ids']:
val = self.rester.get_data("mp-19017", prop=prop)[0][prop]
self.assertAlmostEqual(expected_vals[i], val)
elif prop in ["elements", "icsd_ids", "task_ids"]:
self.assertEqual(set(expected_vals[i]),
set(self.rester.get_data("mp-19017",
prop=prop)[0][prop]))
else:
self.assertEqual(expected_vals[i],
self.rester.get_data("mp-19017",
prop=prop)[0][prop])
props = ['structure', 'initial_structure', 'final_structure', 'entry']
for prop in props:
obj = self.rester.get_data("mp-19017", prop=prop)[0][prop]
if prop.endswith("structure"):
self.assertIsInstance(obj, Structure)
elif prop == "entry":
obj = self.rester.get_data("mp-19017", prop=prop)[0][prop]
self.assertIsInstance(obj, ComputedEntry)
#Test chemsys search
data = self.rester.get_data('Fe-Li-O', prop='unit_cell_formula')
self.assertTrue(len(data) > 1)
elements = {Element("Li"), Element("Fe"), Element("O")}
for d in data:
self.assertTrue(
set(Composition(d['unit_cell_formula']).elements).issubset(
elements))
self.assertRaises(MPRestError, self.rester.get_data, "Fe2O3",
"badmethod")
def test_get_materials_id_from_task_id(self):
self.assertEqual(self.rester.get_materials_id_from_task_id(
"mp-540081"), "mp-19017")
def test_get_materials_id_references(self):
# nosetests pymatgen/matproj/tests/test_rest.py:MPResterTest.test_get_materials_id_references
# self.rester points to rest/v2 by default which doesn't have the refs endpoint
m = MPRester(endpoint="https://www.materialsproject.org/rest")
data = m.get_materials_id_references('mp-123')
self.assertTrue(len(data) > 1000)
def test_find_structure(self):
# nosetests pymatgen/matproj/tests/test_rest.py:MPResterTest.test_find_structure
# self.rester points to rest/v2 by default which doesn't have the find_structure endpoint
m = MPRester(endpoint="https://www.materialsproject.org/rest")
ciffile = os.path.join(test_dir, 'Fe3O4.cif')
data = m.find_structure(ciffile)
self.assertTrue(len(data) > 1)
s = CifParser(ciffile).get_structures()[0]
data = m.find_structure(s)
self.assertTrue(len(data) > 1)
def test_get_entries_in_chemsys(self):
syms = ["Li", "Fe", "O"]
entries = self.rester.get_entries_in_chemsys(syms)
elements = set([Element(sym) for sym in syms])
for e in entries:
self.assertIsInstance(e, ComputedEntry)
self.assertTrue(set(e.composition.elements).issubset(elements))
def test_get_structure_by_material_id(self):
s1 = self.rester.get_structure_by_material_id("mp-1")
self.assertEqual(s1.formula, "Cs1")
def test_get_entry_by_material_id(self):
e = self.rester.get_entry_by_material_id("mp-19017")
self.assertIsInstance(e, ComputedEntry)
self.assertTrue(e.composition.reduced_formula, "LiFePO4")
def test_query(self):
criteria = {'elements': {'$in': ['Li', 'Na', 'K'], '$all': ['O']}}
props = ['pretty_formula', 'energy']
data = self.rester.query(criteria=criteria, properties=props)
self.assertTrue(len(data) > 6)
data = self.rester.query(criteria="*2O", properties=props)
self.assertGreaterEqual(len(data), 52)
self.assertIn("Li2O", (d["pretty_formula"] for d in data))
def test_get_exp_thermo_data(self):
data = self.rester.get_exp_thermo_data("Fe2O3")
self.assertTrue(len(data) > 0)
for d in data:
self.assertEqual(d.formula, "Fe2O3")
def test_get_dos_by_id(self):
dos = self.rester.get_dos_by_material_id("mp-2254")
self.assertIsInstance(dos, CompleteDos)
def test_get_bandstructure_by_material_id(self):
bs = self.rester.get_bandstructure_by_material_id("mp-2254")
self.assertIsInstance(bs, BandStructureSymmLine)
def test_get_structures(self):
structs = self.rester.get_structures("Mn3O4")
self.assertTrue(len(structs) > 0)
def test_get_entries(self):
entries = self.rester.get_entries("TiO2")
self.assertTrue(len(entries) > 1)
for e in entries:
self.assertEqual(e.composition.reduced_formula, "TiO2")
entries = self.rester.get_entries("TiO2", inc_structure="final")
self.assertTrue(len(entries) > 1)
for e in entries:
self.assertEqual(e.structure.composition.reduced_formula, "TiO2")
all_entries = self.rester.get_entries("Fe", compatible_only=False)
entries = self.rester.get_entries("Fe", compatible_only=True)
self.assertTrue(len(entries) < len(all_entries))
def test_get_exp_entry(self):
entry = self.rester.get_exp_entry("Fe2O3")
self.assertEqual(entry.energy, -825.5)
def test_submit_query_delete_snl(self):
s = Structure([[5, 0, 0], [0, 5, 0], [0, 0, 5]], ["Fe"], [[0, 0, 0]])
# d = self.rester.submit_snl(
# [s, s], remarks=["unittest"],
# authors="Test User <test@materialsproject.com>")
# self.assertEqual(len(d), 2)
# data = self.rester.query_snl({"about.remarks": "unittest"})
# self.assertEqual(len(data), 2)
# snlids = [d["_id"] for d in data]
# self.rester.delete_snl(snlids)
# data = self.rester.query_snl({"about.remarks": "unittest"})
# self.assertEqual(len(data), 0)
def test_get_stability(self):
entries = self.rester.get_entries_in_chemsys(["Fe", "O"])
modified_entries = []
for entry in entries:
# Create modified entries with energies that are 0.01eV higher
# than the corresponding entries.
if entry.composition.reduced_formula == "Fe2O3":
modified_entries.append(
ComputedEntry(entry.composition,
entry.uncorrected_energy + 0.01,
parameters=entry.parameters,
entry_id="mod_{}".format(entry.entry_id)))
rest_ehulls = self.rester.get_stability(modified_entries)
all_entries = entries + modified_entries
compat = MaterialsProjectCompatibility()
all_entries = compat.process_entries(all_entries)
pd = PhaseDiagram(all_entries)
a = PDAnalyzer(pd)
for e in all_entries:
if str(e.entry_id).startswith("mod"):
for d in rest_ehulls:
if d["entry_id"] == e.entry_id:
data = d
break
self.assertAlmostEqual(a.get_e_above_hull(e),
data["e_above_hull"])
def test_get_reaction(self):
rxn = self.rester.get_reaction(["Li", "O"], ["Li2O"])
self.assertIn("Li2O", rxn["Experimental_references"])
def test_parse_criteria(self):
crit = MPRester.parse_criteria("mp-1234 Li-*")
self.assertIn("Li-O", crit["$or"][1]["chemsys"]["$in"])
self.assertIn({"task_id": "mp-1234"}, crit["$or"])
crit = MPRester.parse_criteria("Li2*")
self.assertIn("Li2O", crit["pretty_formula"]["$in"])
self.assertIn("Li2I", crit["pretty_formula"]["$in"])
self.assertIn("CsLi2", crit["pretty_formula"]["$in"])
crit = MPRester.parse_criteria("Li-*-*")
self.assertIn("Li-Re-Ru", crit["chemsys"]["$in"])
self.assertNotIn("Li-Li", crit["chemsys"]["$in"])
comps = MPRester.parse_criteria("**O3")["pretty_formula"]["$in"]
for c in comps:
self.assertEqual(len(Composition(c)), 3, "Failed in %s" % c)
chemsys = MPRester.parse_criteria("{Fe,Mn}-O")["chemsys"]["$in"]
self.assertEqual(len(chemsys), 2)
comps = MPRester.parse_criteria("{Fe,Mn,Co}O")["pretty_formula"]["$in"]
self.assertEqual(len(comps), 3)
#Let's test some invalid symbols
self.assertRaises(KeyError, MPRester.parse_criteria, "li-fe")
self.assertRaises(KeyError, MPRester.parse_criteria, "LO2")
if __name__ == "__main__":
unittest.main()
| |
import re
from django.test import TestCase
from awl.tests.admin import RankAdmin
from awl.tests.models import Alone, Grouped
from awl.waelsteng import AdminToolsMixin
from awl.utils import refetch
# ============================================================================
class RankModelBase(TestCase, AdminToolsMixin):
def assertValues(self, objs, expected):
names = [o.name for o in objs]
compare = expected.split(',')
self.assertEqual(compare, names)
def in_order(self):
a = self.klass.objects.create(name='a', group='y')
self.klass.objects.create(name='b', group='y')
self.klass.objects.create(name='c', group='y')
self.assertValues(a.grouped_filter(), 'a,b,c')
def forced_order(self):
a = self.klass.objects.create(name='a', group='y')
self.klass.objects.create(name='c', group='y')
self.klass.objects.create(name='b', rank=2, group='y')
self.assertValues(a.grouped_filter(), 'a,b,c')
def same_order(self):
a = self.klass.objects.create(name='a', group='y')
a.rank = 1
a.save()
self.klass.objects.create(name='c', group='y')
self.klass.objects.create(name='b', rank=2, group='y')
self.assertValues(a.grouped_filter(), 'a,b,c')
def negative(self):
b = self.klass.objects.create(name='b', rank=-10, group='y')
self.assertEqual(1, b.rank)
a = self.klass.objects.create(name='a', rank=-10, group='y')
self.assertEqual(1, a.rank)
self.assertValues(a.grouped_filter(), 'a,b')
def too_large(self):
a = self.klass.objects.create(name='a', rank=10, group='y')
self.assertEqual(1, a.rank)
b = self.klass.objects.create(name='b', rank=10, group='y')
self.assertEqual(2, b.rank)
self.assertValues(a.grouped_filter(), 'a,b')
def move(self):
a = self.klass.objects.create(name='a', group='y')
self.klass.objects.create(name='b', group='y')
d = self.klass.objects.create(name='d', group='y')
c = self.klass.objects.create(name='c', group='y')
# test a simple move
#import pudb; pudb.set_trace()
c.rank -= 1
c.save()
self.assertValues(a.grouped_filter(), 'a,b,c,d')
# test an out of bounds move
d = refetch(d)
d.rank += 5
d.save()
d = refetch(d)
self.assertEqual(4, d.rank)
self.assertValues(a.grouped_filter(), 'a,b,c,d')
# test moving in the list
a = refetch(a)
a.rank += 2
a.save()
self.assertValues(a.grouped_filter(), 'b,c,a,d')
a = refetch(a)
a.rank = 0
a.save()
a = refetch(a)
self.assertEqual(1, a.rank)
self.assertValues(a.grouped_filter(), 'a,b,c,d')
def repack(self):
a = self.klass.objects.create(name='a', group='y')
b = self.klass.objects.create(name='b', group='y')
c = self.klass.objects.create(name='c', group='y')
d = self.klass.objects.create(name='d', group='y')
b.delete()
a.repack()
a = refetch(a)
self.assertEqual(1, a.rank)
c = refetch(c)
self.assertEqual(2, c.rank)
d = refetch(d)
self.assertEqual(3, d.rank)
self.assertValues(a.grouped_filter(), 'a,c,d')
def admin(self):
self.initiate()
a = self.klass.objects.create(name='a', group='y')
b = self.klass.objects.create(name='b', group='y')
c = self.klass.objects.create(name='c', group='y')
rank_admin = RankAdmin(self.klass, self.site)
self.assertEqual('', self.field_value(rank_admin, a, 'move_up'))
self.assertNotEqual('', self.field_value(rank_admin, b, 'move_up'))
self.assertNotEqual('', self.field_value(rank_admin, c, 'move_up'))
self.assertNotEqual('', self.field_value(rank_admin, a, 'move_down'))
self.assertNotEqual('', self.field_value(rank_admin, b, 'move_down'))
self.assertEqual('', self.field_value(rank_admin, c, 'move_down'))
html = self.field_value(rank_admin, a, 'move_both')
        self.assertEqual(html.count('rankedmodel/move'), 1)
html = self.field_value(rank_admin, b, 'move_both')
        self.assertEqual(html.count('rankedmodel/move'), 2)
html = self.field_value(rank_admin, c, 'move_both')
        self.assertEqual(html.count('rankedmodel/move'), 1)
# use the view to move b up one
headers = {
            'HTTP_REFERER': '/admin/',
}
self.visit_admin_link(rank_admin, b, 'move_up', response_code=302,
headers=headers)
self.assertValues(a.grouped_filter(), 'b,a,c')
# use the view to move a down one
a = refetch(a)
self.visit_admin_link(rank_admin, a, 'move_down', response_code=302,
headers=headers)
self.assertValues(a.grouped_filter(), 'b,c,a')
# Use the up-link from the "move_both" column
# -> url for c has two links, first match is up, regex group(1)
# is the link portion of the regex
c = refetch(c)
html = self.field_value(rank_admin, c, 'move_both')
pattern = re.compile('href="([^"]*)')
url = list(pattern.finditer(html))[0].group(1)
self.authed_get(url, response_code=302, headers=headers)
self.assertValues(c.grouped_filter(), 'c,b,a')
# Use the down-link from the "move_both" column
# -> url for b has two links, second match is down, regex group(1)
# is the link portion of the regex
b = refetch(b)
html = self.field_value(rank_admin, b, 'move_both')
pattern = re.compile('href="([^"]*)')
url = list(pattern.finditer(html))[1].group(1)
self.authed_get(url, response_code=302, headers=headers)
self.assertValues(c.grouped_filter(), 'c,a,b')
class AloneTests(RankModelBase):
def setUp(self):
self.klass = Alone
def test_in_order(self):
self.in_order()
def test_forced_order(self):
self.forced_order()
def test_same_order(self):
self.same_order()
def test_negative(self):
self.negative()
def test_too_large(self):
self.too_large()
def test_move(self):
self.move()
def test_repack(self):
self.repack()
def test_admin(self):
self.admin()
class GroupedTests(RankModelBase):
    # Run the same tests as AloneTests, but with sub-groups; at the end of
    # each test make sure that the "x" group (which isn't manipulated) is not
    # affected
def setUp(self):
self.klass = Grouped
Grouped.objects.create(group='x', name='a')
Grouped.objects.create(group='x', name='b')
Grouped.objects.create(group='x', name='c')
Grouped.objects.create(group='x', name='d')
def test_in_order(self):
self.in_order()
self.assertValues(self.klass.objects.filter(group='x'), 'a,b,c,d')
def test_forced_order(self):
self.forced_order()
self.assertValues(self.klass.objects.filter(group='x'), 'a,b,c,d')
def test_same_order(self):
self.same_order()
self.assertValues(self.klass.objects.filter(group='x'), 'a,b,c,d')
def test_negative(self):
self.negative()
self.assertValues(self.klass.objects.filter(group='x'), 'a,b,c,d')
def test_too_large(self):
self.too_large()
self.assertValues(self.klass.objects.filter(group='x'), 'a,b,c,d')
def test_move(self):
self.move()
self.assertValues(self.klass.objects.filter(group='x'), 'a,b,c,d')
def test_repack(self):
self.repack()
self.assertValues(self.klass.objects.filter(group='x'), 'a,b,c,d')
def test_admin(self):
self.admin()
self.assertValues(self.klass.objects.filter(group='x'), 'a,b,c,d')
| |
"""
Flux for Home-Assistant.
The idea was taken from https://github.com/KpaBap/hue-flux/
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.flux/
"""
from datetime import time
import logging
import voluptuous as vol
from homeassistant.components.light import is_on, turn_on
from homeassistant.components.sun import next_setting, next_rising
from homeassistant.components.switch import DOMAIN, SwitchDevice
from homeassistant.const import CONF_NAME, CONF_PLATFORM
from homeassistant.helpers.event import track_time_change
from homeassistant.util.color import (
color_temperature_to_rgb, color_RGB_to_xy,
color_temperature_kelvin_to_mired, HASS_COLOR_MIN, HASS_COLOR_MAX)
from homeassistant.util.dt import now as dt_now
import homeassistant.helpers.config_validation as cv
DEPENDENCIES = ['sun', 'light']
SUN = "sun.sun"
_LOGGER = logging.getLogger(__name__)
CONF_LIGHTS = 'lights'
CONF_START_TIME = 'start_time'
CONF_STOP_TIME = 'stop_time'
CONF_START_CT = 'start_colortemp'
CONF_SUNSET_CT = 'sunset_colortemp'
CONF_STOP_CT = 'stop_colortemp'
CONF_BRIGHTNESS = 'brightness'
CONF_DISABLE_BRIGHTNESS_ADJUST = 'disable_brightness_adjust'
CONF_MODE = 'mode'
MODE_XY = 'xy'
MODE_MIRED = 'mired'
DEFAULT_MODE = MODE_XY
PLATFORM_SCHEMA = vol.Schema({
vol.Required(CONF_PLATFORM): 'flux',
vol.Required(CONF_LIGHTS): cv.entity_ids,
vol.Optional(CONF_NAME, default="Flux"): cv.string,
vol.Optional(CONF_START_TIME): cv.time,
vol.Optional(CONF_STOP_TIME, default=time(22, 0)): cv.time,
vol.Optional(CONF_START_CT, default=4000):
vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
vol.Optional(CONF_SUNSET_CT, default=3000):
vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
vol.Optional(CONF_STOP_CT, default=1900):
vol.All(vol.Coerce(int), vol.Range(min=1000, max=40000)),
vol.Optional(CONF_BRIGHTNESS):
vol.All(vol.Coerce(int), vol.Range(min=0, max=255)),
    vol.Optional(CONF_DISABLE_BRIGHTNESS_ADJUST): cv.boolean,
vol.Optional(CONF_MODE, default=DEFAULT_MODE):
vol.Any(MODE_XY, MODE_MIRED)
})
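# Illustrative configuration.yaml entry accepted by PLATFORM_SCHEMA above
# (the entity ids and times are assumptions, not defaults):
#
# switch:
#   platform: flux
#   lights:
#     - light.desk
#     - light.couch
#   start_time: '7:00'
#   stop_time: '22:00'
#   start_colortemp: 4000
#   sunset_colortemp: 3000
#   stop_colortemp: 1900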
def set_lights_xy(hass, lights, x_val, y_val, brightness):
"""Set color of array of lights."""
for light in lights:
if is_on(hass, light):
turn_on(hass, light,
xy_color=[x_val, y_val],
brightness=brightness,
transition=30)
def set_lights_temp(hass, lights, mired, brightness):
"""Set color of array of lights."""
for light in lights:
if is_on(hass, light):
turn_on(hass, light,
color_temp=int(mired),
brightness=brightness,
transition=30)
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Flux switches."""
name = config.get(CONF_NAME)
lights = config.get(CONF_LIGHTS)
start_time = config.get(CONF_START_TIME)
stop_time = config.get(CONF_STOP_TIME)
start_colortemp = config.get(CONF_START_CT)
sunset_colortemp = config.get(CONF_SUNSET_CT)
stop_colortemp = config.get(CONF_STOP_CT)
brightness = config.get(CONF_BRIGHTNESS)
    disable_brightness_adjust = config.get(CONF_DISABLE_BRIGHTNESS_ADJUST)
mode = config.get(CONF_MODE)
flux = FluxSwitch(name, hass, False, lights, start_time, stop_time,
start_colortemp, sunset_colortemp, stop_colortemp,
brightness, disable_brightness_adjust, mode)
add_devices([flux])
def update(call=None):
"""Update lights."""
flux.flux_update()
hass.services.register(DOMAIN, name + '_update', update)
class FluxSwitch(SwitchDevice):
"""Representation of a Flux switch."""
def __init__(self, name, hass, state, lights, start_time, stop_time,
start_colortemp, sunset_colortemp, stop_colortemp,
brightness, disable_brightness_adjust, mode):
"""Initialize the Flux switch."""
self._name = name
self.hass = hass
self._state = state
self._lights = lights
self._start_time = start_time
self._stop_time = stop_time
self._start_colortemp = start_colortemp
self._sunset_colortemp = sunset_colortemp
self._stop_colortemp = stop_colortemp
self._brightness = brightness
self._disable_brightness_adjust = disable_brightness_adjust
self._mode = mode
self.unsub_tracker = None
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn on flux."""
if not self._state: # make initial update
self.flux_update()
self._state = True
self.unsub_tracker = track_time_change(self.hass, self.flux_update,
second=[0, 30])
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn off flux."""
if self.unsub_tracker is not None:
self.unsub_tracker()
self.unsub_tracker = None
self._state = False
self.schedule_update_ha_state()
def flux_update(self, now=None):
"""Update all the lights using flux."""
if now is None:
now = dt_now()
sunset = next_setting(self.hass, SUN).replace(day=now.day,
month=now.month,
year=now.year)
start_time = self.find_start_time(now)
stop_time = now.replace(hour=self._stop_time.hour,
minute=self._stop_time.minute,
second=0)
if start_time < now < sunset:
# Daytime
time_state = 'day'
temp_range = abs(self._start_colortemp - self._sunset_colortemp)
day_length = int(sunset.timestamp() - start_time.timestamp())
seconds_from_start = int(now.timestamp() - start_time.timestamp())
percentage_complete = seconds_from_start / day_length
temp_offset = temp_range * percentage_complete
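            # Worked example: with start_colortemp=4000 and sunset_colortemp=3000,
            # halfway through the day percentage_complete is 0.5, so temp_offset
            # is 500 and the target temperature is 4000 - 500 = 3500 Kelvin.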
if self._start_colortemp > self._sunset_colortemp:
temp = self._start_colortemp - temp_offset
else:
temp = self._start_colortemp + temp_offset
else:
            # Nighttime
time_state = 'night'
            if start_time < now < stop_time:
now_time = now
else:
now_time = stop_time
temp_range = abs(self._sunset_colortemp - self._stop_colortemp)
night_length = int(stop_time.timestamp() - sunset.timestamp())
seconds_from_sunset = int(now_time.timestamp() -
sunset.timestamp())
percentage_complete = seconds_from_sunset / night_length
temp_offset = temp_range * percentage_complete
if self._sunset_colortemp > self._stop_colortemp:
temp = self._sunset_colortemp - temp_offset
else:
temp = self._sunset_colortemp + temp_offset
x_val, y_val, b_val = color_RGB_to_xy(*color_temperature_to_rgb(temp))
brightness = self._brightness if self._brightness else b_val
if self._disable_brightness_adjust:
brightness = None
if self._mode == MODE_XY:
set_lights_xy(self.hass, self._lights, x_val,
y_val, brightness)
_LOGGER.info("Lights updated to x:%s y:%s brightness:%s, %s%%"
" of %s cycle complete at %s", x_val, y_val,
brightness, round(
percentage_complete * 100), time_state, now)
else:
# Convert to mired and clamp to allowed values
mired = color_temperature_kelvin_to_mired(temp)
mired = max(HASS_COLOR_MIN, min(mired, HASS_COLOR_MAX))
set_lights_temp(self.hass, self._lights, mired, brightness)
_LOGGER.info("Lights updated to mired:%s brightness:%s, %s%%"
" of %s cycle complete at %s", mired, brightness,
round(percentage_complete * 100), time_state, now)
def find_start_time(self, now):
"""Return sunrise or start_time if given."""
if self._start_time:
sunrise = now.replace(hour=self._start_time.hour,
minute=self._start_time.minute,
second=0)
else:
sunrise = next_rising(self.hass, SUN).replace(day=now.day,
month=now.month,
year=now.year)
return sunrise
| |
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import DataMigration
class Migration(DataMigration):
def forwards(self, orm):
for page in orm.Page.objects.all():
if page.menu_login_required:
page.limit_visibility_in_menu = 1
else:
page.limit_visibility_in_menu = None
page.save()
def backwards(self, orm):
for page in orm.Page.objects.all():
            if page.limit_visibility_in_menu == 1:
page.menu_login_required = True
else:
page.menu_login_required = False
page.save()
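    # Note the forwards/backwards pair above is lossy in one direction: any
    # limit_visibility_in_menu value other than 1 maps back to
    # menu_login_required == False.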
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.CMSPlugin']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_moderate': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'object_name': 'Page'},
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'menu_login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderate_descendants': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'moderate_page': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'cms.pagemoderatorstate': {
'Meta': {'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_moderate': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': ['auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': "orm['auth.User']"}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': "orm['auth.User']"}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('publisher_is_draft', 'language', 'page'),)", 'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True', 'blank': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
| |
import os
import sys
import time
import sublime
import imp
application_command_classes = []
window_command_classes = []
text_command_classes = []
all_command_classes = [application_command_classes, window_command_classes, text_command_classes]
all_callbacks = {'on_new': [], 'on_clone': [], 'on_load': [], 'on_close': [],
'on_pre_save': [], 'on_post_save': [], 'on_modified': [],
                 'on_selection_modified': [], 'on_activated': [], 'on_deactivated': [],
'on_project_load': [], 'on_project_close': [], 'on_query_context': [],
'on_query_completions': []}
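# Each key maps an event name to the list of EventListener instances that
# implement a method of that name; reload_plugin() fills these lists when a
# plugin module is (re)loaded.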
def unload_module(module):
if "unload_handler" in module.__dict__:
module.unload_handler()
# Unload the old plugins
if "plugins" in module.__dict__:
for p in module.plugins:
for cmd_cls_list in all_command_classes:
try:
cmd_cls_list.remove(p)
except ValueError:
pass
for c in all_callbacks.values():
try:
c.remove(p)
except ValueError:
pass
def unload_plugin(fname):
print "Unloading plugin", fname
modulename, ext = os.path.splitext(os.path.basename(fname))
was_loaded = modulename in sys.modules
if was_loaded:
m = __import__(modulename)
unload_module(m)
def reload_plugin(fname):
print "Reloading plugin", fname
path = os.path.dirname(fname)
# Change the current directory to that of the module. It's not safe to just
# add the modules directory to sys.path, as that won't accept unicode paths
# on Windows
oldpath = os.getcwdu()
os.chdir(path)
modulename, ext = os.path.splitext(os.path.basename(fname))
if modulename in sys.modules:
unload_module(sys.modules[modulename])
m_info = imp.find_module(modulename, ["."])
m = imp.load_module(modulename, *m_info)
# Restore the current directory
os.chdir(oldpath)
module_plugins = []
for type_name in dir(m):
try:
t = m.__dict__[type_name]
if t.__bases__:
is_plugin = False
if issubclass(t, ApplicationCommand):
application_command_classes.append(t)
is_plugin = True
if issubclass(t, WindowCommand):
window_command_classes.append(t)
is_plugin = True
if issubclass(t, TextCommand):
text_command_classes.append(t)
is_plugin = True
if is_plugin:
module_plugins.append(t)
if issubclass(t, EventListener):
obj = t()
                    for name, cb_list in all_callbacks.iteritems():
                        if name in dir(obj):
                            cb_list.append(obj)
module_plugins.append(obj)
except AttributeError:
pass
if len(module_plugins) > 0:
m.plugins = module_plugins
def create_application_commands():
cmds = []
for class_ in application_command_classes:
cmds.append(class_())
return cmds
def create_window_commands(window):
cmds = []
for class_ in window_command_classes:
cmds.append(class_(window))
return cmds
def create_text_commands(view):
cmds = []
for class_ in text_command_classes:
cmds.append(class_(view))
return cmds
EVENT_TIMEOUT = 0.2
FAST_EVENT_TIMEOUT = 1 / 60.0
first_time_msgs = set()
msgs = set()
def show_timeout(plugin_name, elapsed, callback):
global first_time_msgs
global msgs
key = plugin_name + callback
msg = ("A plugin (%s) may be making Sublime Text unresponsive by taking too " +
"long (%fs) in its %s callback.\n\nThis message can be disabled via the " +
"detect_slow_plugins setting") % (plugin_name, elapsed, callback)
# Give plugins one chance to respond slowly, to handle any initialisation issues etc.
# This allowance may be removed in the future due to startup time concerns
    if key not in first_time_msgs:
first_time_msgs.add(key)
return
    if key not in msgs:
msgs.add(key)
if sublime.load_settings('Preferences.sublime-settings').get('detect_slow_plugins', True):
sublime.error_message(msg)
blocking_api_call_count = 0
def on_blocking_api_call():
global blocking_api_call_count
blocking_api_call_count += 1
def run_timed_function(f, name, event_name, timeout):
global blocking_api_call_count
t0 = time.time()
blocking_count = blocking_api_call_count
ret = f()
elapsed = time.time() - t0
if elapsed > timeout and blocking_api_call_count == blocking_count:
show_timeout(name, elapsed, event_name)
return ret
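# Illustrative call (mirrors the callers below): run_timed_function wraps a
# listener callback and shows a slow-plugin warning when the callback runs
# longer than its timeout without making a blocking API call in the meantime.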
def on_new(v):
for callback in all_callbacks['on_new']:
run_timed_function(lambda: callback.on_new(v),
callback.__module__, "on_new", EVENT_TIMEOUT)
def on_clone(v):
for callback in all_callbacks['on_clone']:
run_timed_function(lambda: callback.on_clone(v),
callback.__module__, "on_clone", EVENT_TIMEOUT)
def on_load(v):
for callback in all_callbacks['on_load']:
run_timed_function(lambda: callback.on_load(v),
callback.__module__, "on_load", EVENT_TIMEOUT)
def on_close(v):
for callback in all_callbacks['on_close']:
run_timed_function(lambda: callback.on_close(v),
callback.__module__, "on_close", EVENT_TIMEOUT)
def on_pre_save(v):
for callback in all_callbacks['on_pre_save']:
run_timed_function(lambda: callback.on_pre_save(v),
callback.__module__, "on_pre_save", EVENT_TIMEOUT)
def on_post_save(v):
for callback in all_callbacks['on_post_save']:
run_timed_function(lambda: callback.on_post_save(v),
callback.__module__, "on_post_save", EVENT_TIMEOUT)
def on_modified(v):
for callback in all_callbacks['on_modified']:
run_timed_function(lambda: callback.on_modified(v),
callback.__module__, "on_modified", FAST_EVENT_TIMEOUT)
def on_selection_modified(v):
for callback in all_callbacks['on_selection_modified']:
run_timed_function(lambda: callback.on_selection_modified(v),
callback.__module__, "on_selection_modified", FAST_EVENT_TIMEOUT)
def on_activated(v):
for callback in all_callbacks['on_activated']:
run_timed_function(lambda: callback.on_activated(v),
callback.__module__, "on_activated", EVENT_TIMEOUT)
def on_deactivated(v):
for callback in all_callbacks['on_deactivated']:
run_timed_function(lambda: callback.on_deactivated(v),
callback.__module__, "on_deactivated", EVENT_TIMEOUT)
def on_project_load(v):
for callback in all_callbacks['on_project_load']:
run_timed_function(lambda: callback.on_project_load(v),
callback.__module__, "on_project_load", EVENT_TIMEOUT)
def on_project_close(v):
for callback in all_callbacks['on_project_close']:
run_timed_function(lambda: callback.on_project_close(v),
callback.__module__, "on_project_close", EVENT_TIMEOUT)
def on_query_context(v, key, operator, operand, match_all):
for callback in all_callbacks['on_query_context']:
val = run_timed_function(lambda: callback.on_query_context(v, key, operator, operand, match_all),
callback.__module__, "on_query_context", FAST_EVENT_TIMEOUT)
if val:
return True
return False
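# on_query_completions merges the results from every listener: plain lists
# are concatenated, and (completions, flags) tuples additionally OR their
# flags together.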
def on_query_completions(v, prefix, locations):
completions = []
flags = 0
for callback in all_callbacks['on_query_completions']:
res = callback.on_query_completions(v, prefix, locations)
if isinstance(res, tuple):
completions += res[0]
flags |= res[1]
elif isinstance(res, list):
completions += res
    return (completions, flags)
class Command(object):
def name(self):
clsname = self.__class__.__name__
name = clsname[0].lower()
last_upper = False
for c in clsname[1:]:
if c.isupper() and not last_upper:
name += '_'
name += c.lower()
else:
name += c
last_upper = c.isupper()
if name.endswith("_command"):
name = name[0:-8]
return name
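    # Example of the conversion above: a class named "ExampleCommand" yields
    # the command name "example", and "ShowPanelCommand" yields "show_panel".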
def is_enabled_(self, args):
try:
if args:
if 'event' in args:
del args['event']
return self.is_enabled(**args)
else:
return self.is_enabled()
except TypeError:
return self.is_enabled()
def is_enabled(self):
return True
def is_visible_(self, args):
try:
if args:
return self.is_visible(**args)
else:
return self.is_visible()
except TypeError:
return self.is_visible()
def is_visible(self):
return True
def is_checked_(self, args):
try:
if args:
return self.is_checked(**args)
else:
return self.is_checked()
except TypeError:
return self.is_checked()
def is_checked(self):
return False
def description_(self, args):
try:
if args:
return self.description(**args)
else:
return self.description()
        except TypeError:
            return None
def description(self):
return None
class ApplicationCommand(Command):
def run_(self, args):
if args:
if 'event' in args:
del args['event']
return self.run(**args)
else:
return self.run()
def run(self):
pass
class WindowCommand(Command):
def __init__(self, window):
self.window = window
def run_(self, args):
if args:
if 'event' in args:
del args['event']
return self.run(**args)
else:
return self.run()
def run(self):
pass
class TextCommand(Command):
def __init__(self, view):
self.view = view
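    # run_ wraps run() in a begin_edit()/end_edit() pair so that all of the
    # command's modifications are grouped into a single undo step.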
def run_(self, args):
if args:
if 'event' in args:
del args['event']
edit = self.view.begin_edit(self.name(), args)
try:
return self.run(edit, **args)
finally:
self.view.end_edit(edit)
else:
edit = self.view.begin_edit(self.name())
try:
return self.run(edit)
finally:
self.view.end_edit(edit)
def run(self, edit):
pass
class EventListener(object):
pass
| |
#!/usr/bin/env python
# pylint: disable=W0612,C0111,C0301
import wx
import pandas as pd
from pmagpy.controlled_vocabularies2 import vocab
# this module will provide all the functionality for the drop-down controlled vocabulary menus
class Menus(object):
"""
Drop-down controlled vocabulary menus for wxPython grid
"""
def __init__(self, data_type, ErMagicCheck, grid, belongs_to):
"""
take: data_type (string), ErMagicCheck (top level class object for ErMagic steps 1-6),
grid (grid object), belongs_to (list of options for data object to belong to, i.e. locations for the site Menus)
"""
# if controlled vocabularies haven't already been grabbed from earthref
# do so now
if not any(vocab.vocabularies):
vocab.get_all_vocabulary()
self.data_type = data_type
self.check = ErMagicCheck # check is top level class object for entire ErMagic steps 1-6
self.grid = grid
self.window = grid.Parent # parent window in which grid resides
self.belongs_to = []
# belongs_to can either be a list of strings OR a list of Pmag_objects
if belongs_to:
for item in belongs_to:
try:
self.belongs_to.append(item.name)
except AttributeError:
self.belongs_to.append(item)
self.selected_col = None
self.selection = [] # [(row, col), (row, col)], sequentially down a column
self.dispersed_selection = [] # [(row, col), (row, col)], not sequential
self.col_color = None
self.colon_delimited_lst = ['specimen_type', 'specimen_class', 'specimen_lithology',
'sample_type', 'sample_class', 'sample_lithology',
'site_type', 'site_class', 'site_lithology',
'er_specimen_names', 'er_sample_names', 'er_site_names',
'er_location_names', 'magic_method_codes', 'magic_method_codes++']
self.InitUI()
def InitUI(self):
belongs_to = self.belongs_to
self.choices = {}
if self.data_type == 'specimen':
self.choices = {1: (belongs_to, False), 3: (vocab.vocabularies['class'], False), 4: (vocab.vocabularies['lithology'], True), 5: (vocab.vocabularies['type'], False)}
if self.data_type == 'sample' or self.data_type == 'site':
self.choices = {1: (belongs_to, False), 3: (vocab.vocabularies['class'], False), 4: (vocab.vocabularies['lithology'], True), 5: (vocab.vocabularies['type'], False)}
if self.data_type in ['specimen', 'sample', 'site']:
            for col, label in [(3, '{}_class**'.format(self.data_type)),
                               (4, '{}_lithology**'.format(self.data_type)),
                               (5, '{}_type**'.format(self.data_type))]:
                self.grid.SetColLabelValue(col, label)
if self.data_type == 'site':
self.choices[6] = (vocab.vocabularies['site_definition'], False)
self.grid.SetColLabelValue(6, 'site_definition**')
if self.data_type == 'location':
self.choices = {2: (vocab.vocabularies['location_type'], False)}
self.grid.SetColLabelValue(2, 'location_type**')
if self.data_type == 'age':
self.choices = {3: (vocab.vocabularies['age_unit'], False)}
            self.grid.SetColLabelValue(3, 'age_unit**')
for row in range(self.grid.GetNumberRows()):
self.grid.SetReadOnly(row, 0)
if self.data_type == 'orient':
self.choices = {1: (['g', 'b'], False)}
if self.data_type == 'result':
self.choices = {}
self.window.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK, lambda event: self.on_left_click(event, self.grid, self.choices), self.grid)
#
cols = self.grid.GetNumberCols()
col_labels = [self.grid.GetColLabelValue(col) for col in range(cols)]
# check if any additional columns have associated controlled vocabularies
# if so, get the vocabulary list from the MagIC API
for col_number, label in enumerate(col_labels):
self.add_drop_down(col_number, label)
def add_drop_down(self, col_number, col_label):
"""
Add a correctly formatted drop-down-menu for given col_label, if required.
Otherwise do nothing.
"""
if col_label in ['magic_method_codes', 'magic_method_codes++']:
self.add_method_drop_down(col_number, col_label)
if col_label in vocab.possible_vocabularies:
            if col_number not in self.choices:  # if not already assigned above
self.grid.SetColLabelValue(col_number, col_label + "**") # mark it as using a controlled vocabulary
url = 'https://api.earthref.org/MagIC/vocabularies/{}.json'.format(col_label)
controlled_vocabulary = pd.io.json.read_json(url)
stripped_list = []
for item in controlled_vocabulary[col_label][0]:
try:
stripped_list.append(str(item['item']))
except UnicodeEncodeError:
# skips items with non ASCII characters
pass
if len(stripped_list) > 100:
# split out the list alphabetically, into a dict of lists {'A': ['alpha', 'artist'], 'B': ['beta', 'beggar']...}
dictionary = {}
for item in stripped_list:
letter = item[0].upper()
                    if letter not in dictionary:
dictionary[letter] = []
dictionary[letter].append(item)
stripped_list = dictionary
            two_tiered = isinstance(stripped_list, dict)
self.choices[col_number] = (stripped_list, two_tiered)
def add_method_drop_down(self, col_number, col_label):
"""
Add drop-down-menu options for magic_method_codes columns
"""
if self.data_type == 'age':
method_list = vocab.age_methods
elif '++' in col_label:
method_list = vocab.pmag_methods
elif self.data_type == 'result':
method_list = vocab.pmag_methods
else:
method_list = vocab.er_methods
self.choices[col_number] = (method_list, True)
def on_label_click(self, event):
col = event.GetCol()
color = self.grid.GetCellBackgroundColour(0, col)
if color != (191, 216, 216, 255): # light blue
self.col_color = color
if col not in (-1, 0):
# if a new column was chosen without de-selecting the previous column, deselect the old selected_col
            if self.selected_col is not None and self.selected_col != col:
col_label_value = self.grid.GetColLabelValue(self.selected_col)
self.grid.SetColLabelValue(self.selected_col, col_label_value[:-10])
for row in range(self.grid.GetNumberRows()):
                    self.grid.SetCellBackgroundColour(row, self.selected_col, self.col_color)  # 'white'
self.grid.ForceRefresh()
# deselect col if user is clicking on it a second time
if col == self.selected_col:
col_label_value = self.grid.GetColLabelValue(col)
self.grid.SetColLabelValue(col, col_label_value[:-10])
for row in range(self.grid.GetNumberRows()):
self.grid.SetCellBackgroundColour(row, col, self.col_color) # 'white'
self.grid.ForceRefresh()
self.selected_col = None
# otherwise, select (highlight) col
else:
self.selected_col = col
col_label_value = self.grid.GetColLabelValue(col)
self.grid.SetColLabelValue(col, col_label_value + " \nEDIT ALL")
for row in range(self.grid.GetNumberRows()):
self.grid.SetCellBackgroundColour(row, col, 'light blue')
self.grid.ForceRefresh()
            has_dropdown = col in self.choices
# if the column has no drop-down list, allow user to edit all cells in the column through text entry
if not has_dropdown and col != 0:
if self.selected_col == col:
default_value = self.grid.GetCellValue(0, col)
data = None
dialog = wx.TextEntryDialog(None, "Enter value for all cells in the column\nNote: this will overwrite any existing cell values", "Edit All", default_value, style=wx.OK|wx.CANCEL)
dialog.Centre()
if dialog.ShowModal() == wx.ID_OK:
data = dialog.GetValue()
for row in range(self.grid.GetNumberRows()):
self.grid.SetCellValue(row, col, str(data))
if self.grid.changes:
self.grid.changes.add(row)
else:
self.grid.changes = {row}
dialog.Destroy()
# then deselect column
col_label_value = self.grid.GetColLabelValue(col)
self.grid.SetColLabelValue(col, col_label_value[:-10])
for row in range(self.grid.GetNumberRows()):
self.grid.SetCellBackgroundColour(row, col, self.col_color) # 'white'
self.grid.ForceRefresh()
self.selected_col = None
    def clean_up(self):
"""
de-select grid cols, refresh grid
"""
if self.selected_col:
col_label_value = self.grid.GetColLabelValue(self.selected_col)
            # str.strip() treats its argument as a set of characters, not a
            # substring, so use replace() to drop the " \nEDIT ALL" marker
            col_label_value = col_label_value.replace(' \nEDIT ALL', '')
self.grid.SetColLabelValue(self.selected_col, col_label_value)
for row in range(self.grid.GetNumberRows()):
self.grid.SetCellBackgroundColour(row, self.selected_col, 'white')
self.grid.ForceRefresh()
def on_left_click(self, event, grid, choices):
"""
        Create a popup menu when the user clicks on a column, if that column
        is in the list of choices that get a drop-down menu.
        Allows the user to edit the column, but only from the available values.
"""
color = self.grid.GetCellBackgroundColour(event.GetRow(), event.GetCol())
# allow user to cherry-pick cells for editing. gets selection of meta key for mac, ctrl key for pc
if event.ControlDown() or event.MetaDown():
row, col = event.GetRow(), event.GetCol()
if (row, col) not in self.dispersed_selection:
self.dispersed_selection.append((row, col))
self.grid.SetCellBackgroundColour(row, col, 'light blue')
else:
self.dispersed_selection.remove((row, col))
                self.grid.SetCellBackgroundColour(row, col, color)  # 'white'
self.grid.ForceRefresh()
return
if event.ShiftDown(): # allow user to highlight multiple consecutive cells in a column
previous_col = self.grid.GetGridCursorCol()
previous_row = self.grid.GetGridCursorRow()
col = event.GetCol()
row = event.GetRow()
if col != previous_col:
return
else:
if row > previous_row:
row_range = list(range(previous_row, row+1))
else:
row_range = list(range(row, previous_row+1))
for r in row_range:
self.grid.SetCellBackgroundColour(r, col, 'light blue')
self.selection.append((r, col))
self.grid.ForceRefresh()
return
selection = False
if self.dispersed_selection:
is_dispersed = True
selection = self.dispersed_selection
if self.selection:
is_dispersed = False
selection = self.selection
try:
col = event.GetCol()
row = event.GetRow()
except AttributeError:
row, col = selection[0][0], selection[0][1]
self.grid.SetGridCursor(row, col)
        if col in choices:  # column should have a pop-up menu
menu = wx.Menu()
two_tiered = choices[col][1]
choices = choices[col][0]
if not two_tiered: # menu is one tiered
if 'CLEAR cell of all values' not in choices:
choices.insert(0, 'CLEAR cell of all values')
for choice in choices:
if not choice:
choice = " " # prevents error if choice is an empty string
menuitem = menu.Append(wx.ID_ANY, str(choice))
self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), menuitem)
self.show_menu(event, menu)
else: # menu is two_tiered
clear = menu.Append(-1, 'CLEAR cell of all values')
self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), clear)
for choice in sorted(choices.items()):
submenu = wx.Menu()
for item in choice[1]:
menuitem = submenu.Append(-1, str(item))
self.window.Bind(wx.EVT_MENU, lambda event: self.on_select_menuitem(event, grid, row, col, selection), menuitem)
menu.AppendMenu(-1, choice[0], submenu)
self.show_menu(event, menu)
if selection:
# re-whiten the cells that were previously highlighted
for row, col in selection:
self.grid.SetCellBackgroundColour(row, col, self.col_color)
self.dispersed_selection = []
self.selection = []
self.grid.ForceRefresh()
def show_menu(self, event, menu):
position = event.GetPosition()
horizontal, vertical = position
grid_horizontal, grid_vertical = self.grid.GetSize()
if grid_vertical - vertical < 30 and self.grid.GetNumberRows() > 4:
self.grid.PopupMenu(menu, (horizontal+20, 100))
else:
self.window.PopupMenu(menu)
menu.Destroy()
def update_drop_down_menu(self, grid, choices):
self.window.Bind(wx.grid.EVT_GRID_CELL_LEFT_CLICK, lambda event: self.on_left_click(event, grid, choices), grid)
self.choices = choices
def on_select_menuitem(self, event, grid, row, col, selection):
"""
        Set the value of the selected cell to the value selected from the menu.
"""
if self.grid.changes: # if user selects a menuitem, that is an edit
self.grid.changes.add(row)
else:
self.grid.changes = {row}
item_id = event.GetId()
item = event.EventObject.FindItemById(item_id)
label = item.Label
cell_value = grid.GetCellValue(row, col)
if str(label) == "CLEAR cell of all values":
label = ""
        # strip() treats its argument as a character set, so remove the
        # " \nEDIT ALL" marker explicitly before trimming the trailing "**"
        col_label = grid.GetColLabelValue(col).replace(' \nEDIT ALL', '').rstrip('*')
if col_label in self.colon_delimited_lst and label:
            if label.lower() not in cell_value.lower():
label += (":" + cell_value).rstrip(':')
else:
label = cell_value
if self.selected_col and self.selected_col == col:
for row in range(self.grid.GetNumberRows()):
grid.SetCellValue(row, col, label)
if self.grid.changes:
self.grid.changes.add(row)
else:
self.grid.changes = {row}
#self.selected_col = None
else:
grid.SetCellValue(row, col, label)
if selection:
for cell in selection:
row = cell[0]
grid.SetCellValue(row, col, label)
return
| |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterpolar"
_path_str = "scatterpolar.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
}
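    # Illustrative construction (an assumed call site, not part of this
    # module): Hoverlabel(bgcolor="white", namelength=20), or the equivalent
    # dict passed as the `hoverlabel` property of a scatterpolar trace.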
# align
# -----
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
        two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
# alignsrc
# --------
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatterpolar.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
Returns
-------
plotly.graph_objs.scatterpolar.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
        The 'namelength' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatterpolar.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatterpolar.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
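# Editor's sketch (not part of the generated module): constructing the
# Hoverlabel defined above from keyword arguments or an equivalent dict.
# Assumes plotly is installed; all values are illustrative.
#
#   import plotly.graph_objects as go
#
#   hl = go.scatterpolar.Hoverlabel(
#       bgcolor="white",
#       bordercolor="rebeccapurple",
#       font=dict(family="Arial", size=12, color="black"),
#       namelength=-1,  # -1 shows the whole trace name
#   )
#   fig = go.Figure(go.Scatterpolar(r=[1, 2, 3], theta=[0, 45, 90],
#                                   hoverlabel=hl))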
"""Support for functionality to interact with Android TV / Fire TV devices."""
import functools
import logging
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY, SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE, SUPPORT_STOP, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP)
from homeassistant.const import (
ATTR_COMMAND, ATTR_ENTITY_ID, CONF_DEVICE_CLASS, CONF_HOST, CONF_NAME,
CONF_PORT, STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING,
STATE_STANDBY)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
ANDROIDTV_DOMAIN = 'androidtv'
_LOGGER = logging.getLogger(__name__)
SUPPORT_ANDROIDTV = SUPPORT_PAUSE | SUPPORT_PLAY | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_NEXT_TRACK | SUPPORT_STOP | SUPPORT_VOLUME_MUTE | \
SUPPORT_VOLUME_STEP
SUPPORT_FIRETV = SUPPORT_PAUSE | SUPPORT_PLAY | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PREVIOUS_TRACK | \
SUPPORT_NEXT_TRACK | SUPPORT_SELECT_SOURCE | SUPPORT_STOP
CONF_ADBKEY = 'adbkey'
CONF_ADB_SERVER_IP = 'adb_server_ip'
CONF_ADB_SERVER_PORT = 'adb_server_port'
CONF_APPS = 'apps'
CONF_GET_SOURCES = 'get_sources'
CONF_TURN_ON_COMMAND = 'turn_on_command'
CONF_TURN_OFF_COMMAND = 'turn_off_command'
DEFAULT_NAME = 'Android TV'
DEFAULT_PORT = 5555
DEFAULT_ADB_SERVER_PORT = 5037
DEFAULT_GET_SOURCES = True
DEFAULT_DEVICE_CLASS = 'auto'
DEVICE_ANDROIDTV = 'androidtv'
DEVICE_FIRETV = 'firetv'
DEVICE_CLASSES = [DEFAULT_DEVICE_CLASS, DEVICE_ANDROIDTV, DEVICE_FIRETV]
SERVICE_ADB_COMMAND = 'adb_command'
SERVICE_ADB_COMMAND_SCHEMA = vol.Schema({
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_COMMAND): cv.string,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_DEVICE_CLASS, default=DEFAULT_DEVICE_CLASS):
vol.In(DEVICE_CLASSES),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_ADBKEY): cv.isfile,
vol.Optional(CONF_ADB_SERVER_IP): cv.string,
vol.Optional(CONF_ADB_SERVER_PORT, default=DEFAULT_ADB_SERVER_PORT):
cv.port,
vol.Optional(CONF_GET_SOURCES, default=DEFAULT_GET_SOURCES): cv.boolean,
vol.Optional(CONF_APPS, default=dict()):
vol.Schema({cv.string: cv.string}),
vol.Optional(CONF_TURN_ON_COMMAND): cv.string,
vol.Optional(CONF_TURN_OFF_COMMAND): cv.string
})
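# Example configuration.yaml entry for this platform (editor's sketch;
# keys mirror PLATFORM_SCHEMA above, values are illustrative):
#
#   media_player:
#     - platform: androidtv
#       host: 192.168.0.10
#       name: "Living Room TV"
#       device_class: androidtv
#       adb_server_ip: 127.0.0.1
#       apps:
#         com.netflix.ninja: "Netflix"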
# Translate from `AndroidTV` / `FireTV` reported state to HA state.
ANDROIDTV_STATES = {'off': STATE_OFF,
'idle': STATE_IDLE,
'standby': STATE_STANDBY,
'playing': STATE_PLAYING,
'paused': STATE_PAUSED}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Android TV / Fire TV platform."""
from androidtv import setup
hass.data.setdefault(ANDROIDTV_DOMAIN, {})
host = '{0}:{1}'.format(config[CONF_HOST], config[CONF_PORT])
if CONF_ADB_SERVER_IP not in config:
# Use "python-adb" (Python ADB implementation)
adb_log = "using Python ADB implementation "
if CONF_ADBKEY in config:
aftv = setup(host, config[CONF_ADBKEY],
device_class=config[CONF_DEVICE_CLASS])
adb_log += "with adbkey='{0}'".format(config[CONF_ADBKEY])
else:
aftv = setup(host, device_class=config[CONF_DEVICE_CLASS])
adb_log += "without adbkey authentication"
else:
# Use "pure-python-adb" (communicate with ADB server)
aftv = setup(host, adb_server_ip=config[CONF_ADB_SERVER_IP],
adb_server_port=config[CONF_ADB_SERVER_PORT],
device_class=config[CONF_DEVICE_CLASS])
adb_log = "using ADB server at {0}:{1}".format(
config[CONF_ADB_SERVER_IP], config[CONF_ADB_SERVER_PORT])
if not aftv.available:
# Determine the name that will be used for the device in the log
if CONF_NAME in config:
device_name = config[CONF_NAME]
elif config[CONF_DEVICE_CLASS] == DEVICE_ANDROIDTV:
device_name = 'Android TV device'
elif config[CONF_DEVICE_CLASS] == DEVICE_FIRETV:
device_name = 'Fire TV device'
else:
device_name = 'Android TV / Fire TV device'
_LOGGER.warning("Could not connect to %s at %s %s",
device_name, host, adb_log)
raise PlatformNotReady
if host in hass.data[ANDROIDTV_DOMAIN]:
_LOGGER.warning("Platform already setup on %s, skipping", host)
else:
if aftv.DEVICE_CLASS == DEVICE_ANDROIDTV:
device = AndroidTVDevice(aftv, config[CONF_NAME],
config[CONF_APPS],
config.get(CONF_TURN_ON_COMMAND),
config.get(CONF_TURN_OFF_COMMAND))
device_name = config[CONF_NAME] if CONF_NAME in config \
else 'Android TV'
else:
device = FireTVDevice(aftv, config[CONF_NAME], config[CONF_APPS],
config[CONF_GET_SOURCES],
config.get(CONF_TURN_ON_COMMAND),
config.get(CONF_TURN_OFF_COMMAND))
device_name = config[CONF_NAME] if CONF_NAME in config \
else 'Fire TV'
add_entities([device])
_LOGGER.debug("Setup %s at %s%s", device_name, host, adb_log)
hass.data[ANDROIDTV_DOMAIN][host] = device
if hass.services.has_service(ANDROIDTV_DOMAIN, SERVICE_ADB_COMMAND):
return
def service_adb_command(service):
"""Dispatch service calls to target entities."""
cmd = service.data.get(ATTR_COMMAND)
entity_id = service.data.get(ATTR_ENTITY_ID)
target_devices = [dev for dev in hass.data[ANDROIDTV_DOMAIN].values()
if dev.entity_id in entity_id]
for target_device in target_devices:
output = target_device.adb_command(cmd)
# log the output, if there is any
if output:
_LOGGER.info("Output of command '%s' from '%s': %s",
cmd, target_device.entity_id, output)
hass.services.register(ANDROIDTV_DOMAIN, SERVICE_ADB_COMMAND,
service_adb_command,
schema=SERVICE_ADB_COMMAND_SCHEMA)
def adb_decorator(override_available=False):
"""Send an ADB command if the device is available and catch exceptions."""
def _adb_decorator(func):
"""Wait if previous ADB commands haven't finished."""
@functools.wraps(func)
def _adb_exception_catcher(self, *args, **kwargs):
# If the device is unavailable, don't do anything
if not self.available and not override_available:
return None
try:
return func(self, *args, **kwargs)
except self.exceptions as err:
_LOGGER.error(
"Failed to execute an ADB command; will attempt to "
"re-establish the ADB connection on the next update. Error: %s", err)
self._available = False # pylint: disable=protected-access
return None
return _adb_exception_catcher
return _adb_decorator
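# Editor's sketch (illustrative, not part of the component): the factory
# above produces a decorator that short-circuits to None while the device
# is unavailable and marks it unavailable when one of `self.exceptions`
# is raised:
#
#   class _FakeDevice:
#       def __init__(self):
#           self._available = True
#           self.exceptions = (RuntimeError,)
#
#       @property
#       def available(self):
#           return self._available
#
#       @adb_decorator()
#       def poke(self):
#           raise RuntimeError("connection dropped")
#
#   d = _FakeDevice()
#   d.poke()                 # returns None and logs the error
#   assert not d.available   # device flagged for reconnection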
class ADBDevice(MediaPlayerDevice):
"""Representation of an Android TV or Fire TV device."""
def __init__(self, aftv, name, apps, turn_on_command,
turn_off_command):
"""Initialize the Android TV / Fire TV device."""
from androidtv.constants import APPS, KEYS
self.aftv = aftv
self._name = name
# Copy the defaults so per-device customizations do not mutate the
# shared androidtv.constants.APPS mapping
self._apps = dict(APPS)
self._apps.update(apps)
self._keys = KEYS
self.turn_on_command = turn_on_command
self.turn_off_command = turn_off_command
# ADB exceptions to catch
if not self.aftv.adb_server_ip:
# Using "python-adb" (Python ADB implementation)
from adb.adb_protocol import (InvalidChecksumError,
InvalidCommandError,
InvalidResponseError)
from adb.usb_exceptions import TcpTimeoutException
self.exceptions = (AttributeError, BrokenPipeError, TypeError,
ValueError, InvalidChecksumError,
InvalidCommandError, InvalidResponseError,
TcpTimeoutException)
else:
# Using "pure-python-adb" (communicate with ADB server)
self.exceptions = (ConnectionResetError, RuntimeError)
# Property attributes
self._adb_response = None
self._available = self.aftv.available
self._current_app = None
self._state = None
@property
def app_id(self):
"""Return the current app."""
return self._current_app
@property
def app_name(self):
"""Return the friendly name of the current app."""
return self._apps.get(self._current_app, self._current_app)
@property
def available(self):
"""Return whether or not the ADB connection is valid."""
return self._available
@property
def device_state_attributes(self):
"""Provide the last ADB command's response as an attribute."""
return {'adb_response': self._adb_response}
@property
def name(self):
"""Return the device name."""
return self._name
@property
def should_poll(self):
"""Device should be polled."""
return True
@property
def state(self):
"""Return the state of the player."""
return self._state
@adb_decorator()
def media_play(self):
"""Send play command."""
self.aftv.media_play()
@adb_decorator()
def media_pause(self):
"""Send pause command."""
self.aftv.media_pause()
@adb_decorator()
def media_play_pause(self):
"""Send play/pause command."""
self.aftv.media_play_pause()
@adb_decorator()
def turn_on(self):
"""Turn on the device."""
if self.turn_on_command:
self.aftv.adb_shell(self.turn_on_command)
else:
self.aftv.turn_on()
@adb_decorator()
def turn_off(self):
"""Turn off the device."""
if self.turn_off_command:
self.aftv.adb_shell(self.turn_off_command)
else:
self.aftv.turn_off()
@adb_decorator()
def media_previous_track(self):
"""Send previous track command (results in rewind)."""
self.aftv.media_previous_track()
@adb_decorator()
def media_next_track(self):
"""Send next track command (results in fast-forward)."""
self.aftv.media_next_track()
@adb_decorator()
def adb_command(self, cmd):
"""Send an ADB command to an Android TV / Fire TV device."""
key = self._keys.get(cmd)
if key:
self.aftv.adb_shell('input keyevent {}'.format(key))
self._adb_response = None
self.schedule_update_ha_state()
return
if cmd == 'GET_PROPERTIES':
self._adb_response = str(self.aftv.get_properties_dict())
self.schedule_update_ha_state()
return self._adb_response
response = self.aftv.adb_shell(cmd)
if isinstance(response, str) and response.strip():
self._adb_response = response.strip()
else:
self._adb_response = None
self.schedule_update_ha_state()
return self._adb_response
class AndroidTVDevice(ADBDevice):
"""Representation of an Android TV device."""
def __init__(self, aftv, name, apps, turn_on_command,
turn_off_command):
"""Initialize the Android TV device."""
super().__init__(aftv, name, apps, turn_on_command,
turn_off_command)
self._device = None
self._device_properties = self.aftv.device_properties
self._is_volume_muted = None
self._unique_id = self._device_properties.get('serialno')
self._volume_level = None
@adb_decorator(override_available=True)
def update(self):
"""Update the device state and, if necessary, re-connect."""
# Check if device is disconnected.
if not self._available:
# Try to connect
self._available = self.aftv.connect(always_log_errors=False)
# To be safe, wait until the next update to run ADB commands.
return
# If the ADB connection is not intact, don't update.
if not self._available:
return
# Get the updated state and attributes.
state, self._current_app, self._device, self._is_volume_muted, \
self._volume_level = self.aftv.update()
self._state = ANDROIDTV_STATES[state]
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._is_volume_muted
@property
def source(self):
"""Return the current playback device."""
return self._device
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_ANDROIDTV
@property
def unique_id(self):
"""Return the device unique id."""
return self._unique_id
@property
def volume_level(self):
"""Return the volume level."""
return self._volume_level
@adb_decorator()
def media_stop(self):
"""Send stop command."""
self.aftv.media_stop()
@adb_decorator()
def mute_volume(self, mute):
"""Mute the volume."""
self.aftv.mute_volume()
@adb_decorator()
def volume_down(self):
"""Send volume down command."""
self._volume_level = self.aftv.volume_down(self._volume_level)
@adb_decorator()
def volume_up(self):
"""Send volume up command."""
self._volume_level = self.aftv.volume_up(self._volume_level)
class FireTVDevice(ADBDevice):
"""Representation of a Fire TV device."""
def __init__(self, aftv, name, apps, get_sources,
turn_on_command, turn_off_command):
"""Initialize the Fire TV device."""
super().__init__(aftv, name, apps, turn_on_command,
turn_off_command)
self._get_sources = get_sources
self._running_apps = None
@adb_decorator(override_available=True)
def update(self):
"""Update the device state and, if necessary, re-connect."""
# Check if device is disconnected.
if not self._available:
# Try to connect
self._available = self.aftv.connect(always_log_errors=False)
# To be safe, wait until the next update to run ADB commands.
return
# If the ADB connection is not intact, don't update.
if not self._available:
return
# Get the `state`, `current_app`, and `running_apps`.
state, self._current_app, self._running_apps = \
self.aftv.update(self._get_sources)
self._state = ANDROIDTV_STATES[state]
@property
def source(self):
"""Return the current app."""
return self._current_app
@property
def source_list(self):
"""Return a list of running apps."""
return self._running_apps
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_FIRETV
@adb_decorator()
def media_stop(self):
"""Send stop (back) command."""
self.aftv.back()
@adb_decorator()
def select_source(self, source):
"""Select input source.
If the source starts with a '!', then it will close the app instead of
opening it.
"""
if isinstance(source, str):
if not source.startswith('!'):
self.aftv.launch_app(source)
else:
self.aftv.stop_app(source[1:].lstrip())
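# Editor's sketch: once the platform is set up, the custom service
# registered above can be invoked like any other Home Assistant service
# (the entity id is illustrative):
#
#   hass.services.call(ANDROIDTV_DOMAIN, SERVICE_ADB_COMMAND, {
#       ATTR_ENTITY_ID: 'media_player.living_room_tv',
#       ATTR_COMMAND: 'HOME',
#   })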
"""Examine callable regions following genome mapping of short reads.
Identifies callable analysis regions surrounded by larger regions lacking
aligned bases. This allows parallelization of post-processing and
variant calling across smaller chromosome chunks, with each
sub-section of the mapping handled separately.
Regions are split to try to maintain relative uniformity across the
genome and avoid extremes of large blocks or large numbers of
small blocks.
"""
import collections
import os
from functools import reduce
import numpy
import pybedtools
import pysam
import toolz as tz
from bcbio import broad, utils
from bcbio.cwl import cwlutils
from bcbio.log import logger
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import shared
from bcbio.pipeline import datadict as dd
from bcbio.variation import coverage
from bcbio.variation import multi as vmulti
def sample_callable_bed(bam_file, ref_file, data):
"""Retrieve callable regions for a sample subset by defined analysis regions.
"""
CovInfo = collections.namedtuple("CovInfo", "callable, raw_callable, depth_files")
config = data["config"]
out_file = "%s-callable_sample.bed" % os.path.splitext(bam_file)[0]
with shared.bedtools_tmpdir({"config": config}):
callable_bed, depth_files = coverage.calculate(bam_file, data)
input_regions_bed = config["algorithm"].get("variant_regions", None)
if not utils.file_uptodate(out_file, callable_bed):
with file_transaction(config, out_file) as tx_out_file:
callable_regions = pybedtools.BedTool(callable_bed)
filter_regions = callable_regions.filter(lambda x: x.name == "CALLABLE")
if input_regions_bed:
if not utils.file_uptodate(out_file, input_regions_bed):
input_regions = pybedtools.BedTool(input_regions_bed)
filter_regions.intersect(input_regions, nonamecheck=True).saveas(tx_out_file)
else:
filter_regions.saveas(tx_out_file)
return CovInfo(out_file, callable_bed, depth_files)
def get_ref_bedtool(ref_file, config, chrom=None):
"""Retrieve a pybedtool BedTool object with reference sizes from input reference.
"""
broad_runner = broad.runner_from_path("picard", config)
ref_dict = broad_runner.run_fn("picard_index_ref", ref_file)
ref_lines = []
with pysam.Samfile(ref_dict, "r") as ref_sam:
for sq in ref_sam.header["SQ"]:
if not chrom or sq["SN"] == chrom:
ref_lines.append("%s\t%s\t%s" % (sq["SN"], 0, sq["LN"]))
return pybedtools.BedTool("\n".join(ref_lines), from_string=True)
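# Editor's sketch: the BedTool returned above holds one full-length
# interval per contig, e.g. a reference with chr1 (1 Mb) and chrM
# (16 kb) yields:
#
#   chr1    0   1000000
#   chrM    0   16000
#
# (contig names and sizes invented for illustration)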
def _get_nblock_regions(in_file, min_n_size, ref_regions):
"""Retrieve coordinates of regions in reference genome with no mapping.
These are potential breakpoints for parallelizing analysis.
"""
out_lines = []
called_contigs = set([])
with utils.open_gzipsafe(in_file) as in_handle:
for line in in_handle:
contig, start, end, ctype = line.rstrip().split()
called_contigs.add(contig)
if (ctype in ["REF_N", "NO_COVERAGE", "EXCESSIVE_COVERAGE", "LOW_COVERAGE"] and
int(end) - int(start) > min_n_size):
out_lines.append("%s\t%s\t%s\n" % (contig, start, end))
for refr in ref_regions:
if refr.chrom not in called_contigs:
out_lines.append("%s\t%s\t%s\n" % (refr.chrom, 0, refr.stop))
return pybedtools.BedTool("\n".join(out_lines), from_string=True)
def _combine_regions(all_regions, ref_regions):
"""Combine multiple BEDtools regions of regions into sorted final BEDtool.
"""
chrom_order = {}
for i, x in enumerate(ref_regions):
chrom_order[x.chrom] = i
def wchrom_key(x):
chrom, start, end = x
return (chrom_order[chrom], start, end)
all_intervals = []
for region_group in all_regions:
for region in region_group:
all_intervals.append((region.chrom, int(region.start), int(region.stop)))
all_intervals.sort(key=wchrom_key)
bed_lines = ["%s\t%s\t%s" % (c, s, e) for (c, s, e) in all_intervals]
return pybedtools.BedTool("\n".join(bed_lines), from_string=True)
def _add_config_regions(nblock_regions, ref_regions, config):
"""Add additional nblock regions based on configured regions to call.
Identifies user defined regions which we should not be analyzing.
"""
input_regions_bed = config["algorithm"].get("variant_regions", None)
if input_regions_bed:
input_regions = pybedtools.BedTool(input_regions_bed)
# work around problem with single region not subtracted correctly.
if len(input_regions) == 1:
str_regions = str(input_regions[0]).strip()
input_regions = pybedtools.BedTool("%s\n%s" % (str_regions, str_regions),
from_string=True)
input_nblock = ref_regions.subtract(input_regions, nonamecheck=True)
if input_nblock == ref_regions:
raise ValueError("Input variant_region file (%s) "
"excludes all genomic regions. Do the chromosome names "
"in the BED file match your genome (chr1 vs 1)?" % input_regions_bed)
all_intervals = _combine_regions([input_nblock, nblock_regions], ref_regions)
return all_intervals.merge()
else:
return nblock_regions
class NBlockRegionPicker:
"""Choose nblock regions reasonably spaced across chromosomes.
This avoids excessively large blocks and also large numbers of tiny blocks
by splitting to a defined number of blocks.
Assumes it is iterating over an ordered input file and must be
re-initialized for each new file processed, as it keeps track of
previous blocks to maintain the splitting.
"""
def __init__(self, ref_regions, config, min_n_size):
self._end_buffer = 250 if min_n_size > 50 else 0
self._chr_last_blocks = {}
target_blocks = int(config["algorithm"].get("nomap_split_targets", 200))
self._target_size = self._get_target_size(target_blocks, ref_regions)
self._ref_sizes = {x.chrom: x.stop for x in ref_regions}
def _get_target_size(self, target_blocks, ref_regions):
size = 0
for x in ref_regions:
size += (x.end - x.start)
return size // target_blocks
def include_block(self, x):
"""Check for inclusion of block based on distance from previous.
"""
last_pos = self._chr_last_blocks.get(x.chrom, 0)
# Region excludes an entire chromosome, typically decoy/haplotypes
if last_pos <= self._end_buffer and x.stop >= self._ref_sizes.get(x.chrom, 0) - self._end_buffer:
return True
# Do not split on smaller decoy and haplotype chromosomes
elif self._ref_sizes.get(x.chrom, 0) <= self._target_size:
return False
elif (x.start - last_pos) > self._target_size:
self._chr_last_blocks[x.chrom] = x.stop
return True
else:
return False
def expand_block(self, feat):
"""Expand any blocks which are near the start or end of a contig.
"""
chrom_end = self._ref_sizes.get(feat.chrom)
if chrom_end:
if feat.start < self._end_buffer:
feat.start = 0
if feat.stop >= chrom_end - self._end_buffer:
feat.stop = chrom_end
return feat
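# Editor's sketch: exercising NBlockRegionPicker on a toy genome. The
# contigs, sizes, and config values are invented for illustration:
#
#   import pybedtools
#   ref = pybedtools.BedTool("chr1\t0\t1000000", from_string=True)
#   picker = NBlockRegionPicker(ref, {"algorithm": {"nomap_split_targets": 10}},
#                               min_n_size=250)
#   blocks = pybedtools.BedTool("chr1\t200000\t201000\nchr1\t500000\t501000",
#                               from_string=True)
#   kept = blocks.filter(picker.include_block).saveas().each(picker.expand_block)
#
# With a 100 kb target size, both toy blocks are far enough from the
# previous split point to be kept as breakpoints.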
def block_regions(callable_bed, in_bam, ref_file, data):
"""Find blocks of regions for analysis from mapped input BAM file.
Identifies islands of callable regions, surrounded by regions
with no read support, that can be analyzed independently.
"""
config = data["config"]
min_n_size = int(config["algorithm"].get("nomap_split_size", 250))
with shared.bedtools_tmpdir({"config": config}):
nblock_bed = "%s-nblocks.bed" % utils.splitext_plus(callable_bed)[0]
callblock_bed = "%s-callableblocks.bed" % utils.splitext_plus(callable_bed)[0]
if not utils.file_uptodate(nblock_bed, callable_bed):
ref_regions = get_ref_bedtool(ref_file, config)
nblock_regions = _get_nblock_regions(callable_bed, min_n_size, ref_regions)
nblock_regions = _add_config_regions(nblock_regions, ref_regions, config)
nblock_regions.filter(lambda r: len(r) > min_n_size).saveas(nblock_bed)
if len(ref_regions.subtract(nblock_regions, nonamecheck=True)) > 0:
ref_regions.subtract(nblock_bed, nonamecheck=True).merge(d=min_n_size).saveas(callblock_bed)
else:
raise ValueError("No callable regions found from BAM file. Alignment regions might "
"not overlap with regions found in your `variant_regions` BED: %s" % in_bam)
return callblock_bed, nblock_bed, callable_bed
def _write_bed_regions(data, final_regions, out_file, out_file_ref):
ref_file = tz.get_in(["reference", "fasta", "base"], data)
ref_regions = get_ref_bedtool(ref_file, data["config"])
noanalysis_regions = ref_regions.subtract(final_regions, nonamecheck=True)
final_regions.saveas(out_file)
noanalysis_regions.saveas(out_file_ref)
def _analysis_block_stats(regions, samples):
"""Provide statistics on sizes and number of analysis blocks.
"""
prev = None
between_sizes = []
region_sizes = []
for region in regions:
if prev and prev.chrom == region.chrom:
between_sizes.append(region.start - prev.end)
region_sizes.append(region.end - region.start)
prev = region
def descriptive_stats(xs):
if len(xs) < 2:
return xs
parts = ["min: %s" % min(xs),
"5%%: %s" % numpy.percentile(xs, 5),
"25%%: %s" % numpy.percentile(xs, 25),
"median: %s" % numpy.percentile(xs, 50),
"75%%: %s" % numpy.percentile(xs, 75),
"95%%: %s" % numpy.percentile(xs, 95),
"99%%: %s" % numpy.percentile(xs, 99),
"max: %s" % max(xs)]
return "\n".join([" " + x for x in parts])
logger.info("Identified %s parallel analysis blocks\n" % len(region_sizes) +
"Block sizes:\n%s\n" % descriptive_stats(region_sizes) +
"Between block sizes:\n%s\n" % descriptive_stats(between_sizes))
if len(region_sizes) == 0:
raise ValueError("No callable regions found in: %s" %
(", ".join([dd.get_sample_name(x) for x in samples])))
def _needs_region_update(out_file, samples):
"""Check if we need to update BED file of regions, supporting back compatibility.
"""
nblock_files = [x["regions"]["nblock"] for x in samples if "regions" in x]
# For outputs from older approaches, do not create a new set of
# analysis regions, since the new algorithm would re-do all BAM and
# variant steps with the new regions
for nblock_file in nblock_files:
test_old = nblock_file.replace("-nblocks", "-analysisblocks")
if os.path.exists(test_old):
return False
# Check if any of the local files have changed so we need to refresh
for nblock_file in nblock_files:
if not utils.file_uptodate(out_file, nblock_file):
return True
return False
def combine_sample_regions(*samples):
"""Create batch-level sets of callable regions for multi-sample calling.
Intersects all non-callable (nblock) regions from all samples in a batch,
producing a global set of callable regions.
"""
samples = utils.unpack_worlds(samples)
samples = [cwlutils.unpack_tarballs(x, x) for x in samples]
# back compatibility -- global file for entire sample set
global_analysis_file = os.path.join(samples[0]["dirs"]["work"], "analysis_blocks.bed")
if utils.file_exists(global_analysis_file) and not _needs_region_update(global_analysis_file, samples):
global_no_analysis_file = os.path.join(os.path.dirname(global_analysis_file), "noanalysis_blocks.bed")
else:
global_analysis_file = None
out = []
analysis_files = []
batches = []
with shared.bedtools_tmpdir(samples[0]):
for batch, items in vmulti.group_by_batch(samples, require_bam=False).items():
batches.append(items)
if global_analysis_file:
analysis_file, no_analysis_file = global_analysis_file, global_no_analysis_file
else:
analysis_file, no_analysis_file = _combine_sample_regions_batch(batch, items)
for data in items:
vr_file = dd.get_variant_regions(data)
if analysis_file:
analysis_files.append(analysis_file)
data["config"]["algorithm"]["callable_regions"] = analysis_file
data["config"]["algorithm"]["non_callable_regions"] = no_analysis_file
data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(analysis_file).count()
elif vr_file:
data["config"]["algorithm"]["callable_count"] = pybedtools.BedTool(vr_file).count()
# attach a representative sample for calculating callable region
if not data.get("work_bam"):
for x in items:
if x.get("work_bam"):
data["work_bam_callable"] = x["work_bam"]
out.append([data])
assert len(out) == len(samples)
if len(analysis_files) > 0:
final_regions = pybedtools.BedTool(analysis_files[0])
_analysis_block_stats(final_regions, batches[0])
return out
def _combine_sample_regions_batch(batch, items):
"""Combine sample regions within a group of batched samples.
"""
config = items[0]["config"]
work_dir = utils.safe_makedir(os.path.join(items[0]["dirs"]["work"], "regions"))
analysis_file = os.path.join(work_dir, "%s-analysis_blocks.bed" % batch)
no_analysis_file = os.path.join(work_dir, "%s-noanalysis_blocks.bed" % batch)
if not utils.file_exists(analysis_file) or _needs_region_update(analysis_file, items):
# Combine all nblocks into a final set of intersecting regions
# without callable bases. HT @brentp for intersection approach
# https://groups.google.com/forum/?fromgroups#!topic/bedtools-discuss/qA9wK4zN8do
bed_regions = [pybedtools.BedTool(x["regions"]["nblock"])
for x in items if "regions" in x]
if len(bed_regions) == 0:
analysis_file, no_analysis_file = None, None
else:
with file_transaction(items[0], analysis_file, no_analysis_file) as (tx_afile, tx_noafile):
def intersect_two(a, b):
return a.intersect(b, nonamecheck=True)
nblock_regions = reduce(intersect_two, bed_regions).saveas(
"%s-nblock%s" % utils.splitext_plus(tx_afile))
ref_file = tz.get_in(["reference", "fasta", "base"], items[0])
ref_regions = get_ref_bedtool(ref_file, config)
min_n_size = int(config["algorithm"].get("nomap_split_size", 250))
block_filter = NBlockRegionPicker(ref_regions, config, min_n_size)
final_nblock_regions = nblock_regions.filter(
block_filter.include_block).saveas().each(block_filter.expand_block).saveas(
"%s-nblockfinal%s" % utils.splitext_plus(tx_afile))
final_regions = ref_regions.subtract(final_nblock_regions, nonamecheck=True).merge(d=min_n_size)
_write_bed_regions(items[0], final_regions, tx_afile, tx_noafile)
if analysis_file and utils.file_exists(analysis_file):
return analysis_file, no_analysis_file
else:
return None, None
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from functools import wraps
import threading
from typing import Callable, Optional
import warnings
from jax._src import traceback_util
traceback_util.register_exclusion(__file__)
from jax._src.lib import xla_bridge
from jax._src.lib import xla_client
def start_server(port: int):
"""Starts a profiler server on port `port`.
Using the "TensorFlow profiler" feature in `TensorBoard
<https://www.tensorflow.org/tensorboard>`_ 2.2 or newer, you can
connect to the profiler server and sample execution traces that show CPU,
GPU, and/or TPU device activity.
Returns a profiler server object. The server remains alive and listening until
the server object is destroyed.
"""
return xla_client.profiler.start_server(port)
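# Editor's sketch: keep a reference to the returned server object so it
# is not garbage-collected while TensorBoard is attached (the port is
# arbitrary):
#
#   server = start_server(9999)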
class _ProfileState(object):
def __init__(self):
self.profile_session = None
self.log_dir = None
self.lock = threading.Lock()
_profile_state = _ProfileState()
def start_trace(log_dir):
"""Starts a profiler trace.
The trace will capture CPU, GPU, and/or TPU activity, including Python
functions and JAX on-device operations. Use ``stop_trace()`` to end the trace
and save the results to ``log_dir``.
The resulting trace can be viewed with TensorBoard. Note that TensorBoard
doesn't need to be running when collecting the trace.
Only one trace may be collected at a time. A RuntimeError will be raised if
``start_trace()`` is called while another trace is running.
Args:
log_dir: The directory to save the profiler trace to (usually the
TensorBoard log directory).
"""
with _profile_state.lock:
if _profile_state.profile_session is not None:
raise RuntimeError("Profile has already been started. "
"Only one profile may be run at a time.")
_profile_state.profile_session = xla_client.profiler.ProfilerSession()
_profile_state.log_dir = log_dir
def stop_trace():
"""Stops the currently-running profiler trace.
The trace will be saved to the ``log_dir`` passed to the corresponding
``start_trace()`` call. Raises a RuntimeError if a trace hasn't been started.
"""
with _profile_state.lock:
if _profile_state.profile_session is None:
raise RuntimeError("No profile started")
_profile_state.profile_session.stop_and_export(_profile_state.log_dir)
_profile_state.profile_session = None
_profile_state.log_dir = None
@contextmanager
def trace(log_dir):
"""Context manager to take a profiler trace.
The trace will capture CPU, GPU, and/or TPU activity, including Python
functions and JAX on-device operations.
The resulting trace can be viewed with TensorBoard. Note that TensorBoard
doesn't need to be running when collecting the trace.
Only one trace may be collected at a time. A RuntimeError will be raised if a
trace is started while another trace is running.
Args:
log_dir: The directory to save the profiler trace to (usually the
TensorBoard log directory).
"""
start_trace(log_dir)
try:
yield
finally:
stop_trace()
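# Editor's sketch: a typical capture with the context manager above; the
# log directory is arbitrary and jax.numpy is assumed available:
#
#   import jax.numpy as jnp
#   with trace("/tmp/jax-trace"):
#       x = jnp.ones((1000, 1000))
#       jnp.dot(x, x.T).block_until_ready()
#
# Point TensorBoard at /tmp/jax-trace afterwards to view the timeline.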
class TraceAnnotation(xla_client.profiler.TraceMe):
"""Context manager that generates a trace event in the profiler.
The trace event spans the duration of the code enclosed by the context.
For example:
>>> x = jnp.ones((1000, 1000))
>>> with jax.profiler.TraceAnnotation("my_label"):
... result = jnp.dot(x, x.T).block_until_ready()
This will cause a "my_label" event to show up on the trace timeline if the
event occurs while the process is being traced.
"""
pass
# TODO: remove this sometime after jax 0.2.11 is released
class TraceContext(TraceAnnotation):
def __init__(self, *args, **kwargs):
warnings.warn(
"TraceContext has been renamed to TraceAnnotation. This alias "
"will eventually be removed; please update your code.")
super().__init__(*args, **kwargs)
class StepTraceAnnotation(TraceAnnotation):
"""Context manager that generates a step trace event in the profiler.
The step trace event spans the duration of the code enclosed by the context.
The profiler will provide the performance analysis for each step trace event.
For example, it can be used to mark training steps and enable the profiler to
provide the performance analysis per step:
>>> while global_step < NUM_STEPS: # doctest: +SKIP
... with jax.profiler.StepTraceAnnotation("train", step_num=global_step): # doctest: +SKIP
... train_step() # doctest: +SKIP
... global_step += 1 # doctest: +SKIP
This will cause a "train xx" event to show up on the trace timeline if the
event occurs while the process is being traced by TensorBoard. In addition,
if using accelerators, the device trace timeline will also show a "train xx"
event. Note that "step_num" can be set as a keyword argument to pass the
global step number to the profiler.
"""
def __init__(self, name: str, **kwargs):
super().__init__(name, _r=1, **kwargs)
# TODO: remove this sometime after jax 0.2.11 is released
class StepTraceContext(StepTraceAnnotation):
def __init__(self, *args, **kwargs):
warnings.warn(
"StepTraceContext has been renamed to StepTraceAnnotation. This alias "
"will eventually be removed; please update your code.")
super().__init__(*args, **kwargs)
def annotate_function(func: Callable, name: Optional[str] = None,
**decorator_kwargs):
"""Decorator that generates a trace event for the execution of a function.
For example:
>>> @jax.profiler.annotate_function
... def f(x):
... return jnp.dot(x, x.T).block_until_ready()
>>>
>>> result = f(jnp.ones((1000, 1000)))
This will cause an "f" event to show up on the trace timeline if the
function execution occurs while the process is being traced by TensorBoard.
Arguments can be passed to the decorator via :py:func:`functools.partial`.
>>> from functools import partial
>>> @partial(jax.profiler.annotate_function, name="event_name")
... def f(x):
... return jnp.dot(x, x.T).block_until_ready()
>>> result = f(jnp.ones((1000, 1000)))
"""
name = name or getattr(func, '__qualname__', None)
name = name or func.__name__
@wraps(func)
def wrapper(*args, **kwargs):
with TraceAnnotation(name, **decorator_kwargs):
return func(*args, **kwargs)
return wrapper
# TODO: remove this sometime after jax 0.2.11 is released
def trace_function(*args, **kwargs):
warnings.warn(
"trace_function has been renamed to annotate_function. This alias "
"will eventually be removed; please update your code.")
return annotate_function(*args, **kwargs)
def device_memory_profile(backend: Optional[str] = None) -> bytes:
"""Captures a JAX device memory profile as ``pprof``-format protocol buffer.
A device memory profile is a snapshot of the state of memory that describes the JAX
:class:`jax.DeviceArray` and executable objects present in memory and their
allocation sites.
For more information on how to use the device memory profiler, see
:doc:`/device_memory_profiling`.
The profiling system works by instrumenting JAX on-device allocations,
capturing a Python stack trace for each allocation. The instrumentation is
always enabled; :func:`device_memory_profile` provides an API to capture it.
The output of :func:`device_memory_profile` is a binary protocol buffer that
can be interpreted and visualized by the `pprof tool
<https://github.com/google/pprof>`_.
Args:
backend: optional; the name of the JAX backend for which the device memory
profile should be collected.
Returns:
A byte string containing a binary `pprof`-format protocol buffer.
"""
return xla_client.heap_profile(xla_bridge.get_backend(backend))
def save_device_memory_profile(filename, backend: Optional[str] = None):
"""Collects a device memory profile and writes it to a file.
:func:`save_device_memory_profile` is a convenience wrapper around :func:`device_memory_profile`
that saves its output to a ``filename``. See the
:func:`device_memory_profile` documentation for more information.
Args:
filename: the filename to which the profile should be written.
backend: optional; the name of the JAX backend for which the device memory
profile should be collected.
"""
profile = device_memory_profile(backend)
with open(filename, "wb") as f:
f.write(profile)
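# Editor's sketch: capture a snapshot after allocating some arrays and
# inspect it with the external pprof tool (the pprof invocation is an
# assumption about that tool, not this module):
#
#   import jax.numpy as jnp
#   x = jnp.ones((1000, 1000))
#   save_device_memory_profile("memory.prof")
#
# then, from a shell:  pprof --web memory.prof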
# -*- coding: utf-8 -*-
u"""wrapper for running simulations
:copyright: Copyright (c) 2017 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern import pkconfig
#: Where we install files with pip
_PYTHON_USER_BASE = 'rsbase'
#: how to run python
_PYTHON = ('python', 'run.py')
#: how to run bash
_BASH = ('bash', 'run.sh')
#: git initialized
_GIT_DIR = '.git'
#: remote host
_GIT_REMOTE = 'bitbucket.org'
#: output directory
_OUT_DIR = 'out'
#: configuration
cfg = None
def default_command(cmd, *args, **kwargs):
"""Wrapper until figure out args with argh"""
import sys
return getattr(sys.modules[__name__], '_cmd_' + cmd)(*args, **kwargs)
def _call(args):
"""Run a command with the proper local python and path environment
Args:
args (tuple): what to run (flags and all)
"""
from pykern import pkio
import subprocess
import os
ub = pkio.py_path(_PYTHON_USER_BASE)
env = os.environ.copy()
env['PATH'] = str(ub.join('bin')) + ':' + env['PATH']
env['PYTHONUSERBASE'] = str(ub)
subprocess.check_call(args, env=env)
def _cmd_init(*args):
"""Create git repo locally and on remote
"""
from pykern import pkcli
import os.path
#TODO(robnagler) add -public
if os.path.exists(_GIT_DIR):
pkcli.command_error('already initialized (.git directory exists)')
#TODO(robnagler) configure bitbucket locally for each repo
_init_python_user_base()
_init_git()
def _cmd_pip(*args):
"""Install a Python package in rsbase
Args:
args (tuple): arguments to pass to pip
"""
args = ['pip', 'install', '--user'] + list(args)
_call(args)
_git_commit('pip install ' + ' '.join(args), check_init=True)
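# Editor's sketch: default_command() dispatches a CLI verb to the
# matching _cmd_* handler, so an install into the local rsbase tree is
# (package name illustrative):
#
#   default_command('pip', 'numpy')
#
# which runs `pip install --user numpy` with PYTHONUSERBASE pointed at
# rsbase and then commits the change via _git_commit.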
def _cmd_run(*args):
"""Execute run.py or run.sh
"""
from pykern import pkcli
import os.path
missing = []
# Prefer _BASH, which may call run.py
for x in (_BASH, _PYTHON):
if os.path.exists(x[1]):
_rsmanifest()
msg = ': ' + ' '.join(args) if args else ''
_git_commit('run' + msg, check_init=True)
return _call(x)
missing.append(x[1])
pkcli.command_error('{}: neither run file exists', missing)
def _git_auth():
"""Get git user.name
Returns:
str: configured user name
"""
from pykern import pkcli
import netrc
try:
b = netrc.netrc().authenticators(_GIT_REMOTE)
if b:
return (b[0], b[2])
except netrc.NetrcParseError:
pass
pkcli.command_error('missing login info {}; please "git login"', _GIT_REMOTE)
def _git_commit(msg, check_init=False):
"""Write rsmanifest and commit all files
Args:
check_init (bool): make sure git is initialized
"""
#TODO(robnagler) do every run(?)
from pykern import pkcli
import os.path
import subprocess
if check_init:
if not os.path.exists(_GIT_DIR):
pkcli.command_error('not initialized, please call "init"')
_git_auth()
subprocess.check_call(['git', 'add', '.'])
subprocess.check_call(['git', 'commit', '-m', msg])
c = ['git', 'push']
if not check_init:
c.extend(['-u', 'origin', 'master'])
subprocess.check_call(c)
def _git_api_request(method, url, ctx):
from pykern import pkcli
import requests
user, pw = _git_auth()
ctx['method'] = method
ctx['user'] = user
ctx['pass'] = pw
ctx['host'] = _GIT_REMOTE
ctx['url'] = ('https://api.{host}/2.0/' + url).format(**ctx)
x = dict(
url=ctx['url'],
method=ctx['method'],
auth=(user, pw),
)
if 'json' in ctx:
x['json'] = ctx['json']
r = requests.request(**x)
# Will return 2xx so best test for now
if not r.ok:
pkcli.command_error('{}: post failed: {} {}', ctx['url'], r, r.text)
return r, ctx
def _init_git():
"""Init git locally and to bitbucket"""
from pykern import pkcli
from pykern import pkio
import datetime
import re
import subprocess
title = pkio.py_path().basename
v = datetime.datetime.utcnow().strftime('%Y%m%d-%H%M%S')
name = 'sim-{}-{}'.format(pkio.py_path().basename, v).lower()
r, ctx = _git_api_request(
'post',
'repositories/{user}/{repo}',
dict(
repo=name,
json=dict(
scm='git',
is_private=True,
fork_policy='no_public_forks',
name=name,
),
),
)
repo_url = r.json()['links']['clone'][0]['href']
#TODO(robnagler) add README.md if not already there
subprocess.check_call(['git', 'init'])
subprocess.check_call(['git', 'remote', 'add', 'origin', repo_url])
subprocess.check_call(['git', 'config', 'user.name', ctx['user']])
if pkio.pkunit_prefix:
_pkunit_setup(ctx)
subprocess.check_call(['git', 'checkout', '-b', 'master'])
_out_dir()
_git_commit('init')
def _init_python_user_base():
"""Ensure all python_user_base files are committed"""
from pykern import pkio
ub = pkio.py_path(_PYTHON_USER_BASE).ensure_dir()
ub.join('.gitignore').write('!*\n')
def _out_dir():
from pykern import pkio
p = pkio.py_path(_OUT_DIR).ensure_dir()
p.join('.gitignore').write('*\n!.gitignore\n')
def _pkunit_setup(ctx):
from pykern import pkio
import subprocess
f = pkio.py_path('git-credentials')
f.write('https://{user}:{pass}@{host}'.format(**ctx))
f.chmod(0o600)
subprocess.check_call(['git', 'config', 'credential.helper', 'cache'])
subprocess.check_call(['git', 'config', 'credential.helper', 'store --file ' + str(f)])
def _pyenv_version():
"""Determine which pyenv
Returns:
str: pyenv version
"""
import subprocess
return subprocess.check_output(['pyenv', 'version']).decode().split(' ')[0]
def _rsmanifest():
from pykern import pkcollections
from pykern import pkjson
from pykern.pkcli import rsmanifest
import cpuinfo
import datetime
import os
import subprocess
m = rsmanifest.read_all()
m['sim'] = {
'run': {
'datetime': datetime.datetime.utcnow().isoformat(),
'cpu_info': cpuinfo.get_cpu_info(),
'pyenv': _pyenv_version(),
#TODO(robnagler) can't include because of auth/credential
# values in environment variables
#'environ': pkcollections.Dict(os.environ),
},
}
pkjson.dump_pretty(m, filename=rsmanifest.BASENAME)
"""Tests for the Device Registry."""
import asyncio
from unittest.mock import patch
import asynctest
import pytest
from homeassistant.core import callback
from homeassistant.helpers import device_registry
from tests.common import mock_device_registry, flush_store
@pytest.fixture
def registry(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def update_events(hass):
"""Capture update events."""
events = []
@callback
def async_capture(event):
events.append(event.data)
hass.bus.async_listen(device_registry.EVENT_DEVICE_REGISTRY_UPDATED,
async_capture)
return events
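# Editor's sketch: each captured event payload is a dict carrying at
# least an 'action' ('create'/'update'/'remove') and a 'device_id', as
# the assertions in the tests below rely on.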
async def test_get_or_create_returns_same_entry(hass, registry, update_events):
"""Make sure we do not duplicate entries."""
entry = registry.async_get_or_create(
config_entry_id='1234',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
},
identifiers={('bridgeid', '0123')},
sw_version='sw-version',
name='name',
manufacturer='manufacturer',
model='model')
entry2 = registry.async_get_or_create(
config_entry_id='1234',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '11:22:33:66:77:88')
},
identifiers={('bridgeid', '0123')},
manufacturer='manufacturer', model='model')
entry3 = registry.async_get_or_create(
config_entry_id='1234',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
}
)
assert len(registry.devices) == 1
assert entry.id == entry2.id
assert entry.id == entry3.id
assert entry.identifiers == {('bridgeid', '0123')}
assert entry3.manufacturer == 'manufacturer'
assert entry3.model == 'model'
assert entry3.name == 'name'
assert entry3.sw_version == 'sw-version'
await hass.async_block_till_done()
# Only 2 update events. The third entry did not generate any changes.
assert len(update_events) == 2
assert update_events[0]['action'] == 'create'
assert update_events[0]['device_id'] == entry.id
assert update_events[1]['action'] == 'update'
assert update_events[1]['device_id'] == entry.id
async def test_requirement_for_identifier_or_connection(registry):
"""Make sure we do require some descriptor of device."""
entry = registry.async_get_or_create(
config_entry_id='1234',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
},
identifiers=set(),
manufacturer='manufacturer', model='model')
entry2 = registry.async_get_or_create(
config_entry_id='1234',
connections=set(),
identifiers={('bridgeid', '0123')},
manufacturer='manufacturer', model='model')
entry3 = registry.async_get_or_create(
config_entry_id='1234',
connections=set(),
identifiers=set(),
manufacturer='manufacturer', model='model')
assert len(registry.devices) == 2
assert entry
assert entry2
assert entry3 is None
async def test_multiple_config_entries(registry):
"""Make sure we do not get duplicate entries."""
entry = registry.async_get_or_create(
config_entry_id='123',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
},
identifiers={('bridgeid', '0123')},
manufacturer='manufacturer', model='model')
entry2 = registry.async_get_or_create(
config_entry_id='456',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
},
identifiers={('bridgeid', '0123')},
manufacturer='manufacturer', model='model')
entry3 = registry.async_get_or_create(
config_entry_id='123',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
},
identifiers={('bridgeid', '0123')},
manufacturer='manufacturer', model='model')
assert len(registry.devices) == 1
assert entry.id == entry2.id
assert entry.id == entry3.id
assert entry2.config_entries == {'123', '456'}
async def test_loading_from_storage(hass, hass_storage):
"""Test loading stored devices on start."""
hass_storage[device_registry.STORAGE_KEY] = {
'version': device_registry.STORAGE_VERSION,
'data': {
'devices': [
{
'config_entries': [
'1234'
],
'connections': [
[
'Zigbee',
'01.23.45.67.89'
]
],
'id': 'abcdefghijklm',
'identifiers': [
[
'serial',
'12:34:56:AB:CD:EF'
]
],
'manufacturer': 'manufacturer',
'model': 'model',
'name': 'name',
'sw_version': 'version',
'area_id': '12345A',
'name_by_user': 'Test Friendly Name'
}
]
}
}
registry = await device_registry.async_get_registry(hass)
entry = registry.async_get_or_create(
config_entry_id='1234',
connections={('Zigbee', '01.23.45.67.89')},
identifiers={('serial', '12:34:56:AB:CD:EF')},
manufacturer='manufacturer', model='model')
assert entry.id == 'abcdefghijklm'
assert entry.area_id == '12345A'
assert entry.name_by_user == 'Test Friendly Name'
assert isinstance(entry.config_entries, set)
async def test_removing_config_entries(hass, registry, update_events):
"""Make sure we do not get duplicate entries."""
entry = registry.async_get_or_create(
config_entry_id='123',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
},
identifiers={('bridgeid', '0123')},
manufacturer='manufacturer', model='model')
entry2 = registry.async_get_or_create(
config_entry_id='456',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
},
identifiers={('bridgeid', '0123')},
manufacturer='manufacturer', model='model')
entry3 = registry.async_get_or_create(
config_entry_id='123',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '34:56:78:CD:EF:12')
},
identifiers={('bridgeid', '4567')},
manufacturer='manufacturer', model='model')
assert len(registry.devices) == 2
assert entry.id == entry2.id
assert entry.id != entry3.id
assert entry2.config_entries == {'123', '456'}
registry.async_clear_config_entry('123')
entry = registry.async_get_device({('bridgeid', '0123')}, set())
entry3_removed = registry.async_get_device({('bridgeid', '4567')}, set())
assert entry.config_entries == {'456'}
assert entry3_removed is None
await hass.async_block_till_done()
assert len(update_events) == 5
assert update_events[0]['action'] == 'create'
assert update_events[0]['device_id'] == entry.id
assert update_events[1]['action'] == 'update'
assert update_events[1]['device_id'] == entry2.id
assert update_events[2]['action'] == 'create'
assert update_events[2]['device_id'] == entry3.id
assert update_events[3]['action'] == 'update'
assert update_events[3]['device_id'] == entry.id
assert update_events[4]['action'] == 'remove'
assert update_events[4]['device_id'] == entry3.id
async def test_removing_area_id(registry):
"""Make sure we can clear area id."""
entry = registry.async_get_or_create(
config_entry_id='123',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
},
identifiers={('bridgeid', '0123')},
manufacturer='manufacturer', model='model')
entry_w_area = registry.async_update_device(entry.id, area_id='12345A')
registry.async_clear_area_id('12345A')
entry_wo_area = registry.async_get_device({('bridgeid', '0123')}, set())
assert not entry_wo_area.area_id
assert entry_w_area != entry_wo_area
async def test_specifying_via_device_create(registry):
"""Test specifying a via_device and updating."""
via = registry.async_get_or_create(
config_entry_id='123',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
},
identifiers={('hue', '0123')},
manufacturer='manufacturer', model='via')
light = registry.async_get_or_create(
config_entry_id='456',
connections=set(),
identifiers={('hue', '456')},
manufacturer='manufacturer', model='light',
via_device=('hue', '0123'))
assert light.via_device_id == via.id
async def test_specifying_via_device_update(registry):
"""Test specifying a via_device and updating."""
light = registry.async_get_or_create(
config_entry_id='456',
connections=set(),
identifiers={('hue', '456')},
manufacturer='manufacturer', model='light',
via_device=('hue', '0123'))
assert light.via_device_id is None
via = registry.async_get_or_create(
config_entry_id='123',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
},
identifiers={('hue', '0123')},
manufacturer='manufacturer', model='via')
light = registry.async_get_or_create(
config_entry_id='456',
connections=set(),
identifiers={('hue', '456')},
manufacturer='manufacturer', model='light',
via_device=('hue', '0123'))
assert light.via_device_id == via.id
async def test_loading_saving_data(hass, registry):
"""Test that we load/save data correctly."""
orig_via = registry.async_get_or_create(
config_entry_id='123',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
},
identifiers={('hue', '0123')},
manufacturer='manufacturer', model='via')
orig_light = registry.async_get_or_create(
config_entry_id='456',
connections=set(),
identifiers={('hue', '456')},
manufacturer='manufacturer', model='light',
via_device=('hue', '0123'))
assert len(registry.devices) == 2
# Now load written data in new registry
registry2 = device_registry.DeviceRegistry(hass)
await flush_store(registry._store)
await registry2.async_load()
# Ensure same order
assert list(registry.devices) == list(registry2.devices)
new_via = registry2.async_get_device({('hue', '0123')}, set())
new_light = registry2.async_get_device({('hue', '456')}, set())
assert orig_via == new_via
assert orig_light == new_light
async def test_no_unnecessary_changes(registry):
"""Make sure we do not consider devices changes."""
entry = registry.async_get_or_create(
config_entry_id='1234',
connections={('ethernet', '12:34:56:78:90:AB:CD:EF')},
identifiers={('hue', '456'), ('bla', '123')},
)
with patch('homeassistant.helpers.device_registry'
'.DeviceRegistry.async_schedule_save') as mock_save:
entry2 = registry.async_get_or_create(
config_entry_id='1234',
identifiers={('hue', '456')},
)
assert entry.id == entry2.id
assert len(mock_save.mock_calls) == 0
async def test_format_mac(registry):
"""Make sure we normalize mac addresses."""
entry = registry.async_get_or_create(
config_entry_id='1234',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
},
)
for mac in [
'123456ABCDEF',
'123456abcdef',
'12:34:56:ab:cd:ef',
'1234.56ab.cdef',
]:
test_entry = registry.async_get_or_create(
config_entry_id='1234',
connections={
(device_registry.CONNECTION_NETWORK_MAC, mac)
},
)
assert test_entry.id == entry.id, mac
assert test_entry.connections == {
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:ab:cd:ef')
}
# This should not raise
for invalid in [
'invalid_mac',
'123456ABCDEFG', # 1 extra char
'12:34:56:ab:cdef', # not enough :
'12:34:56:ab:cd:e:f', # too many :
'1234.56abcdef', # not enough .
'123.456.abc.def', # too many .
]:
invalid_mac_entry = registry.async_get_or_create(
config_entry_id='1234',
connections={
(device_registry.CONNECTION_NETWORK_MAC, invalid)
},
)
assert list(invalid_mac_entry.connections)[0][1] == invalid
async def test_update(registry):
"""Verify that we can update some attributes of a device."""
entry = registry.async_get_or_create(
config_entry_id='1234',
connections={
(device_registry.CONNECTION_NETWORK_MAC, '12:34:56:AB:CD:EF')
},
identifiers={('hue', '456'), ('bla', '123')})
new_identifiers = {
('hue', '654'),
('bla', '321')
}
assert not entry.area_id
assert not entry.name_by_user
with patch.object(registry, 'async_schedule_save') as mock_save:
updated_entry = registry.async_update_device(
entry.id, area_id='12345A', name_by_user='Test Friendly Name',
new_identifiers=new_identifiers, via_device_id='98765B')
assert mock_save.call_count == 1
assert updated_entry != entry
assert updated_entry.area_id == '12345A'
assert updated_entry.name_by_user == 'Test Friendly Name'
assert updated_entry.identifiers == new_identifiers
assert updated_entry.via_device_id == '98765B'
async def test_loading_race_condition(hass):
"""Test only one storage load called when concurrent loading occurred ."""
with asynctest.patch(
'homeassistant.helpers.device_registry.DeviceRegistry.async_load',
) as mock_load:
results = await asyncio.gather(
device_registry.async_get_registry(hass),
device_registry.async_get_registry(hass),
)
mock_load.assert_called_once_with()
assert results[0] == results[1]
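# A minimal sketch (not Home Assistant's actual helper, just the behavior
# test_format_mac above expects) of MAC normalization: recognized formats
# are lowercased and rendered as colon-separated pairs, while anything
# unrecognized is returned unchanged.
def format_mac_sketch(mac):
    """Normalize a MAC address to aa:bb:cc:dd:ee:ff, or return it as-is."""
    to_test = mac
    if len(to_test) == 17 and to_test.count(':') == 5:
        return to_test.lower()
    if len(to_test) == 14 and to_test.count('.') == 2:
        to_test = to_test.replace('.', '')
    if len(to_test) == 12:
        # bare hex digits; insert a colon after every two characters
        return ':'.join(to_test.lower()[i:i + 2] for i in range(0, 12, 2))
    # unrecognized format; the tests expect the input back unchanged
    return mac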
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.api import utils
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class ListServerFiltersTestJSON(base.BaseV2ComputeTest):
@classmethod
def resource_setup(cls):
cls.set_network_resources(network=True, subnet=True, dhcp=True)
super(ListServerFiltersTestJSON, cls).resource_setup()
cls.client = cls.servers_client
# Check to see if the alternate image ref actually exists...
images_client = cls.images_client
resp, images = images_client.list_images()
if cls.image_ref != cls.image_ref_alt and \
any([image for image in images
if image['id'] == cls.image_ref_alt]):
cls.multiple_images = True
else:
cls.image_ref_alt = cls.image_ref
# Do some sanity checks here. If one of the images does
# not exist, fail early since the tests won't work...
try:
cls.images_client.get_image(cls.image_ref)
except exceptions.NotFound:
raise RuntimeError("Image %s (image_ref) was not found!" %
cls.image_ref)
try:
cls.images_client.get_image(cls.image_ref_alt)
except exceptions.NotFound:
raise RuntimeError("Image %s (image_ref_alt) was not found!" %
cls.image_ref_alt)
cls.s1_name = data_utils.rand_name(cls.__name__ + '-instance')
resp, cls.s1 = cls.create_test_server(name=cls.s1_name,
wait_until='ACTIVE')
cls.s2_name = data_utils.rand_name(cls.__name__ + '-instance')
resp, cls.s2 = cls.create_test_server(name=cls.s2_name,
image_id=cls.image_ref_alt,
wait_until='ACTIVE')
cls.s3_name = data_utils.rand_name(cls.__name__ + '-instance')
resp, cls.s3 = cls.create_test_server(name=cls.s3_name,
flavor=cls.flavor_ref_alt,
wait_until='ACTIVE')
if (CONF.service_available.neutron and
CONF.compute.allow_tenant_isolation):
network = cls.isolated_creds.get_primary_network()
cls.fixed_network_name = network['name']
else:
cls.fixed_network_name = CONF.compute.fixed_network_name
@utils.skip_unless_attr('multiple_images', 'Only one image found')
@test.attr(type='gate')
def test_list_servers_filter_by_image(self):
# Filter the list of servers by image
params = {'image': self.image_ref}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.attr(type='gate')
def test_list_servers_filter_by_flavor(self):
# Filter the list of servers by flavor
params = {'flavor': self.flavor_ref_alt}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.attr(type='gate')
def test_list_servers_filter_by_server_name(self):
# Filter the list of servers by server name
params = {'name': self.s1_name}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.attr(type='gate')
def test_list_servers_filter_by_server_status(self):
# Filter the list of servers by server status
params = {'status': 'active'}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.attr(type='gate')
def test_list_servers_filter_by_shutoff_status(self):
# Filter the list of servers by server shutoff status
params = {'status': 'shutoff'}
self.client.stop(self.s1['id'])
self.client.wait_for_server_status(self.s1['id'],
'SHUTOFF')
resp, body = self.client.list_servers(params)
self.client.start(self.s1['id'])
self.client.wait_for_server_status(self.s1['id'],
'ACTIVE')
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.attr(type='gate')
def test_list_servers_filter_by_limit(self):
# Verify only the expected number of servers are returned
params = {'limit': 1}
resp, servers = self.client.list_servers(params)
        # when _interface='xml', the servers list contains an extra
        # servers_links element
self.assertEqual(1, len([x for x in servers['servers'] if 'id' in x]))
@test.attr(type='gate')
def test_list_servers_filter_by_zero_limit(self):
# Verify only the expected number of servers are returned
params = {'limit': 0}
resp, servers = self.client.list_servers(params)
self.assertEqual(0, len(servers['servers']))
@test.attr(type='gate')
def test_list_servers_filter_by_exceed_limit(self):
# Verify only the expected number of servers are returned
params = {'limit': 100000}
resp, servers = self.client.list_servers(params)
resp, all_servers = self.client.list_servers()
self.assertEqual(len([x for x in all_servers['servers'] if 'id' in x]),
len([x for x in servers['servers'] if 'id' in x]))
@utils.skip_unless_attr('multiple_images', 'Only one image found')
@test.attr(type='gate')
def test_list_servers_detailed_filter_by_image(self):
# Filter the detailed list of servers by image
params = {'image': self.image_ref}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.attr(type='gate')
def test_list_servers_detailed_filter_by_flavor(self):
# Filter the detailed list of servers by flavor
params = {'flavor': self.flavor_ref_alt}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
@test.attr(type='gate')
def test_list_servers_detailed_filter_by_server_name(self):
# Filter the detailed list of servers by server name
params = {'name': self.s1_name}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.attr(type='gate')
def test_list_servers_detailed_filter_by_server_status(self):
# Filter the detailed list of servers by server status
params = {'status': 'active'}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
test_ids = [s['id'] for s in (self.s1, self.s2, self.s3)]
self.assertIn(self.s1['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s2['id'], map(lambda x: x['id'], servers))
self.assertIn(self.s3['id'], map(lambda x: x['id'], servers))
self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers
if x['id'] in test_ids])
@test.attr(type='gate')
def test_list_servers_filtered_by_name_wildcard(self):
        # List all servers whose names contain '-instance'
params = {'name': '-instance'}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
        # Take a random part of the name and search for it
part_name = self.s1_name[6:-1]
params = {'name': part_name}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.attr(type='gate')
def test_list_servers_filtered_by_name_regex(self):
        # list of regexes that should match s1, s2 and s3
regexes = ['^.*\-instance\-[0-9]+$', '^.*\-instance\-.*$']
for regex in regexes:
params = {'name': regex}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
            # Take a random part of the name and search for it
part_name = self.s1_name[-10:]
params = {'name': part_name}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.attr(type='gate')
def test_list_servers_filtered_by_ip(self):
# Filter servers by ip
        # Only one server should be listed
resp, self.s1 = self.client.get_server(self.s1['id'])
ip = self.s1['addresses'][self.fixed_network_name][0]['addr']
params = {'ip': ip}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers))
@test.skip_because(bug="1182883",
condition=CONF.service_available.neutron)
@test.attr(type='gate')
def test_list_servers_filtered_by_ip_regex(self):
# Filter servers by regex ip
        # List all servers filtered by part of the ip address.
        # All servers should be listed
resp, self.s1 = self.client.get_server(self.s1['id'])
ip = self.s1['addresses'][self.fixed_network_name][0]['addr'][0:-3]
params = {'ip': ip}
resp, body = self.client.list_servers(params)
servers = body['servers']
self.assertIn(self.s1_name, map(lambda x: x['name'], servers))
self.assertIn(self.s2_name, map(lambda x: x['name'], servers))
self.assertIn(self.s3_name, map(lambda x: x['name'], servers))
@test.attr(type='gate')
def test_list_servers_detailed_limit_results(self):
# Verify only the expected number of detailed results are returned
params = {'limit': 1}
resp, servers = self.client.list_servers_with_detail(params)
self.assertEqual(1, len(servers['servers']))
class ListServerFiltersTestXML(ListServerFiltersTestJSON):
_interface = 'xml'
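# ListServerFiltersTestXML reuses every test above against the XML interface;
# a sketch (hypothetical test, mirroring the existing pattern) of a filter
# test that both interfaces would then inherit if added to the JSON class:
#
#   @test.attr(type='gate')
#   def test_list_servers_filter_by_active_status(self):
#       resp, body = self.client.list_servers({'status': 'ACTIVE'})
#       self.assertIn(self.s1['id'], [s['id'] for s in body['servers']])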
"""Partial dependence plots for tree ensembles. """
# Authors: Peter Prettenhofer
# License: BSD Style.
from itertools import count, izip
import numbers
import numpy as np
from scipy.stats.mstats import mquantiles
from ..utils.extmath import cartesian
from ..externals.joblib import Parallel, delayed
from ..utils import array2d
from ..tree._tree import DTYPE
from ._gradient_boosting import _partial_dependence_tree
from .gradient_boosting import BaseGradientBoosting
def _grid_from_X(X, percentiles=(0.05, 0.95), grid_resolution=100):
"""Generate a grid of points based on the ``percentiles of ``X``.
The grid is generated by placing ``grid_resolution`` equally
spaced points between the ``percentiles`` of each column
of ``X``.
Parameters
----------
X : ndarray
The data
percentiles : tuple of floats
The percentiles which are used to construct the extreme
values of the grid axes.
grid_resolution : int
The number of equally spaced points that are placed
on the grid.
Returns
-------
grid : ndarray
        All data points on the grid; ``grid.shape[1] == X.shape[1]``
        and ``grid.shape[0] == grid_resolution ** X.shape[1]`` when each
        column has at least ``grid_resolution`` unique values.
axes : seq of ndarray
The axes with which the grid has been created.
"""
if len(percentiles) != 2:
raise ValueError('percentile must be tuple of len 2')
if not all(map(lambda x: 0.0 <= x <= 1.0, percentiles)):
raise ValueError('percentile values must be in [0, 1]')
axes = []
for col in range(X.shape[1]):
uniques = np.unique(X[:, col])
if uniques.shape[0] < grid_resolution:
            # feature has low resolution; use its unique values
axis = uniques
else:
emp_percentiles = mquantiles(X, prob=percentiles, axis=0)
# create axis based on percentiles and grid resolution
axis = np.linspace(emp_percentiles[0, col],
emp_percentiles[1, col],
num=grid_resolution, endpoint=True)
axes.append(axis)
return cartesian(axes), axes
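# A short sketch of the shapes produced above (``X_demo`` is a made-up array,
# not part of this module): with 2 columns and grid_resolution=3 the cartesian
# product yields 3 ** 2 == 9 grid rows.
#
#   >>> X_demo = np.array([[0., 10.], [5., 20.], [10., 30.]])
#   >>> grid_demo, axes_demo = _grid_from_X(X_demo, grid_resolution=3)
#   >>> grid_demo.shape
#   (9, 2)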
def partial_dependence(gbrt, target_variables, grid=None, X=None,
percentiles=(0.05, 0.95), grid_resolution=100):
"""Partial dependence of ``target_variables``.
Partial dependence plots show the dependence between the joint values
of the ``target_variables`` and the function represented
by the ``gbrt``.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
target_variables : array-like, dtype=int
        The target features for which the partial dependency should be
        computed (size should be smaller than 3 for visual renderings).
grid : array-like, shape=(n_points, len(target_variables))
The grid of ``target_variables`` values for which the
        partial dependency should be evaluated (either ``grid`` or ``X``
must be specified).
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained. It is used to generate
a ``grid`` for the ``target_variables``. The ``grid`` comprises
``grid_resolution`` equally spaced points between the two
``percentiles``.
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentiles used to create the extreme values
        for the ``grid``. Only used if ``X`` is not None.
grid_resolution : int, default=100
The number of equally spaced points on the ``grid``.
Returns
-------
pdp : array, shape=(n_classes, n_points)
The partial dependence function evaluated on the ``grid``.
For regression and binary classification ``n_classes==1``.
axes : seq of ndarray or None
The axes with which the grid has been created or None if
the grid has been given.
Examples
--------
>>> samples = [[0, 0, 2], [1, 0, 0]]
>>> labels = [0, 1]
>>> from sklearn.ensemble import GradientBoostingClassifier
>>> gb = GradientBoostingClassifier().fit(samples, labels)
>>> kwargs = dict(X=samples, percentiles=(0, 1), grid_resolution=2)
>>> partial_dependence(gb, [0], **kwargs)
(array([[-10.72892297, 10.72892297]]), [array([ 0., 1.])])
"""
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
if (grid is None and X is None) or (grid is not None and X is not None):
raise ValueError('Either grid or X must be specified')
target_variables = np.asarray(target_variables, dtype=np.int32,
order='C').ravel()
if any([not (0 <= fx < gbrt.n_features) for fx in target_variables]):
raise ValueError('target_variables must be in [0, %d]'
% (gbrt.n_features - 1))
if X is not None:
X = array2d(X, dtype=DTYPE, order='C')
grid, axes = _grid_from_X(X[:, target_variables], percentiles,
grid_resolution)
else:
assert grid is not None
        # don't return axes if grid is given
axes = None
# grid must be 2d
if grid.ndim == 1:
grid = grid[:, np.newaxis]
if grid.ndim != 2:
raise ValueError('grid must be 2d but is %dd' % grid.ndim)
grid = np.asarray(grid, dtype=DTYPE, order='C')
assert grid.shape[1] == target_variables.shape[0]
n_trees_per_stage = gbrt.estimators_.shape[1]
n_estimators = gbrt.estimators_.shape[0]
pdp = np.zeros((n_trees_per_stage, grid.shape[0],), dtype=np.float64,
order='C')
for stage in xrange(n_estimators):
for k in range(n_trees_per_stage):
tree = gbrt.estimators_[stage, k].tree_
_partial_dependence_tree(tree, grid, target_variables,
gbrt.learning_rate, pdp[k])
return pdp, axes
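# A sketch of the grid-based call path (reusing the ``gb`` estimator from the
# doctest above): when an explicit ``grid`` is passed instead of ``X`` no axes
# are generated, so ``axes`` is None. Note that ``grid`` must already be an
# ndarray because its ``ndim`` is inspected before conversion.
#
#   >>> pdp, axes = partial_dependence(gb, [0], grid=np.array([[0.], [1.]]))
#   >>> axes is None
#   True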
def plot_partial_dependence(gbrt, X, features, feature_names=None,
label=None, n_cols=3, grid_resolution=100,
percentiles=(0.05, 0.95), n_jobs=1,
verbose=0, ax=None, line_kw=None,
contour_kw=None, **fig_kw):
"""Partial dependence plots for ``features``.
    The ``len(features)`` plots are arranged in a grid with ``n_cols``
columns. Two-way partial dependence plots are plotted as contour
plots.
Parameters
----------
gbrt : BaseGradientBoosting
A fitted gradient boosting model.
X : array-like, shape=(n_samples, n_features)
The data on which ``gbrt`` was trained.
features : seq of tuples or ints
If seq[i] is an int or a tuple with one int value, a one-way
PDP is created; if seq[i] is a tuple of two ints, a two-way
PDP is created.
feature_names : seq of str
Name of each feature; feature_names[i] holds
the name of the feature with index i.
label : object
The class label for which the PDPs should be computed.
Only if gbrt is a multi-class model. Must be in ``gbrt.classes_``.
n_cols : int
The number of columns in the grid plot (default: 3).
percentiles : (low, high), default=(0.05, 0.95)
        The lower and upper percentiles used to create the extreme values
for the PDP axes.
grid_resolution : int, default=100
The number of equally spaced points on the axes.
n_jobs : int
The number of CPUs to use to compute the PDs. -1 means 'all CPUs'.
Defaults to 1.
verbose : int
Verbose output during PD computations. Defaults to 0.
ax : Matplotlib axis object, default None
An axis object onto which the plots will be drawn.
line_kw : dict
Dict with keywords passed to the ``pylab.plot`` call.
For one-way partial dependence plots.
contour_kw : dict
        Dict with keywords passed to the ``pylab.contourf`` call.
For two-way partial dependence plots.
fig_kw : dict
Dict with keywords passed to the figure() call.
Note that all keywords not recognized above will be automatically
included here.
Returns
-------
fig : figure
The Matplotlib Figure object.
axs : seq of Axis objects
A seq of Axis objects, one for each subplot.
Examples
--------
>>> from sklearn.datasets import make_friedman1
>>> from sklearn.ensemble import GradientBoostingRegressor
>>> X, y = make_friedman1()
>>> clf = GradientBoostingRegressor(n_estimators=10).fit(X, y)
>>> fig, axs = plot_partial_dependence(clf, X, [0, (0, 1)]) #doctest: +SKIP
...
"""
import matplotlib.pyplot as plt
from matplotlib import transforms
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import ScalarFormatter
if not isinstance(gbrt, BaseGradientBoosting):
raise ValueError('gbrt has to be an instance of BaseGradientBoosting')
if gbrt.estimators_.shape[0] == 0:
raise ValueError('Call %s.fit before partial_dependence' %
gbrt.__class__.__name__)
# set label_idx for multi-class GBRT
if hasattr(gbrt, 'classes_') and np.size(gbrt.classes_) > 2:
if label is None:
raise ValueError('label is not given for multi-class PDP')
label_idx = np.searchsorted(gbrt.classes_, label)
if gbrt.classes_[label_idx] != label:
raise ValueError('label %s not in ``gbrt.classes_``' % str(label))
else:
# regression and binary classification
label_idx = 0
X = array2d(X, dtype=DTYPE, order='C')
if gbrt.n_features != X.shape[1]:
raise ValueError('X.shape[1] does not match gbrt.n_features')
if line_kw is None:
line_kw = {'color': 'green'}
if contour_kw is None:
contour_kw = {}
# convert feature_names to list
if feature_names is None:
# if not feature_names use fx indices as name
feature_names = map(str, range(gbrt.n_features))
elif isinstance(feature_names, np.ndarray):
feature_names = feature_names.tolist()
def convert_feature(fx):
if isinstance(fx, basestring):
try:
fx = feature_names.index(fx)
except ValueError:
raise ValueError('Feature %s not in feature_names' % fx)
return fx
# convert features into a seq of int tuples
tmp_features = []
for fxs in features:
if isinstance(fxs, (numbers.Integral, basestring)):
fxs = (fxs,)
try:
fxs = np.array([convert_feature(fx) for fx in fxs], dtype=np.int32)
except TypeError:
raise ValueError('features must be either int, str, or tuple '
'of int/str')
if not (1 <= np.size(fxs) <= 2):
raise ValueError('target features must be either one or two')
tmp_features.append(fxs)
features = tmp_features
names = []
try:
for fxs in features:
names.append([feature_names[i] for i in fxs])
except IndexError:
raise ValueError('features[i] must be in [0, n_features) '
'but was %d' % i)
# compute PD functions
pd_result = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(partial_dependence)(gbrt, fxs, X=X,
grid_resolution=grid_resolution)
for fxs in features)
# get global min and max values of PD grouped by plot type
pdp_lim = {}
for pdp, axes in pd_result:
min_pd, max_pd = pdp[label_idx].min(), pdp[label_idx].max()
n_fx = len(axes)
old_min_pd, old_max_pd = pdp_lim.get(n_fx, (min_pd, max_pd))
min_pd = min(min_pd, old_min_pd)
max_pd = max(max_pd, old_max_pd)
pdp_lim[n_fx] = (min_pd, max_pd)
# create contour levels for two-way plots
if 2 in pdp_lim:
Z_level = np.linspace(*pdp_lim[2], num=8)
if ax is None:
fig = plt.figure(**fig_kw)
else:
fig = ax.get_figure()
fig.clear()
n_cols = min(n_cols, len(features))
n_rows = int(np.ceil(len(features) / float(n_cols)))
axs = []
for i, fx, name, (pdp, axes) in izip(count(), features, names,
pd_result):
ax = fig.add_subplot(n_rows, n_cols, i + 1)
if len(axes) == 1:
ax.plot(axes[0], pdp[label_idx].ravel(), **line_kw)
else:
# make contour plot
assert len(axes) == 2
XX, YY = np.meshgrid(axes[0], axes[1])
Z = pdp[label_idx].reshape(map(np.size, axes)).T
CS = ax.contour(XX, YY, Z, levels=Z_level, linewidths=0.5,
colors='k')
ax.contourf(XX, YY, Z, levels=Z_level, vmax=Z_level[-1],
vmin=Z_level[0], alpha=0.75, **contour_kw)
ax.clabel(CS, fmt='%2.2f', colors='k', fontsize=10, inline=True)
# plot data deciles + axes labels
deciles = mquantiles(X[:, fx[0]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transData,
ax.transAxes)
ylim = ax.get_ylim()
ax.vlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_xlabel(name[0])
ax.set_ylim(ylim)
# prevent x-axis ticks from overlapping
ax.xaxis.set_major_locator(MaxNLocator(nbins=6, prune='lower'))
tick_formatter = ScalarFormatter()
tick_formatter.set_powerlimits((-3, 4))
ax.xaxis.set_major_formatter(tick_formatter)
if len(axes) > 1:
# two-way PDP - y-axis deciles + labels
deciles = mquantiles(X[:, fx[1]], prob=np.arange(0.1, 1.0, 0.1))
trans = transforms.blended_transform_factory(ax.transAxes,
ax.transData)
xlim = ax.get_xlim()
ax.hlines(deciles, [0], 0.05, transform=trans, color='k')
ax.set_ylabel(name[1])
# hline erases xlim
ax.set_xlim(xlim)
else:
ax.set_ylabel('Partial dependence')
if len(axes) == 1:
ax.set_ylim(pdp_lim[1])
axs.append(ax)
fig.subplots_adjust(bottom=0.15, top=0.7, left=0.1, right=0.95, wspace=0.4,
hspace=0.3)
return fig, axs
'''
@author: Dallas Fraser
@author: 2016-04-12
@organization: MLSB API
@summary: A base test class for testing API
'''
from api import app, DB
from pprint import PrettyPrinter
from api.model import Player, Team, Sponsor, League, Game, Bat, Espys, Fun, \
Division
from base64 import b64encode
from datetime import date
from api.helper import loads
from api.routes import Routes
from api.variables import PAGE_SIZE
from api.authentication import ADMIN, PASSWORD
from uuid import uuid1
import unittest
headers = {
'Authorization': 'Basic %s' % b64encode(bytes(ADMIN + ':' + PASSWORD,
"utf-8")).decode("ascii")
}
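# For illustration (hypothetical credentials, not the real ones): with
# ADMIN='admin' and PASSWORD='pass' the header above becomes
# {'Authorization': 'Basic YWRtaW46cGFzcw=='}, i.e. base64('admin:pass').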
SUCCESSFUL_GET_CODE = 200
REDIRECT_CODE = 302
SUCCESSFUL_DELETE_CODE = 200
SUCCESSFUL_PUT_CODE = 200
SUCCESSFUL_POST_CODE = 201
INVALID_ID = 10000000
UNAUTHORIZED = 401
VALID_YEAR = date.today().year
NOT_FOUND_CODE = 404
class TestSetup(unittest.TestCase):
def setUp(self):
        app.config['SQLALCHEMY_DATABASE_URI'] = "sqlite://"
app.config['TESTING'] = True
self.show_results = False
self.pp = PrettyPrinter(indent=4)
self.counter = 1
self.app = app.test_client()
self.to_delete = []
self.bats_to_delete = []
self.games_to_delete = []
self.fun_to_delete = []
self.espys_to_delete = []
self.teams_to_delete = []
self.players_to_delete = []
self.sponsors_to_delete = []
self.leagues_to_delete = []
self.divisions_to_delete = []
if (not self.tables_created()):
DB.engine.execute('''
DROP TABLE IF EXISTS fun;
DROP TABLE IF EXISTS roster;
DROP TABLE IF EXISTS bat;
DROP TABLE IF EXISTS espys;
DROP TABLE IF EXISTS game;
DROP TABLE IF EXISTS team;
DROP TABLE IF EXISTS player;
DROP TABLE IF EXISTS sponsor;
DROP TABLE IF EXISTS league;
DROP TABLE IF EXISTS division;
''')
DB.create_all()
def tearDown(self):
DB.session.rollback()
espy_query = Espys.query.get
bats_query = Bat.query.get
games_query = Game.query.get
player_query = Player.query.get
team_query = Team.query.get
sponsor_query = Sponsor.query.get
league_query = League.query.get
fun_query = Fun.query.get
division_query = Division.query.get
to_delete = (self.delete_list(self.espys_to_delete, espy_query) +
self.delete_list(self.bats_to_delete, bats_query) +
self.delete_list(self.games_to_delete, games_query) +
self.delete_list(self.players_to_delete, player_query) +
self.delete_list(self.teams_to_delete, team_query) +
self.delete_list(self.sponsors_to_delete, sponsor_query) +
self.delete_list(self.leagues_to_delete, league_query) +
self.delete_list(self.divisions_to_delete,
division_query) +
self.delete_list(self.fun_to_delete, fun_query))
        if len(to_delete) > 0:
            print(to_delete)
            self.assertFalse(True,
                             "Unable to delete everything upon tear down")
def increment_counter(self):
"""Increments the counter by 1."""
self.counter += 1
def get_counter(self):
"""Returns the counter used to differentiate between creates object."""
return self.counter
def delete_list(self, values, query):
"""Deletes the list of values given from the database."""
not_deleted = []
for item_id in reversed(values):
try:
item = query(item_id)
if item is not None:
DB.session.delete(item)
DB.session.commit()
except Exception as e:
print(e)
not_deleted.append(item_id)
return not_deleted
def tables_created(self):
"""Returns True if the tables are created."""
# TODO figure out how to check if tables are created
return True
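    # One possible way to implement the TODO above (a sketch, assuming
    # SQLAlchemy's inspector API and that the 'player' table implies the
    # rest of the schema exists):
    #
    #   from sqlalchemy import inspect
    #   return 'player' in inspect(DB.engine).get_table_names()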
def output(self, data):
"""Prints the data if show_results is True."""
if self.show_results:
self.pp.pprint(data)
def add_fun(self, count, year=date.today().year):
"""Returns a fun json object that was created with a post request."""
params = {"year": year, "count": count}
rv = self.app.post(Routes['fun'], data=params, headers=headers)
self.assertEqual(SUCCESSFUL_POST_CODE,
rv.status_code,
"Unable to add fun object")
self.assertTrue(loads(rv.data) > 0, "Unable to add fun object")
fun = Fun.query.filter(Fun.year == loads(rv.data)).first()
self.fun_to_delete.append(fun.id)
return fun.json()
def add_sponsor(self,
sponsor_name,
link=None,
description=None,
active=True,
nickname=None):
"""Returns a sponsor json object created with a post request."""
active = 1 if active else 0
params = {'sponsor_name': sponsor_name,
"link": link,
"description": description,
"active": active,
"nickname": nickname}
rv = self.app.post(Routes['sponsor'], data=params, headers=headers)
self.assertEqual(SUCCESSFUL_POST_CODE,
rv.status_code,
"Unable to add sponsor object")
self.assertTrue(loads(rv.data) > 0, "Unable to add sponsor object")
sponsor = Sponsor.query.get(loads(rv.data))
self.sponsors_to_delete.append(sponsor.id)
return sponsor.json()
def add_division(self, division_name):
"""Returns" division json object the result of a post request"""
params = {"division_name": division_name}
rv = self.app.post(Routes['division'], data=params, headers=headers)
self.assertEqual(SUCCESSFUL_POST_CODE,
rv.status_code,
"Unable to add divsion object")
self.assertTrue(loads(rv.data) > 0, "Unable to add division object")
division = Division.query.get(loads(rv.data))
self.divisions_to_delete.append(division.id)
return division.json()
def add_league(self, league_name):
"""Returns league json object that was created with a post request."""
params = {"league_name": league_name}
rv = self.app.post(Routes['league'], data=params, headers=headers)
self.assertEqual(SUCCESSFUL_POST_CODE,
rv.status_code,
"Unable to add league object")
self.assertTrue(loads(rv.data) > 0, "Unable to add league object")
league = League.query.get(loads(rv.data))
self.leagues_to_delete.append(league.id)
return league.json()
def add_player(self,
player_name,
email,
gender=None,
password='default',
active=True):
"""Returns player json object that was created with a post request."""
active = 1 if active else 0
params = {"player_name": player_name,
"email": email,
"gender": gender,
"password": password,
"active": active
}
rv = self.app.post(Routes['player'], data=params, headers=headers)
self.assertEqual(SUCCESSFUL_POST_CODE,
rv.status_code,
"Unable to add player object")
self.assertTrue(loads(rv.data) > 0, "Unable to add player object")
player = Player.query.get(loads(rv.data))
self.players_to_delete.append(player.id)
return player.json()
def add_team(self,
color,
sponsor=None,
league=None,
year=date.today().year):
"""Returns a team json object that was created with a post request."""
params = {"sponsor_id": sponsor['sponsor_id'],
"league_id": league['league_id'],
"color": color,
"year": year
}
rv = self.app.post(Routes['team'], data=params, headers=headers)
self.assertEqual(SUCCESSFUL_POST_CODE,
rv.status_code,
"Unable to add team object")
self.assertTrue(loads(rv.data) > 0, "Unable to add team object")
team = Team.query.get(loads(rv.data))
self.teams_to_delete.append(team.id)
return team.json()
def add_game(self,
date,
time,
home_team,
away_team,
league,
division,
status="",
field=""):
"""Returns a game json object that was created with a post request."""
params = {"home_team_id": int(home_team["team_id"]),
"away_team_id": int(away_team["team_id"]),
"date": date,
"time": time,
"league_id": int(league['league_id']),
"division_id": int(division['division_id']),
"status": status
}
rv = self.app.post(Routes['game'], data=params, headers=headers)
self.assertEqual(SUCCESSFUL_POST_CODE,
rv.status_code,
"Unable to add game object")
self.assertTrue(loads(rv.data) > 0, "Unable to add game object")
game = Game.query.get(loads(rv.data))
self.games_to_delete.append(game.id)
return game.json()
def add_bat(self, player, team, game, classification, inning=1, rbi=0):
"""Returns a bat json object that was created with a post request."""
params = {"player_id": int(player['player_id']),
"rbi": rbi,
"inning": inning,
"hit": classification,
"team_id": int(team['team_id']),
"game_id": int(game["game_id"])}
rv = self.app.post(Routes['bat'], data=params, headers=headers)
self.assertEqual(SUCCESSFUL_POST_CODE,
rv.status_code,
"Unable to add bat object")
self.assertTrue(loads(rv.data) > 0, "Unable to add bat object")
bat = Bat.query.get(loads(rv.data))
self.bats_to_delete.append(bat.id)
return bat.json()
def add_espys(self,
team,
sponsor,
description=None,
points=0.0,
receipt=None,
time=None,
date=None):
"""Returns a espy json object that was created with a post request."""
params = {"team_id": team["team_id"],
"sponsor_id": sponsor["sponsor_id"],
"description": description,
"points": points,
"receipt": receipt,
"date": date,
"time": time}
rv = self.app.post(Routes['espy'], data=params, headers=headers)
self.assertEqual(SUCCESSFUL_POST_CODE,
rv.status_code,
"Unable to add espy object")
self.assertTrue(loads(rv.data) > 0, "Unable to add espy object")
espy = Espys.query.get(loads(rv.data))
self.espys_to_delete.append(espy.id)
return espy.json()
def add_player_to_team(self, team, player, captain=False):
"""Adds the given player to a team."""
params = {"player_id": player['player_id']}
if captain:
params['captain'] = 1
rv = self.app.post(Routes['team_roster'] + "/" + str(team['team_id']),
data=params, headers=headers)
self.assertEqual(SUCCESSFUL_POST_CODE,
rv.status_code,
"Unable to add player to team")
def remove_player_from_team(self, team, player):
"""Removes a player from a team."""
query = "?player_id=" + str(player['player_id'])
url_request = (Routes['team_roster'] +
"/" +
str(team['team_id']) +
query)
rv = self.app.delete(url_request, headers=headers)
self.assertEqual(SUCCESSFUL_DELETE_CODE,
rv.status_code,
"Unable to remove player to team")
def deactivate_player(self, player):
"""Deactivate the given player."""
p = Player.query.get(player['player_id'])
p.deactivate()
DB.session.commit()
def submit_a_score(self, player, game, score, hr=[], ss=[]):
"""Submits a score and returns the list of bats created."""
data = {'player_id': player['player_id'],
'game_id': game['game_id'],
'score': score,
'hr': hr,
'ss': ss}
rv = self.app.post(Routes['botsubmitscore'],
data=data,
headers=headers)
self.assertEqual(SUCCESSFUL_GET_CODE,
rv.status_code,
"Unable to submit a game score")
self.assertEqual(loads(rv.data), True, "Unable to submit a game score")
game_model = Game.query.get(game['game_id'])
for bat in game_model.bats:
self.bats_to_delete.append(bat.id)
return [bat.json() for bat in game_model.bats]
def assertFunModelEqual(self, f1, f2, error_message=""):
"""Asserts the two fun json objects are equal."""
self.assertEqual(f1['year'], f2['year'], error_message)
self.assertEqual(f1['count'], f2['count'], error_message)
def assertSponsorModelEqual(self, s1, s2, error_message=""):
"""Asserts the two sponsors json objects are equal."""
self.assertEqual(s1['sponsor_id'], s2['sponsor_id'], error_message)
self.assertEqual(s1['sponsor_name'], s2['sponsor_name'], error_message)
self.assertEqual(s1['link'], s2['link'], error_message)
self.assertEqual(s1['description'], s2['description'], error_message)
self.assertEqual(s1['active'], s2['active'], error_message)
def assertDivisionModelEqual(self, d1, d2, error_message=""):
"""Asserts the two division json objects are equal"""
self.assertEqual(d1['division_name'],
d2['division_name'], error_message)
self.assertEqual(d1['division_shortname'],
d2['division_shortname'], error_message)
self.assertEqual(d1['division_id'],
d2['division_id'], error_message)
def assertLeagueModelEqual(self, l1, l2, error_message=""):
"""Asserts the two league json objects are equal."""
self.assertEqual(l1['league_name'], l2['league_name'], error_message)
self.assertEqual(l1['league_id'], l2['league_id'], error_message)
def assertGameModelEqual(self, g1, g2, error_message=""):
"""Asserts the two game json objects are equal."""
self.assertEqual(g1['date'], g2['date'], error_message)
self.assertEqual(g1['time'], g2['time'], error_message)
self.assertEqual(g1['away_team_id'], g2['away_team_id'], error_message)
self.assertEqual(g1['home_team_id'], g2['home_team_id'], error_message)
self.assertEqual(g1['league_id'], g2['league_id'], error_message)
self.assertEqual(g1['status'], g2['status'], error_message)
self.assertEqual(g1['field'], g2['field'], error_message)
self.assertEqual(g1['game_id'], g2['game_id'], error_message)
def assertPlayerModelEqual(self, p1, p2, error_message=""):
"""Asserts the two player json objects are equal."""
self.assertEqual(p1['player_id'], p2['player_id'], error_message)
self.assertEqual(p1['player_name'], p2['player_name'], error_message)
self.assertEqual(p1['gender'], p2['gender'], error_message)
self.assertEqual(p1['active'], p2['active'], error_message)
def assertTeamModelEqual(self, t1, t2, error_message=""):
"""Asserts the two team json objects are equal."""
self.assertEqual(t1['team_id'], t2['team_id'], error_message)
self.assertEqual(t1['color'], t2['color'], error_message)
self.assertEqual(t1['sponsor_id'], t2['sponsor_id'], error_message)
self.assertEqual(t1['league_id'], t2['league_id'], error_message)
self.assertEqual(t1['year'], t2['year'], error_message)
if (t1['captain'] is not None and t2['captain'] is not None):
self.assertEqual(t1['captain']['player_id'],
t2['captain']['player_id'],
error_message)
else:
self.assertEqual(t1['captain'], t2['captain'], error_message)
def assertEspysModelEqual(self, e1, e2, error_message=""):
"""Asserts the espys fun json objects are equal."""
self.assertEqual(e1['team_id'], e2['team_id'], error_message)
self.assertEqual(e1['sponsor_id'], e2['sponsor_id'], error_message)
self.assertEqual(e1['description'], e2['description'], error_message)
self.assertEqual(e1['points'], e2['points'], error_message)
self.assertEqual(e1['receipt'], e2['receipt'], error_message)
self.assertEqual(e1['time'], e2['time'], error_message)
self.assertEqual(e1['date'], e2['date'], error_message)
def assertBatModelEqual(self, b1, b2, error_message=""):
"""Asserts the two bat json objects are equal."""
self.assertEqual(b1['bat_id'], b2['bat_id'], error_message)
self.assertEqual(b1['team_id'], b2['team_id'], error_message)
self.assertEqual(b1['game_id'], b2['game_id'], error_message)
self.assertEqual(b1['rbi'], b2['rbi'], error_message)
self.assertEqual(b1['inning'], b2['inning'], error_message)
self.assertEqual(b1['player_id'], b2['player_id'], error_message)
self.assertEqual(b1['hit'], b2['hit'], error_message)
def postInvalidTest(self,
route,
params,
expected_status_code,
assert_function,
expect,
error_message=""):
"""Used to test an invalid post test."""
rv = self.app.post(route, data=params, headers=headers)
self.output(loads(rv.data))
self.output(expect)
self.assertEqual(expected_status_code, rv.status_code, error_message)
assert_function(expect, loads(rv.data), error_message)
def putTest(self,
route,
params,
expected_status_code,
assert_function,
expected_object,
error_message=""):
"""Used to test a put request."""
rv = self.app.put(route, data=params, headers=headers)
self.output(loads(rv.data))
self.output(expected_object)
self.assertEqual(expected_status_code, rv.status_code, error_message)
assert_function(expected_object, loads(rv.data), error_message)
def getTest(self,
route,
expected_status_code,
assert_function,
expected_object,
error_message=""):
"""Used to test a get request."""
rv = self.app.get(route, headers=headers)
self.output(loads(rv.data))
self.output(expected_object)
assert_function(expected_object, loads(rv.data), error_message)
self.assertEqual(expected_status_code, rv.status_code, error_message)
def deleteValidTest(self,
route,
expected_status_code_after_deletion,
assert_function,
object_id,
expected_object,
expected_message,
error_message=""):
"""Used to test a delete request for a valid resource."""
# check object exists
self.getTest(route + "/" + str(object_id),
SUCCESSFUL_GET_CODE,
assert_function,
expected_object,
error_message=error_message)
# delete object
rv = self.app.delete(route + "/" + str(object_id), headers=headers)
expect = None
self.output(loads(rv.data))
self.output(expect)
self.assertEqual(loads(rv.data), expect, error_message)
self.assertEqual(rv.status_code, SUCCESSFUL_DELETE_CODE, error_message)
# check object was deleted
self.getTest(route + "/" + str(object_id),
expected_status_code_after_deletion,
self.assertEqual,
{"details": object_id, "message": expected_message},
error_message=error_message)
def deleteInvalidTest(self,
route,
expected_status_code,
expected_message,
error_message=""):
"""Used to test a delete request for an invalid resource."""
rv = self.app.delete(route + "/" + str(INVALID_ID),
headers=headers)
expect = {'details': INVALID_ID, 'message': expected_message}
self.output(loads(rv.data))
self.output(expect)
self.assertEqual(loads(rv.data), expect, error_message)
self.assertEqual(rv.status_code, expected_status_code, error_message)
def getListTest(self, route, error_message=""):
"""Runs a get test on lists."""
done = False
while not done:
rv = self.app.get(route)
self.assertEqual(rv.status_code,
SUCCESSFUL_GET_CODE,
error_message)
pagination = loads(rv.data)
if not pagination['has_next']:
done = True
self.assertTrue(len(pagination['items']) >= 0)
else:
self.assertTrue(pagination['next_url'] is not None,
error_message)
self.assertTrue(len(pagination['items']) > 0)
max_total = pagination['pages'] * PAGE_SIZE
self.assertTrue(pagination['total'] <= max_total)
route = pagination['next_url']
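    # Example of the pagination payload shape this helper expects (field
    # names taken from the assertions above; values are illustrative):
    #   {"items": [...], "has_next": true,
    #    "next_url": "/route?page=2", "pages": 3, "total": 42}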
def addGame(tester, day="2014-02-10", time="22:40"):
"""Returns a created game (creates, league, sponsor, two teams)."""
# add two teams, a sponsor and a league
league = tester.add_league(str(uuid1()))
division = tester.add_division(str(uuid1()))
sponsor = tester.add_sponsor(str(uuid1()))
home_team = tester.add_team(str(uuid1()),
sponsor,
league,
VALID_YEAR)
away_team = tester.add_team(str(uuid1()),
sponsor,
league,
VALID_YEAR)
game = tester.add_game(day,
time,
home_team,
away_team,
league,
division)
return game
def addBat(tester, classification):
"""Returns a created bat.
    (creates a division, league, sponsor, two teams, game and player)
"""
division = tester.add_division(str(uuid1()))
league = tester.add_league(str(uuid1()))
sponsor = tester.add_sponsor(str(uuid1()))
home_team = tester.add_team(str(uuid1()), sponsor, league, VALID_YEAR)
away_team = tester.add_team(str(uuid1()), sponsor, league, VALID_YEAR)
game = tester.add_game("2014-02-10",
"22:40",
home_team,
away_team,
league,
division)
player = tester.add_player(str(uuid1()),
str(uuid1()) + "@testing.ca",
gender="M")
bat = tester.add_bat(player, home_team, game, classification)
return bat
def addEspy(tester, points):
"""Returns a espy transaction.
(Creates a league, sponsor, team and espys transaction)
"""
league = tester.add_league(str(uuid1()))
sponsor = tester.add_sponsor(str(uuid1()))
team = tester.add_team(str(uuid1()), sponsor, league, VALID_YEAR)
espy = tester.add_espys(team, sponsor, points=points)
return espy
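# A minimal sketch of how the helpers above compose in a concrete test case
# (hypothetical test class, not part of the suite): create a game through
# the API and read it back.
class TestGameRouteExample(TestSetup):
    def testGetGame(self):
        """Create a game and fetch it back through the game route."""
        game = addGame(self)
        self.getTest(Routes['game'] + "/" + str(game['game_id']),
                     SUCCESSFUL_GET_CODE,
                     self.assertGameModelEqual,
                     game,
                     error_message="Game not found")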
# -*- coding: utf-8 -*-
"""
core.users.py
~~~~~~
The user module allows administrators to create, modify and delete users.
:copyright: (c) 2014 by @zizzamia
:license: BSD (See LICENSE for details)
"""
import os
import re
from datetime import datetime
from flask import request, g
# Imports inside Bombolone
import model.users
from config import UP_AVATARS_TMP_FOLDER, ACTIVATED
from decorators import check_rank, get_hash
from core.languages import LIST_LANGUAGES
from core.utils import create_password, ensure_objectid, get_extension
from core.upload import UploadAvatar, AVATAR_IMAGE_SIZE
from core.validators import CheckValue
check = CheckValue()
class User(object):
""" This class allows to :
- get_user
- reset
- new
- update
- update_account
- update_profile
- update_password
- remove
"""
user = {}
params = {}
message = None
success = False
image = ''
list_images = []
changed_email = False
def __init__(self, params={}, _id=None, lan="en", language="English"):
""" """
self.params = params
if _id:
self.get_user(_id)
else:
self.reset(lan=lan, language=language)
def get_user(self, _id):
""" Get the user document from Database """
_id = ensure_objectid(_id)
self.user = model.users.find(user_id=_id, my_id=_id)
def reset(self, lan="en", language="English"):
""" Reset user value in Users.user """
self.image = ''
self.list_images = []
self.message = None
self.success = False
        self.changed_email = False
self.user = {
"created": datetime.utcnow(),
"description": "",
"email": "",
"image": [],
"location": "",
"name": "",
"username": "",
"password": "",
"rank": 80,
"lan": lan,
"language": language,
"time_zone": "Europe/London",
"web": "",
"status": ACTIVATED
}
def new(self):
""" Insert new user in the database """
form = self.params
self.__request_account()
self.__request_profile()
self.__request_password(new_user=True)
if self.changed_email:
self.user['email'] = self.user['new_email']
if self.message is None:
self.user['password'] = create_password(form['password_new'])
del(self.user['password_new'])
del(self.user['password_check'])
self.user['status'] = ACTIVATED
if 'image_tmp' in self.user:
del(self.user['image_tmp'])
self.user['_id'] = model.users.create(self.user)
if len(form.get('image_uploaded', '')) > 0:
if self.__upload_avatar():
self.user['image'] = self.list_images
model.users.update(user_id=self.user['_id'],
user=self.user)
self.success = True
self.message = g.users_msg('success_new_user')
return False
def update(self):
""" Update user values in the database """
form = self.params
self.__request_account()
self.__request_profile()
self.__request_password()
if self.changed_email:
self.user['email'] = self.user['new_email']
if len(form.get('image_uploaded', '')) > 0:
if self.__upload_avatar():
self.user['image'] = self.list_images
if self.message is None:
if len(form['password_new']):
self.user['password'] = create_password(form['password_new'])
del(self.user['password_new'])
del(self.user['password_check'])
if 'image_tmp' in self.user:
del(self.user['image_tmp'])
model.users.update(user_id=self.user['_id'],
user=self.user)
self.success = True
self.message = g.users_msg('success_update_user')
self.user['password_new'] = ""
self.user['password_check'] = ""
def update_account(self):
""" Update user values in the database """
self.__request_account(True)
if self.message is None and self.changed_email:
response = _check_new_email(user=self.user)
if not response['success']:
self.message = g.users_msg('account_error_email_1')
else:
model.users.update(user_id=self.user['_id'], user=self.user)
self.success = True
self.message = g.users_msg('success_update_email')
if self.message is None:
model.users.update(user_id=self.user['_id'], user=self.user)
self.success = True
self.message = g.users_msg('success_update_account')
def update_profile(self):
""" Update user values in the database """
form = self.params
self.__request_profile()
if form.get('image_uploaded') and self.__upload_avatar():
self.user['image'] = self.list_images
if self.user.get('image_tmp'):
del(self.user['image_tmp'])
if self.message is None:
model.users.update(user_id=self.user['_id'],
user = self.user)
self.success = True
self.message = g.users_msg('success_update_profile')
def update_password(self):
""" Update user values in the database """
form = self.params
old_password = self.user.get('password', False)
self.__request_password(old_password=old_password)
if self.message is None:
self.user['password'] = create_password(form['password_new'])
del(self.user['password_new'])
del(self.user['password_check'])
model.users.update(user_id=self.user['_id'],
user = self.user)
self.success = True
self.message = g.users_msg('success_update_password')
self.user['password'] = ""
self.user['password_new'] = ""
self.user['password_check'] = ""
def remove(self, _id):
""" Remove user from the database """
        # Check that my own id is different from the one being removed
if g.my['_id'] != _id:
self.get_user(_id)
            # Check that the user exists and that you only remove
            # users with a lower rank (a higher rank number)
if self.user and g.my['rank'] < self.user['rank']:
return model.users.remove(user_id=_id)
return 'nada'
def __request_account(self, settings=None):
""" Get from request.form the account values and check it """
form = self.params
old_username = self.user['username']
self.user['username'] = form['username']
old_email = self.user['email']
new_email = str.lower(str(form['email']))
self.user['lan'] = form['lan']
self.user['time_zone'] = form['time_zone']
if 'status' in form:
self.user['status'] = int(form['status'])
if not settings and g.my["rank"] == 10:
if form['rank'] in map(str, range(10, 90, 10)):
self.user['rank'] = int(form['rank'])
# Check that the username field is not empty
if not len(self.user['username']):
self.message = g.users_msg('error_account_1')
# If the username is changed
elif old_username != self.user['username']:
# It's important to change directory avatars
# Changed username from old_username to new_username
new_username = unicode(self.user['username']).lower()
old_username = unicode(old_username).lower()
            # Check that the username is available and different from the old one
if new_username != old_username:
try:
regx = re.compile('^'+new_username+'$', re.IGNORECASE)
available_username = g.db.users.find_one({"username" : regx })
except:
available_username = 'Error invalid expression'
else:
available_username = None
# Check that the username has between 2 and 20 characters
if not check.length(self.user['username'], 2, 20):
self.message = g.users_msg('error_account_2')
# Verify that the format of the username is correct
elif not check.username(self.user['username']):
self.message = g.users_msg('error_account_3')
# Raises an error message if username is not available.
            elif available_username is not None:
self.message = g.users_msg('error_account_5')
# Check that the email field is not empty
if not self.message and not len(form['email']):
self.message = g.users_msg('error_account_6')
# If the email is changed
elif not self.message and old_email != new_email:
self.user['new_email'] = new_email
available_email = model.users.find(email=self.user['new_email'],
my_rank=10,
only_one=True)
# Verify that the format of the email is correct
if not check.email(self.user['new_email']):
self.message = g.users_msg('error_account_7')
# Raises an error message if email is not available.
elif available_email:
self.message = g.users_msg('error_account_8')
self.changed_email = True
# Check that the language field is checked
if not self.message and not self.user['lan'] in LIST_LANGUAGES:
self.message = g.users_msg('error_account_9')
        # Check that the time zone field is not empty
if not self.message and not len(self.user['time_zone']):
self.message = g.users_msg('error_account_10')
def __request_profile(self):
""" Get from request.form the profile values and check it """
form = self.params
self.user['name'] = form['name'].strip()
self.user['description'] = form['description'].strip()
self.user['location'] = form['location']
self.user['web'] = form['web'].strip()
if self.message:
return False
# Check that the name field is not empty
if not len(self.user['name']):
self.message = g.users_msg('error_profile_1')
# Check that the name has between 2 and 60 characters.
elif not check.length(self.user['name'], 2, 60):
self.message = g.users_msg('error_profile_2')
# Check that the format of the full name is correct
# and verify that its field is not empty
elif not check.full_name(self.user['name']) or not len(self.user['name']):
self.message = g.users_msg('error_profile_3')
# Check that the format of the web url is correct
# and verify that its field is not empty
elif not check.url(self.user['web']) and len(self.user['web']):
self.message = g.users_msg('error_profile_4')
def __request_password(self, new_user=False, old_password=False):
""" Get from request.form the password values and check it """
form = self.params
password = form.get('password')
password_new = form.get('password_new')
password_check = form.get('password_check')
if self.message:
return False
# Check that the password_new field is not empty
if new_user and (password_new is None or len(password_new) == 0):
self.message = g.users_msg('error_password_0')
# Check that the password_check field is not empty
elif new_user and (password_check is None or len(password_check) == 0):
self.message = g.users_msg('error_password_2')
elif password_new and len(password_new):
self.user['password_new'] = password_new
self.user['password_check'] = password_check
# Check that the new password has between 6 and 30 characters.
if not check.length(self.user['password_new'], 6, 30):
self.message = g.users_msg('error_password_1')
# Check that both passwords are the same
elif self.user['password_new'] != self.user['password_check']:
self.message = g.users_msg('error_password_2')
if old_password:
# Verify that the old password matches the one entered.
old_password = create_password(password)
if self.user['password'] != old_password:
self.message = g.users_msg('error_password_3')
def __upload_avatar(self):
""" Upload the avatar """
form = self.params
self.user['image_tmp'] = form['image_uploaded']
if self.message or not self.user['image_tmp']:
return False
file_name = os.path.join(UP_AVATARS_TMP_FOLDER, self.user['image_tmp'])
if os.path.exists(file_name):
with open(file_name) as image:
up = UploadAvatar()
up.upload(image=image, user=self.user)
self.list_images = up.names_list
return True
self.message = g.users_msg('error_upload_2')
return False
def get(user_id=None, my_rank=None, my_id=None):
"""
    Given a user id, return an object with the user's info.
    The returned object can differ depending on rank permissions.
:param user_id: user id
:returns: an object with all the user information
"""
if user_id is None:
errors = [{ "message": "User id required" }]
elif not ensure_objectid(user_id):
errors = [{ "message": "Bad user id" }]
else:
user = model.users.find(user_id=user_id, my_rank=my_rank, my_id=my_id)
if user:
return dict(success=True, user=user)
else:
errors = [{ "message": "Bad user id" }]
return dict(success=False, errors=errors)
def get_list(my_rank=None, my_id=None):
"""
Returns the users list
"""
users = model.users.find(expand_rank=True,
my_rank=my_rank,
my_id=my_id)
if users:
return dict(success=True, users=users)
else:
errors = [{ "message": "Error" }]
return dict(success=False, errors=errors)
def new(params={}, lan=None, language=None):
"""
"""
user_object = User(params=params,
lan=lan,
language=language)
user_object.new()
if user_object.success:
data = {
"success": True,
"message": user_object.message,
"user": user_object.user
}
else:
errors = [{ "message": user_object.message }]
data = dict(success=False, errors=errors)
return data
def update(user_id=None, params={}):
"""
"""
user_object = User(params=params, _id=user_id)
user_object.update()
if user_object.success:
data = {
"success": True,
"message": user_object.message,
"user": user_object.user
}
else:
errors = [{ "message": user_object.message }]
data = dict(success=False, errors=errors)
return data
def update_profile(user_id=None, params={}):
"""
"""
user_object = User(params=params, _id=user_id)
user_object.update_profile()
if user_object.success:
data = {
"success": True,
"message": user_object.message,
"user": user_object.user
}
else:
errors = [{ "message": user_object.message }]
data = dict(success=False, errors=errors)
return data
def update_account(user_id=None, params={}):
"""
"""
user_object = User(params=params, _id=user_id)
user_object.update_account()
if user_object.success:
data = {
"success": True,
"message": user_object.message,
"user": user_object.user
}
else:
errors = [{ "message": user_object.message }]
data = dict(success=False, errors=errors)
return data
def update_password(user_id=None, params={}):
"""
"""
user_object = User(params=params, _id=user_id)
user_object.update_password()
if user_object.success:
data = {
"success": True,
"message": user_object.message,
"user": user_object.user
}
else:
errors = [{ "message": user_object.message }]
data = dict(success=False, errors=errors)
return data
def remove(user_id=None):
"""
    Remove the user with the given id.
"""
try:
        user = User(_id=user_id)
user.remove(user_id)
data = dict(success=True)
except:
errors = [{ "message": "Error" }]
data = dict(success=False, errors=errors)
return data
def upload_avatar(name=None):
"""
"""
extension = get_extension(name)
up = UploadAvatar()
path_image = up.ajax_upload(UP_AVATARS_TMP_FOLDER, extension)
    # initialize defaults so neither name is unbound when the upload fails
    success = False
    message = g.users_msg('error_upload_1')
    if up.allowed_file():
        up.thumb(AVATAR_IMAGE_SIZE['large'],
                 os.path.join(UP_AVATARS_TMP_FOLDER, path_image))
        if path_image:
            success = True
            message = path_image
if success:
return dict(success=True, message=message)
return dict(success=False, errors=[{ "message": message }])
def _upload_image(user, image_file):
""" """
error_code = None
message = None
up = UploadAvatar()
up.upload(network_object=image_file, user=user)
user['image'] = up.names_list
model.users.update(user_id=user["_id"], image=user['image'])
message = ('users_msg', 'success_update_user')
return error_code, message
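# A sketch (hypothetical Flask view, names assumed) of how the module-level
# helpers above might be driven from a route handler:
#
#   params = request.form.to_dict()
#   result = update_profile(user_id=g.my['_id'], params=params)
#   status = 200 if result['success'] else 400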
from __future__ import unicode_literals
import pygst
pygst.require('0.10')
import gst
import gobject
import logging
import pykka
from mopidy.utils import process
from . import mixers, playlists, utils
from .constants import PlaybackState
from .listener import AudioListener
logger = logging.getLogger(__name__)
mixers.register_mixers()
playlists.register_typefinders()
playlists.register_elements()
MB = 1 << 20
# GST_PLAY_FLAG_VIDEO (1<<0)
# GST_PLAY_FLAG_AUDIO (1<<1)
# GST_PLAY_FLAG_TEXT (1<<2)
# GST_PLAY_FLAG_VIS (1<<3)
# GST_PLAY_FLAG_SOFT_VOLUME (1<<4)
# GST_PLAY_FLAG_NATIVE_AUDIO (1<<5)
# GST_PLAY_FLAG_NATIVE_VIDEO (1<<6)
# GST_PLAY_FLAG_DOWNLOAD (1<<7)
# GST_PLAY_FLAG_BUFFERING (1<<8)
# GST_PLAY_FLAG_DEINTERLACE (1<<9)
# GST_PLAY_FLAG_SOFT_COLORBALANCE (1<<10)
# Default flags to use for playbin: AUDIO, SOFT_VOLUME, DOWNLOAD
PLAYBIN_FLAGS = (1 << 1) | (1 << 4) | (1 << 7)
PLAYBIN_VIS_FLAGS = PLAYBIN_FLAGS | (1 << 3)
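# For example, PLAYBIN_FLAGS evaluates to 0x92 (audio + soft volume +
# progressive download) and PLAYBIN_VIS_FLAGS to 0x9a (the same flags plus
# visualizations).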
class Audio(pykka.ThreadingActor):
"""
Audio output through `GStreamer <http://gstreamer.freedesktop.org/>`_.
"""
#: The GStreamer state mapped to :class:`mopidy.audio.PlaybackState`
state = PlaybackState.STOPPED
def __init__(self, config):
super(Audio, self).__init__()
self._config = config
self._playbin = None
self._signal_ids = {} # {(element, event): signal_id}
self._mixer = None
self._mixer_track = None
self._mixer_scale = None
self._software_mixing = False
self._volume_set = None
self._appsrc = None
self._appsrc_caps = None
self._appsrc_need_data_callback = None
self._appsrc_enough_data_callback = None
self._appsrc_seek_data_callback = None
def on_start(self):
try:
self._setup_playbin()
self._setup_output()
self._setup_visualizer()
self._setup_mixer()
self._setup_message_processor()
except gobject.GError as ex:
logger.exception(ex)
process.exit_process()
def on_stop(self):
self._teardown_message_processor()
self._teardown_mixer()
self._teardown_playbin()
def _connect(self, element, event, *args):
"""Helper to keep track of signal ids based on element+event"""
self._signal_ids[(element, event)] = element.connect(event, *args)
def _disconnect(self, element, event):
"""Helper to disconnect signals created with _connect helper."""
signal_id = self._signal_ids.pop((element, event), None)
if signal_id is not None:
element.disconnect(signal_id)
def _setup_playbin(self):
playbin = gst.element_factory_make('playbin2')
playbin.set_property('flags', PLAYBIN_FLAGS)
self._connect(playbin, 'about-to-finish', self._on_about_to_finish)
self._connect(playbin, 'notify::source', self._on_new_source)
self._playbin = playbin
def _on_about_to_finish(self, element):
source, self._appsrc = self._appsrc, None
if source is None:
return
self._appsrc_caps = None
self._disconnect(source, 'need-data')
self._disconnect(source, 'enough-data')
self._disconnect(source, 'seek-data')
def _on_new_source(self, element, pad):
uri = element.get_property('uri')
if not uri or not uri.startswith('appsrc://'):
return
source = element.get_property('source')
source.set_property('caps', self._appsrc_caps)
source.set_property('format', b'time')
source.set_property('stream-type', b'seekable')
source.set_property('max-bytes', 1 * MB)
source.set_property('min-percent', 50)
self._connect(source, 'need-data', self._appsrc_on_need_data)
self._connect(source, 'enough-data', self._appsrc_on_enough_data)
self._connect(source, 'seek-data', self._appsrc_on_seek_data)
self._appsrc = source
def _appsrc_on_need_data(self, appsrc, gst_length_hint):
length_hint = utils.clocktime_to_millisecond(gst_length_hint)
if self._appsrc_need_data_callback is not None:
self._appsrc_need_data_callback(length_hint)
return True
def _appsrc_on_enough_data(self, appsrc):
if self._appsrc_enough_data_callback is not None:
self._appsrc_enough_data_callback()
return True
def _appsrc_on_seek_data(self, appsrc, gst_position):
position = utils.clocktime_to_millisecond(gst_position)
if self._appsrc_seek_data_callback is not None:
self._appsrc_seek_data_callback(position)
return True
def _teardown_playbin(self):
self._disconnect(self._playbin, 'about-to-finish')
self._disconnect(self._playbin, 'notify::source')
self._playbin.set_state(gst.STATE_NULL)
def _setup_output(self):
output_desc = self._config['audio']['output']
try:
output = gst.parse_bin_from_description(
output_desc, ghost_unconnected_pads=True)
self._playbin.set_property('audio-sink', output)
logger.info('Audio output set to "%s"', output_desc)
except gobject.GError as ex:
logger.error(
'Failed to create audio output "%s": %s', output_desc, ex)
process.exit_process()
def _setup_visualizer(self):
visualizer_element = self._config['audio']['visualizer']
if not visualizer_element:
return
try:
visualizer = gst.element_factory_make(visualizer_element)
self._playbin.set_property('vis-plugin', visualizer)
self._playbin.set_property('flags', PLAYBIN_VIS_FLAGS)
logger.info('Audio visualizer set to "%s"', visualizer_element)
except gobject.GError as ex:
logger.error(
'Failed to create audio visualizer "%s": %s',
visualizer_element, ex)
def _setup_mixer(self):
mixer_desc = self._config['audio']['mixer']
track_desc = self._config['audio']['mixer_track']
volume = self._config['audio']['mixer_volume']
if mixer_desc is None:
logger.info('Not setting up audio mixer')
return
if mixer_desc == 'software':
self._software_mixing = True
logger.info('Audio mixer is using software mixing')
if volume is not None:
self.set_volume(volume)
logger.info('Audio mixer volume set to %d', volume)
return
try:
mixerbin = gst.parse_bin_from_description(
mixer_desc, ghost_unconnected_pads=False)
except gobject.GError as ex:
logger.warning(
'Failed to create audio mixer "%s": %s', mixer_desc, ex)
return
# We assume that the bin will contain a single mixer.
mixer = mixerbin.get_by_interface(b'GstMixer')
if not mixer:
logger.warning(
'Did not find any audio mixers in "%s"', mixer_desc)
return
if mixerbin.set_state(gst.STATE_READY) != gst.STATE_CHANGE_SUCCESS:
logger.warning(
'Setting audio mixer "%s" to READY failed', mixer_desc)
return
track = self._select_mixer_track(mixer, track_desc)
if not track:
logger.warning('Could not find usable audio mixer track')
return
self._mixer = mixer
self._mixer_track = track
self._mixer_scale = (
self._mixer_track.min_volume, self._mixer_track.max_volume)
logger.info(
'Audio mixer set to "%s" using track "%s"',
str(mixer.get_factory().get_name()).decode('utf-8'),
str(track.label).decode('utf-8'))
if volume is not None:
self.set_volume(volume)
logger.info('Audio mixer volume set to %d', volume)
def _select_mixer_track(self, mixer, track_label):
        # Ignore tracks without volumes, then look for the track whose label
        # equals the audio/mixer_track config value; otherwise fall back to
        # the first usable track, hoping the mixer gave them to us in a
        # sensible order.
usable_tracks = []
for track in mixer.list_tracks():
if not mixer.get_volume(track):
continue
if track_label and track.label == track_label:
return track
elif track.flags & (gst.interfaces.MIXER_TRACK_MASTER |
gst.interfaces.MIXER_TRACK_OUTPUT):
usable_tracks.append(track)
if usable_tracks:
return usable_tracks[0]
def _teardown_mixer(self):
if self._mixer is not None:
self._mixer.set_state(gst.STATE_NULL)
def _setup_message_processor(self):
bus = self._playbin.get_bus()
bus.add_signal_watch()
self._connect(bus, 'message', self._on_message)
def _teardown_message_processor(self):
bus = self._playbin.get_bus()
self._disconnect(bus, 'message')
bus.remove_signal_watch()
def _on_message(self, bus, message):
if (message.type == gst.MESSAGE_STATE_CHANGED
and message.src == self._playbin):
old_state, new_state, pending_state = message.parse_state_changed()
self._on_playbin_state_changed(old_state, new_state, pending_state)
elif message.type == gst.MESSAGE_BUFFERING:
percent = message.parse_buffering()
logger.debug('Buffer %d%% full', percent)
elif message.type == gst.MESSAGE_EOS:
self._on_end_of_stream()
elif message.type == gst.MESSAGE_ERROR:
error, debug = message.parse_error()
logger.error(
'%s Debug message: %s',
str(error).decode('utf-8'), debug.decode('utf-8') or 'None')
self.stop_playback()
elif message.type == gst.MESSAGE_WARNING:
error, debug = message.parse_warning()
logger.warning(
'%s Debug message: %s',
str(error).decode('utf-8'), debug.decode('utf-8') or 'None')
def _on_playbin_state_changed(self, old_state, new_state, pending_state):
if new_state == gst.STATE_READY and pending_state == gst.STATE_NULL:
# XXX: We're not called on the last state change when going down to
# NULL, so we rewrite the second to last call to get the expected
# behavior.
new_state = gst.STATE_NULL
pending_state = gst.STATE_VOID_PENDING
if pending_state != gst.STATE_VOID_PENDING:
return # Ignore intermediate state changes
if new_state == gst.STATE_READY:
return # Ignore READY state as it's GStreamer specific
if new_state == gst.STATE_PLAYING:
new_state = PlaybackState.PLAYING
elif new_state == gst.STATE_PAUSED:
new_state = PlaybackState.PAUSED
elif new_state == gst.STATE_NULL:
new_state = PlaybackState.STOPPED
old_state, self.state = self.state, new_state
logger.debug(
'Triggering event: state_changed(old_state=%s, new_state=%s)',
old_state, new_state)
AudioListener.send(
'state_changed', old_state=old_state, new_state=new_state)
def _on_end_of_stream(self):
logger.debug('Triggering reached_end_of_stream event')
AudioListener.send('reached_end_of_stream')
def set_uri(self, uri):
"""
Set URI of audio to be played.
You *MUST* call :meth:`prepare_change` before calling this method.
:param uri: the URI to play
:type uri: string
"""
self._playbin.set_property('uri', uri)
def set_appsrc(
self, caps, need_data=None, enough_data=None, seek_data=None):
"""
Switch to using appsrc for getting audio to be played.
You *MUST* call :meth:`prepare_change` before calling this method.
:param caps: GStreamer caps string describing the audio format to
expect
:type caps: string
:param need_data: callback for when appsrc needs data
:type need_data: callable which takes data length hint in ms
:param enough_data: callback for when appsrc has enough data
:type enough_data: callable
:param seek_data: callback for when data from a new position is needed
to continue playback
:type seek_data: callable which takes time position in ms
"""
if isinstance(caps, unicode):
caps = caps.encode('utf-8')
self._appsrc_caps = gst.Caps(caps)
self._appsrc_need_data_callback = need_data
self._appsrc_enough_data_callback = enough_data
self._appsrc_seek_data_callback = seek_data
self._playbin.set_property('uri', 'appsrc://')
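    # Hedged usage sketch of the appsrc flow (``audio`` is an Audio actor
    # proxy; the caps string and callbacks are illustrative). Per the
    # docstrings above, ``prepare_change`` must run before ``set_appsrc``,
    # and ``emit_data``/``emit_end_of_stream`` only work while the URI is
    # ``appsrc://``:
    #
    #     audio.prepare_change()
    #     audio.set_appsrc(
    #         'audio/x-raw-int,rate=44100,channels=2,width=16',
    #         need_data=on_need_data, seek_data=on_seek_data)
    #     audio.start_playback()
    #     audio.emit_data(gst.Buffer(pcm_bytes))  # repeat while data arrives
    #     audio.emit_end_of_stream()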
def emit_data(self, buffer_):
"""
Call this to deliver raw audio data to be played.
Note that the uri must be set to ``appsrc://`` for this to work.
Returns true if data was delivered.
:param buffer_: buffer to pass to appsrc
:type buffer_: :class:`gst.Buffer`
:rtype: boolean
"""
if not self._appsrc:
return False
return self._appsrc.emit('push-buffer', buffer_) == gst.FLOW_OK
def emit_end_of_stream(self):
"""
Put an end-of-stream token on the playbin. This is typically used in
combination with :meth:`emit_data`.
We will get a GStreamer message when the stream playback reaches the
token, and can then do any end-of-stream related tasks.
"""
self._playbin.get_property('source').emit('end-of-stream')
def get_position(self):
"""
Get position in milliseconds.
:rtype: int
"""
try:
gst_position = self._playbin.query_position(gst.FORMAT_TIME)[0]
return utils.clocktime_to_millisecond(gst_position)
except gst.QueryError:
logger.debug('Position query failed')
return 0
def set_position(self, position):
"""
Set position in milliseconds.
:param position: the position in milliseconds
:type position: int
:rtype: :class:`True` if successful, else :class:`False`
"""
gst_position = utils.millisecond_to_clocktime(position)
return self._playbin.seek_simple(
gst.Format(gst.FORMAT_TIME), gst.SEEK_FLAG_FLUSH, gst_position)
def start_playback(self):
"""
Notify GStreamer that it should start playback.
        :rtype: :class:`True` if successful, else :class:`False`
"""
return self._set_state(gst.STATE_PLAYING)
def pause_playback(self):
"""
Notify GStreamer that it should pause playback.
        :rtype: :class:`True` if successful, else :class:`False`
"""
return self._set_state(gst.STATE_PAUSED)
def prepare_change(self):
"""
Notify GStreamer that we are about to change state of playback.
This function *MUST* be called before changing URIs or doing
changes like updating data that is being pushed. The reason for this
is that GStreamer will reset all its state when it changes to
:attr:`gst.STATE_READY`.
"""
return self._set_state(gst.STATE_READY)
def stop_playback(self):
"""
        Notify GStreamer that it should stop playback.
        :rtype: :class:`True` if successful, else :class:`False`
"""
return self._set_state(gst.STATE_NULL)
def _set_state(self, state):
"""
Internal method for setting the raw GStreamer state.
.. digraph:: gst_state_transitions
graph [rankdir="LR"];
node [fontsize=10];
"NULL" -> "READY"
"PAUSED" -> "PLAYING"
"PAUSED" -> "READY"
"PLAYING" -> "PAUSED"
"READY" -> "NULL"
"READY" -> "PAUSED"
:param state: State to set playbin to. One of: `gst.STATE_NULL`,
`gst.STATE_READY`, `gst.STATE_PAUSED` and `gst.STATE_PLAYING`.
:type state: :class:`gst.State`
        :rtype: :class:`True` if successful, else :class:`False`
"""
result = self._playbin.set_state(state)
if result == gst.STATE_CHANGE_FAILURE:
logger.warning(
'Setting GStreamer state to %s failed', state.value_name)
return False
elif result == gst.STATE_CHANGE_ASYNC:
logger.debug(
'Setting GStreamer state to %s is async', state.value_name)
return True
else:
logger.debug(
'Setting GStreamer state to %s is OK', state.value_name)
return True
def get_volume(self):
"""
Get volume level of the installed mixer.
Example values:
0:
Muted.
100:
Max volume for given system.
:class:`None`:
No mixer present, so the volume is unknown.
:rtype: int in range [0..100] or :class:`None`
"""
if self._software_mixing:
return int(round(self._playbin.get_property('volume') * 100))
if self._mixer is None:
return None
volumes = self._mixer.get_volume(self._mixer_track)
avg_volume = float(sum(volumes)) / len(volumes)
internal_scale = (0, 100)
if self._volume_set is not None:
volume_set_on_mixer_scale = self._rescale(
self._volume_set, old=internal_scale, new=self._mixer_scale)
else:
volume_set_on_mixer_scale = None
if volume_set_on_mixer_scale == avg_volume:
return self._volume_set
else:
return self._rescale(
avg_volume, old=self._mixer_scale, new=internal_scale)
def set_volume(self, volume):
"""
Set volume level of the installed mixer.
:param volume: the volume in the range [0..100]
:type volume: int
:rtype: :class:`True` if successful, else :class:`False`
"""
if self._software_mixing:
self._playbin.set_property('volume', volume / 100.0)
return True
if self._mixer is None:
return False
self._volume_set = volume
internal_scale = (0, 100)
volume = self._rescale(
volume, old=internal_scale, new=self._mixer_scale)
volumes = (volume,) * self._mixer_track.num_channels
self._mixer.set_volume(self._mixer_track, volumes)
return self._mixer.get_volume(self._mixer_track) == volumes
def _rescale(self, value, old=None, new=None):
"""Convert value between scales."""
new_min, new_max = new
old_min, old_max = old
if old_min == old_max:
return old_max
scaling = float(new_max - new_min) / (old_max - old_min)
return int(round(scaling * (value - old_min) + new_min))
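    # Hedged worked example: rescaling the internal volume 50 from the
    # (0, 100) scale onto a mixer scale of (0, 65536) gives
    # int(round(65536 / 100.0 * (50 - 0) + 0)) == 32768.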
def get_mute(self):
"""
Get mute status of the installed mixer.
:rtype: :class:`True` if muted, :class:`False` if unmuted,
:class:`None` if no mixer is installed.
"""
if self._software_mixing:
return self._playbin.get_property('mute')
if self._mixer_track is None:
return None
return bool(self._mixer_track.flags & gst.interfaces.MIXER_TRACK_MUTE)
def set_mute(self, mute):
"""
        Mute or unmute the installed mixer.
        :param mute: Whether to mute the mixer or not.
:type mute: bool
:rtype: :class:`True` if successful, else :class:`False`
"""
if self._software_mixing:
return self._playbin.set_property('mute', bool(mute))
if self._mixer_track is None:
return False
return self._mixer.set_mute(self._mixer_track, bool(mute))
def set_metadata(self, track):
"""
Set track metadata for currently playing song.
Only needs to be called by sources such as `appsrc` which do not
already inject tags in playbin, e.g. when using :meth:`emit_data` to
deliver raw audio data to GStreamer.
:param track: the current track
:type track: :class:`mopidy.models.Track`
"""
taglist = gst.TagList()
artists = [a for a in (track.artists or []) if a.name]
# Default to blank data to trick shoutcast into clearing any previous
# values it might have.
taglist[gst.TAG_ARTIST] = ' '
taglist[gst.TAG_TITLE] = ' '
taglist[gst.TAG_ALBUM] = ' '
if artists:
taglist[gst.TAG_ARTIST] = ', '.join([a.name for a in artists])
if track.name:
taglist[gst.TAG_TITLE] = track.name
if track.album and track.album.name:
taglist[gst.TAG_ALBUM] = track.album.name
event = gst.event_new_tag(taglist)
self._playbin.send_event(event)
| |
from __future__ import absolute_import
import pytest
import mock
from datetime import datetime, timedelta
from django.utils import timezone
from sentry.models import EventUser, GroupStatus, Release
from sentry.testutils import TestCase
from sentry.search.base import ANY
from sentry.search.utils import parse_query, get_numeric_field_value
def test_get_numeric_field_value():
assert get_numeric_field_value('foo', '10') == {
'foo': 10,
}
assert get_numeric_field_value('foo', '>10') == {
'foo_lower': 10,
'foo_lower_inclusive': False,
}
assert get_numeric_field_value('foo', '>=10') == {
'foo_lower': 10,
'foo_lower_inclusive': True,
}
assert get_numeric_field_value('foo', '<10') == {
'foo_upper': 10,
'foo_upper_inclusive': False,
}
assert get_numeric_field_value('foo', '<=10') == {
'foo_upper': 10,
'foo_upper_inclusive': True,
}
assert get_numeric_field_value(
'foo', '>3.5', type=float
) == {
'foo_lower': 3.5,
'foo_lower_inclusive': False,
}
assert get_numeric_field_value(
'foo', '<=-3.5', type=float
) == {
'foo_upper': -3.5,
'foo_upper_inclusive': True,
}
class ParseQueryTest(TestCase):
def parse_query(self, query):
return parse_query(self.project, query, self.user)
def test_simple(self):
result = self.parse_query('foo bar')
assert result == {'tags': {}, 'query': 'foo bar'}
def test_useless_prefix(self):
result = self.parse_query('foo: bar')
assert result == {'tags': {}, 'query': 'foo: bar'}
def test_useless_prefix_with_symbol(self):
result = self.parse_query('foo: @ba$r')
assert result == {'tags': {}, 'query': 'foo: @ba$r'}
def test_useless_prefix_with_colon(self):
result = self.parse_query('foo: :ba:r::foo:')
assert result == {'tags': {}, 'query': 'foo: :ba:r::foo:'}
    def test_handles_space_separation_after_useless_prefix_exception(self):
result = self.parse_query('foo: bar foo:bar')
assert result == {'tags': {'foo': 'bar'}, 'query': 'foo: bar'}
def test_handles_period_in_tag_key(self):
result = self.parse_query('foo.bar:foobar')
assert result == {'tags': {'foo.bar': 'foobar'}, 'query': ''}
def test_handles_dash_in_tag_key(self):
result = self.parse_query('foo-bar:foobar')
assert result == {'tags': {'foo-bar': 'foobar'}, 'query': ''}
# TODO: update docs to include minutes, days, and weeks suffixes
@mock.patch('django.utils.timezone.now')
def test_age_tag_negative_value(self, now):
start = datetime(2016, 1, 1, tzinfo=timezone.utc)
now.return_value = start
expected = start - timedelta(hours=12)
result = self.parse_query('age:-12h')
assert result == {'tags': {}, 'query': '', 'age_from': expected, 'age_from_inclusive': True}
@mock.patch('django.utils.timezone.now')
def test_age_tag_positive_value(self, now):
start = datetime(2016, 1, 1, tzinfo=timezone.utc)
now.return_value = start
expected = start - timedelta(hours=12)
result = self.parse_query('age:+12h')
assert result == {'tags': {}, 'query': '', 'age_to': expected, 'age_to_inclusive': False}
@mock.patch('django.utils.timezone.now')
def test_age_tag_weeks(self, now):
start = datetime(2016, 1, 1, tzinfo=timezone.utc)
now.return_value = start
expected = start - timedelta(days=35)
result = self.parse_query('age:+5w')
assert result == {'tags': {}, 'query': '', 'age_to': expected, 'age_to_inclusive': False}
@mock.patch('django.utils.timezone.now')
def test_age_tag_days(self, now):
start = datetime(2016, 1, 1, tzinfo=timezone.utc)
now.return_value = start
expected = start - timedelta(days=10)
result = self.parse_query('age:+10d')
assert result == {'tags': {}, 'query': '', 'age_to': expected, 'age_to_inclusive': False}
@mock.patch('django.utils.timezone.now')
def test_age_tag_hours(self, now):
start = datetime(2016, 1, 1, tzinfo=timezone.utc)
now.return_value = start
expected = start - timedelta(hours=10)
result = self.parse_query('age:+10h')
assert result == {'tags': {}, 'query': '', 'age_to': expected, 'age_to_inclusive': False}
@mock.patch('django.utils.timezone.now')
def test_age_tag_minutes(self, now):
start = datetime(2016, 1, 1, tzinfo=timezone.utc)
now.return_value = start
expected = start - timedelta(minutes=30)
result = self.parse_query('age:+30m')
assert result == {'tags': {}, 'query': '', 'age_to': expected, 'age_to_inclusive': False}
@mock.patch('django.utils.timezone.now')
def test_two_age_tags(self, now):
start = datetime(2016, 1, 1, tzinfo=timezone.utc)
now.return_value = start
expected_to = start - timedelta(hours=12)
expected_from = start - timedelta(hours=24)
result = self.parse_query('age:+12h age:-24h')
assert result == {
'tags': {},
'query': '',
'age_to': expected_to,
'age_from': expected_from,
'age_to_inclusive': False,
'age_from_inclusive': True
}
def test_event_timestamp_syntax(self):
result = self.parse_query('event.timestamp:2016-01-02')
assert result == {
'query': '',
'date_from': datetime(2016, 1, 2, tzinfo=timezone.utc),
'date_from_inclusive': True,
'date_to': datetime(2016, 1, 3, tzinfo=timezone.utc),
'date_to_inclusive': False,
'tags': {}
}
def test_times_seen_syntax(self):
result = self.parse_query('timesSeen:10')
assert result == {'tags': {}, 'times_seen': 10, 'query': ''}
# TODO: query parser for '>' timestamp should set inclusive to False.
@pytest.mark.xfail
def test_greater_than_comparator(self):
result = self.parse_query('timesSeen:>10 event.timestamp:>2016-01-02')
assert result == {
'tags': {},
'query': '',
'times_seen_lower': 10,
'times_seen_lower_inclusive': False,
'date_from': datetime(2016, 1, 2, tzinfo=timezone.utc),
'date_from_inclusive': False
}
def test_greater_than_equal_comparator(self):
result = self.parse_query('timesSeen:>=10 event.timestamp:>=2016-01-02')
assert result == {
'tags': {},
'query': '',
'times_seen_lower': 10,
'times_seen_lower_inclusive': True,
'date_from': datetime(2016, 1, 2, tzinfo=timezone.utc),
'date_from_inclusive': True
}
def test_less_than_comparator(self):
result = self.parse_query('event.timestamp:<2016-01-02 timesSeen:<10')
assert result == {
'tags': {},
'query': '',
'times_seen_upper': 10,
'times_seen_upper_inclusive': False,
'date_to': datetime(2016, 1, 2, tzinfo=timezone.utc),
'date_to_inclusive': False
}
# TODO: query parser for '<=' timestamp should set inclusive to True.
@pytest.mark.xfail
def test_less_than_equal_comparator(self):
result = self.parse_query('event.timestamp:<=2016-01-02 timesSeen:<=10')
assert result == {
'tags': {},
'query': '',
'times_seen_upper': 10,
'times_seen_upper_inclusive': True,
'date_to': datetime(2016, 1, 2, tzinfo=timezone.utc),
'date_to_inclusive': True
}
def test_handles_underscore_in_tag_key(self):
result = self.parse_query('foo_bar:foobar')
assert result == {'tags': {'foo_bar': 'foobar'}, 'query': ''}
def test_mix_tag_and_query(self):
result = self.parse_query('foo bar key:value')
assert result == {'tags': {'key': 'value'}, 'query': 'foo bar'}
def test_single_tag(self):
result = self.parse_query('key:value')
assert result == {'tags': {'key': 'value'}, 'query': ''}
def test_tag_with_colon_in_value(self):
result = self.parse_query('url:http://example.com')
assert result == {'tags': {'url': 'http://example.com'}, 'query': ''}
def test_single_space_in_value(self):
result = self.parse_query('key:"value1 value2"')
assert result == {'tags': {'key': 'value1 value2'}, 'query': ''}
def test_multiple_spaces_in_value(self):
result = self.parse_query('key:"value1 value2"')
assert result == {'tags': {'key': 'value1 value2'}, 'query': ''}
def test_invalid_tag_as_query(self):
result = self.parse_query('Resque::DirtyExit')
assert result == {'tags': {}, 'query': 'Resque::DirtyExit'}
def test_colons_in_tag_value(self):
result = self.parse_query('key:Resque::DirtyExit')
assert result == {'tags': {'key': 'Resque::DirtyExit'}, 'query': ''}
def test_multiple_tags(self):
result = self.parse_query('foo:bar key:value')
assert result == {'tags': {'key': 'value', 'foo': 'bar'}, 'query': ''}
def test_single_tag_with_quotes(self):
result = self.parse_query('foo:"bar"')
assert result == {'tags': {'foo': 'bar'}, 'query': ''}
def test_tag_with_quotes_and_query(self):
result = self.parse_query('key:"a value" hello')
assert result == {'tags': {'key': 'a value'}, 'query': 'hello'}
def test_is_resolved(self):
result = self.parse_query('is:resolved')
assert result == {'status': GroupStatus.RESOLVED, 'tags': {}, 'query': ''}
def test_assigned_me(self):
result = self.parse_query('assigned:me')
assert result == {'assigned_to': self.user, 'tags': {}, 'query': ''}
def test_assigned_email(self):
result = self.parse_query('assigned:%s' % (self.user.email, ))
assert result == {'assigned_to': self.user, 'tags': {}, 'query': ''}
def test_assigned_unknown_user(self):
result = self.parse_query('assigned:fake@example.com')
assert result['assigned_to'].id == 0
def test_bookmarks_me(self):
result = self.parse_query('bookmarks:me')
assert result == {'bookmarked_by': self.user, 'tags': {}, 'query': ''}
def test_bookmarks_email(self):
result = self.parse_query('bookmarks:%s' % (self.user.email, ))
assert result == {'bookmarked_by': self.user, 'tags': {}, 'query': ''}
def test_bookmarks_unknown_user(self):
result = self.parse_query('bookmarks:fake@example.com')
assert result['bookmarked_by'].id == 0
def test_first_release(self):
result = self.parse_query('first-release:bar')
assert result == {'first_release': 'bar', 'tags': {}, 'query': ''}
def test_first_release_latest(self):
old = Release.objects.create(organization_id=self.project.organization_id, version='a')
old.add_project(self.project)
new = Release.objects.create(
version='b',
organization_id=self.project.organization_id,
date_released=old.date_added + timedelta(minutes=1),
)
new.add_project(self.project)
result = self.parse_query('first-release:latest')
assert result == {'tags': {}, 'first_release': new.version, 'query': ''}
def test_release(self):
result = self.parse_query('release:bar')
assert result == {'tags': {'sentry:release': 'bar'}, 'query': ''}
def test_dist(self):
result = self.parse_query('dist:123')
assert result == {'tags': {'sentry:dist': '123'}, 'query': ''}
def test_release_latest(self):
old = Release.objects.create(organization_id=self.project.organization_id, version='a')
old.add_project(self.project)
new = Release.objects.create(
version='b',
organization_id=self.project.organization_id,
date_released=old.date_added + timedelta(minutes=1),
)
new.add_project(self.project)
result = self.parse_query('release:latest')
assert result == {'tags': {'sentry:release': new.version}, 'query': ''}
def test_padded_spacing(self):
result = self.parse_query('release:bar foo bar')
assert result == {'tags': {'sentry:release': 'bar'}, 'query': 'foo bar'}
def test_unknown_user_with_dot_query(self):
result = self.parse_query('user.email:fake@example.com')
assert result['tags']['sentry:user'] == 'email:fake@example.com'
def test_unknown_user_value(self):
result = self.parse_query('user.xxxxxx:example')
assert result['tags']['sentry:user'] == 'xxxxxx:example'
def test_user_lookup_with_dot_query(self):
euser = EventUser.objects.create(
project=self.project,
ident='1',
username='foobar',
)
result = self.parse_query('user.username:foobar')
assert result['tags']['sentry:user'] == euser.tag_value
def test_unknown_user_legacy_syntax(self):
result = self.parse_query('user:email:fake@example.com')
assert result['tags']['sentry:user'] == 'email:fake@example.com'
def test_user_lookup_legacy_syntax(self):
euser = EventUser.objects.create(
project=self.project,
ident='1',
username='foobar',
)
result = self.parse_query('user:username:foobar')
assert result['tags']['sentry:user'] == euser.tag_value
def test_is_unassigned(self):
result = self.parse_query('is:unassigned')
assert result == {'unassigned': True, 'tags': {}, 'query': ''}
def test_is_assigned(self):
result = self.parse_query('is:assigned')
assert result == {'unassigned': False, 'tags': {}, 'query': ''}
def test_age_from(self):
result = self.parse_query('age:-24h')
assert result['age_from'] > timezone.now() - timedelta(hours=25)
assert result['age_from'] < timezone.now() - timedelta(hours=23)
assert not result.get('age_to')
def test_age_to(self):
result = self.parse_query('age:+24h')
assert result['age_to'] > timezone.now() - timedelta(hours=25)
assert result['age_to'] < timezone.now() - timedelta(hours=23)
assert not result.get('age_from')
def test_age_range(self):
result = self.parse_query('age:-24h age:+12h')
assert result['age_from'] > timezone.now() - timedelta(hours=25)
assert result['age_from'] < timezone.now() - timedelta(hours=23)
assert result['age_to'] > timezone.now() - timedelta(hours=13)
assert result['age_to'] < timezone.now() - timedelta(hours=11)
def test_first_seen_range(self):
result = self.parse_query('firstSeen:-24h firstSeen:+12h')
assert result['age_from'] > timezone.now() - timedelta(hours=25)
assert result['age_from'] < timezone.now() - timedelta(hours=23)
assert result['age_to'] > timezone.now() - timedelta(hours=13)
assert result['age_to'] < timezone.now() - timedelta(hours=11)
def test_date_range(self):
result = self.parse_query('event.timestamp:>2016-01-01 event.timestamp:<2016-01-02')
assert result['date_from'] == datetime(2016, 1, 1, tzinfo=timezone.utc)
assert result['date_from_inclusive']
assert result['date_to'] == datetime(2016, 1, 2, tzinfo=timezone.utc)
assert not result['date_to_inclusive']
def test_date_approx_day(self):
date_value = datetime(2016, 1, 1, tzinfo=timezone.utc)
result = self.parse_query('event.timestamp:2016-01-01')
assert result['date_from'] == date_value
assert result['date_from_inclusive']
assert result['date_to'] == date_value + timedelta(days=1)
assert not result['date_to_inclusive']
def test_date_approx_precise(self):
date_value = datetime(2016, 1, 1, tzinfo=timezone.utc)
result = self.parse_query('event.timestamp:2016-01-01T00:00:00')
assert result['date_from'] == date_value - timedelta(minutes=5)
assert result['date_from_inclusive']
assert result['date_to'] == date_value + timedelta(minutes=6)
assert not result['date_to_inclusive']
def test_active_range(self):
result = self.parse_query('activeSince:-24h activeSince:+12h')
assert result['active_at_from'] > timezone.now() - timedelta(hours=25)
assert result['active_at_from'] < timezone.now() - timedelta(hours=23)
assert result['active_at_to'] > timezone.now() - timedelta(hours=13)
assert result['active_at_to'] < timezone.now() - timedelta(hours=11)
def test_last_seen_range(self):
result = self.parse_query('lastSeen:-24h lastSeen:+12h')
assert result['last_seen_from'] > timezone.now() - timedelta(hours=25)
assert result['last_seen_from'] < timezone.now() - timedelta(hours=23)
assert result['last_seen_to'] > timezone.now() - timedelta(hours=13)
assert result['last_seen_to'] < timezone.now() - timedelta(hours=11)
def test_has_tag(self):
result = self.parse_query('has:foo')
assert result['tags']['foo'] == ANY
def test_has_user(self):
result = self.parse_query('has:user')
assert result['tags']['sentry:user'] == ANY
def test_has_release(self):
result = self.parse_query('has:release')
assert result['tags']['sentry:release'] == ANY
def test_quoted_string(self):
result = self.parse_query('"release:foo"')
assert result == {'tags': {}, 'query': 'release:foo'}
| |
# -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2015 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The PyBuilder execution module.
Deals with the execution of a PyBuilder process by
running tasks, actions and initializers in an order
that respects their dependencies.
"""
import inspect
import copy
import traceback
import sys
import re
import types
from pybuilder.errors import (CircularTaskDependencyException,
DependenciesNotResolvedException,
InvalidNameException,
MissingTaskDependencyException,
RequiredTaskExclusionException,
MissingActionDependencyException,
NoSuchTaskException)
from pybuilder.utils import as_list, Timer, odict
from pybuilder.graph_utils import Graph, GraphHasCycles
if sys.version_info[0] < 3: # if major is less than 3
from .excp_util_2 import raise_exception
else:
from .excp_util_3 import raise_exception
def as_task_name_list(mixed):
result = []
for item in as_list(mixed):
if isinstance(item, types.FunctionType):
result.append(item.__name__)
else:
result.append(str(item))
return result
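# Hedged illustration: ``as_task_name_list`` accepts a single item or a list
# mixing task functions and task names, so given ``def compile_sources(): ...``
# as_task_name_list([compile_sources, 'package']) == ['compile_sources', 'package'].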
class Executable(object):
NAME_PATTERN = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]+$")
def __init__(self, name, callable, description=""):
if not Executable.NAME_PATTERN.match(name):
raise InvalidNameException(name)
self._name = name
self.description = description
self.callable = callable
if hasattr(callable, "__module__"):
self.source = callable.__module__
else:
self.source = "n/a"
if isinstance(self.callable, types.FunctionType):
self.parameters = inspect.getargspec(self.callable).args
else:
raise TypeError("Don't know how to handle callable %s" % callable)
@property
def name(self):
return self._name
def execute(self, argument_dict):
arguments = []
for parameter in self.parameters:
if parameter not in argument_dict:
raise ValueError("Invalid parameter '%s' for %s %s" % (parameter, self.__class__.__name__, self.name))
arguments.append(argument_dict[parameter])
self.callable(*arguments)
class Action(Executable):
def __init__(self, name, callable, before=None, after=None, description="", only_once=False, teardown=False):
super(Action, self).__init__(name, callable, description)
self.execute_before = as_task_name_list(before)
self.execute_after = as_task_name_list(after)
self.only_once = only_once
self.teardown = teardown
class Task(object):
def __init__(self, name, callable, dependencies=None, description="", optional_dependencies=None):
self.name = name
self.executables = [Executable(name, callable, description)]
self.dependencies = as_task_name_list(dependencies)
self.optional_dependencies = as_task_name_list(optional_dependencies)
self.description = [description]
def __eq__(self, other):
if isinstance(other, Task):
return self.name == other.name
return False
def __hash__(self):
return 9 * hash(self.name)
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
if isinstance(other, Task):
return self.name < other.name
return self.name < other
def extend(self, task):
self.executables += task.executables
self.dependencies += task.dependencies
self.description += task.description
def execute(self, logger, argument_dict):
for executable in self.executables:
logger.debug("Executing subtask from %s", executable.source)
executable.execute(argument_dict)
class Initializer(Executable):
def __init__(self, name, callable, environments=None, description=""):
super(Initializer, self).__init__(name, callable, description)
self.environments = environments
def is_applicable(self, environments=None):
if not self.environments:
return True
for environment in as_list(environments):
if environment in self.environments:
return True
class TaskExecutionSummary(object):
def __init__(self, task, number_of_actions, execution_time):
self.task = task
self.number_of_actions = number_of_actions
self.execution_time = execution_time
class ExecutionManager(object):
def __init__(self, logger):
self.logger = logger
self._tasks = odict()
self._task_dependencies = odict()
self._actions = odict()
self._execute_before = odict()
self._execute_after = odict()
self._initializers = []
self._dependencies_resolved = False
self._actions_executed = []
self._tasks_executed = []
self._current_task = None
self._exclude_optional_tasks = []
self._exclude_tasks = []
@property
def initializers(self):
return self._initializers
@property
def tasks(self):
return list(self._tasks.values())
@property
def task_names(self):
return sorted(self._tasks.keys())
def register_initializer(self, initializer):
self.logger.debug("Registering initializer '%s'", initializer.name)
self._initializers.append(initializer)
def register_action(self, action):
self.logger.debug("Registering action '%s'", action.name)
self._actions[action.name] = action
def register_task(self, *tasks):
for task in tasks:
self.logger.debug("Registering task '%s'", task.name)
if task.name in self._tasks:
self._tasks[task.name].extend(task)
else:
self._tasks[task.name] = task
def execute_initializers(self, environments=None, **keyword_arguments):
for initializer in self._initializers:
if not initializer.is_applicable(environments):
message = "Not going to execute initializer '%s' from '%s' as environments do not match."
self.logger.debug(message, initializer.name, initializer.source)
else:
self.logger.debug("Executing initializer '%s' from '%s'",
initializer.name, initializer.source)
initializer.execute(keyword_arguments)
def assert_dependencies_resolved(self):
if not self._dependencies_resolved:
raise DependenciesNotResolvedException()
def execute_task(self, task, **keyword_arguments):
self.assert_dependencies_resolved()
self.logger.debug("Executing task '%s'",
task.name)
timer = Timer.start()
number_of_actions = 0
self._current_task = task
suppressed_errors = []
task_error = None
has_teardown_tasks = False
after_actions = self._execute_after[task.name]
for action in after_actions:
if action.teardown:
has_teardown_tasks = True
break
try:
for action in self._execute_before[task.name]:
if self.execute_action(action, keyword_arguments):
number_of_actions += 1
task.execute(self.logger, keyword_arguments)
except:
if not has_teardown_tasks:
raise
else:
task_error = sys.exc_info()
for action in after_actions:
try:
if not task_error or action.teardown:
if self.execute_action(action, keyword_arguments):
number_of_actions += 1
except:
if not has_teardown_tasks:
raise
elif task_error:
suppressed_errors.append((action, sys.exc_info()))
else:
task_error = sys.exc_info()
for suppressed_error in suppressed_errors:
action = suppressed_error[0]
action_error = suppressed_error[1]
self.logger.error("Executing action '%s' from '%s' resulted in an error that was suppressed:\n%s",
action.name, action.source,
"".join(traceback.format_exception(action_error[0], action_error[1], action_error[2])))
if task_error:
raise_exception(task_error[1], task_error[2])
self._current_task = None
if task not in self._tasks_executed:
self._tasks_executed.append(task)
timer.stop()
return TaskExecutionSummary(task.name, number_of_actions, timer.get_millis())
def execute_action(self, action, arguments):
if action.only_once and action in self._actions_executed:
message = "Action %s has been executed before and is marked as only_once, so will not be executed again"
self.logger.debug(message, action.name)
return False
self.logger.debug("Executing action '%s' from '%s' before task", action.name, action.source)
action.execute(arguments)
self._actions_executed.append(action)
return True
def execute_execution_plan(self, execution_plan, **keyword_arguments):
self.assert_dependencies_resolved()
summaries = []
for task in execution_plan:
summaries.append(self.execute_task(task, **keyword_arguments))
return summaries
def get_task(self, name):
if not self.has_task(name):
raise NoSuchTaskException(name)
return self._tasks[name]
def has_task(self, name):
return name in self._tasks
def _collect_transitive_tasks(self, task, visited=None):
if not visited:
visited = set()
if task in visited:
return visited
visited.add(task)
dependencies = [self.get_task(dependency_name) for dependency_name in task.dependencies]
for dependency in dependencies:
self._collect_transitive_tasks(dependency, visited)
return visited
def collect_all_transitive_tasks(self, task_names):
all_tasks = set()
for task_name in task_names:
all_tasks.update(self._collect_transitive_tasks(self.get_task(task_name)))
return all_tasks
def build_execution_plan(self, task_names):
self.assert_dependencies_resolved()
execution_plan = []
dependency_edges = {}
for task in self.collect_all_transitive_tasks(as_list(task_names)):
dependency_edges[task.name] = task.dependencies
try:
Graph(dependency_edges).assert_no_cycles_present()
except GraphHasCycles as cycles:
raise CircularTaskDependencyException(str(cycles))
for task_name in as_list(task_names):
self.enqueue_task(execution_plan, task_name)
return execution_plan
def build_shortest_execution_plan(self, task_names):
"""
Finds the shortest execution plan taking into the account tasks already executed
This is useful when you want to execute tasks dynamically without repeating pre-requisite
tasks you've already executed
"""
execution_plan = self.build_execution_plan(task_names)
shortest_plan = copy.copy(execution_plan)
for executed_task in self._tasks_executed:
candidate_task = shortest_plan[0]
if candidate_task.name not in task_names and candidate_task == executed_task:
shortest_plan.pop(0)
else:
break
if self._current_task and self._current_task in shortest_plan:
raise CircularTaskDependencyException("Task '%s' attempted to invoke tasks %s, "
"resulting in plan %s, creating circular dependency" %
(self._current_task, task_names, shortest_plan))
return shortest_plan
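    # Hedged illustration: with registered tasks ``clean`` -> ``compile`` ->
    # ``package`` and both ``clean`` and ``compile`` already executed,
    # build_shortest_execution_plan('package') pops the two leading executed
    # tasks off the full plan and returns just [package].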
def enqueue_task(self, execution_plan, task_name):
task = self.get_task(task_name)
if task in execution_plan:
return
for dependency in self._task_dependencies[task.name]:
self.enqueue_task(execution_plan, dependency.name)
execution_plan.append(task)
def resolve_dependencies(self, exclude_optional_tasks=None, exclude_tasks=None):
self._exclude_optional_tasks = as_task_name_list(exclude_optional_tasks or [])
self._exclude_tasks = as_task_name_list(exclude_tasks or [])
for task in self._tasks.values():
self._execute_before[task.name] = []
self._execute_after[task.name] = []
self._task_dependencies[task.name] = []
if self.is_task_excluded(task.name) or self.is_optional_task_excluded(task.name):
self.logger.debug("Skipping resolution for excluded task '%s'", task.name)
continue
for d in task.dependencies:
if not self.has_task(d):
raise MissingTaskDependencyException(task.name, d)
if self.is_optional_task_excluded(d):
raise RequiredTaskExclusionException(task.name, d)
if not self.is_task_excluded(d):
self._task_dependencies[task.name].append(self.get_task(d))
self.logger.debug("Adding '%s' as a required dependency of task '%s'", d, task.name)
for d in task.optional_dependencies:
if not self.has_task(d):
raise MissingTaskDependencyException(task.name, d)
if not (self.is_task_excluded(d) or self.is_optional_task_excluded(d)):
self._task_dependencies[task.name].append(self.get_task(d))
self.logger.debug("Adding '%s' as an optional dependency of task '%s'", d, task.name)
for action in self._actions.values():
for task in action.execute_before:
if not self.has_task(task):
raise MissingActionDependencyException(action.name, task)
self._execute_before[task].append(action)
self.logger.debug("Adding before action '%s' for task '%s'", action.name, task)
for task in action.execute_after:
if not self.has_task(task):
raise MissingActionDependencyException(action.name, task)
self._execute_after[task].append(action)
self.logger.debug("Adding after action '%s' for task '%s'", action.name, task)
self._dependencies_resolved = True
def is_task_excluded(self, task):
return task in self._exclude_tasks
def is_optional_task_excluded(self, task):
return task in self._exclude_optional_tasks
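# Hedged usage sketch of ExecutionManager (the task callables and keyword
# arguments are illustrative):
#
#     manager = ExecutionManager(logger)
#     manager.register_task(Task('compile', compile_sources))
#     manager.register_task(Task('package', build_package,
#                                dependencies=['compile']))
#     manager.resolve_dependencies()
#     plan = manager.build_execution_plan('package')  # [compile, package]
#     manager.execute_execution_plan(plan, logger=logger, project=project)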
| |
# Copyright 2013-2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pylint: disable=E1101
import os
import re
import tempfile
import json
from collections import defaultdict
from wlauto import AndroidUiAutoBenchmark, Parameter, Artifact
from wlauto.exceptions import ConfigError, WorkloadError
from wlauto.utils.misc import capitalize
import wlauto.common.android.resources
class Geekbench(AndroidUiAutoBenchmark):
name = 'geekbench'
description = """
Geekbench provides a comprehensive set of benchmarks engineered to quickly
and accurately measure processor and memory performance.
http://www.primatelabs.com/geekbench/
From the website:
Designed to make benchmarks easy to run and easy to understand, Geekbench
takes the guesswork out of producing robust and reliable benchmark results.
Geekbench scores are calibrated against a baseline score of 1,000 (which is
the score of a single-processor Power Mac G5 @ 1.6GHz). Higher scores are
better, with double the score indicating double the performance.
The benchmarks fall into one of four categories:
- integer performance.
- floating point performance.
- memory performance.
- stream performance.
Geekbench benchmarks: http://www.primatelabs.com/geekbench/doc/benchmarks.html
    Geekbench scoring methodology:
http://support.primatelabs.com/kb/geekbench/interpreting-geekbench-scores
"""
summary_metrics = ['score', 'multicore_score']
versions = {
'3': {
'package': 'com.primatelabs.geekbench3',
'activity': '.HomeActivity',
},
'2': {
'package': 'ca.primatelabs.geekbench2',
'activity': '.HomeActivity',
},
}
begin_regex = re.compile(r'^\s*D/WebViewClassic.loadDataWithBaseURL\(\s*\d+\s*\)'
r'\s*:\s*(?P<content>\<.*)\s*$')
replace_regex = re.compile(r'<[^>]*>')
parameters = [
Parameter('version', default=sorted(versions.keys())[-1], allowed_values=sorted(versions.keys()),
description='Specifies which version of the workload should be run.'),
Parameter('times', kind=int, default=1,
                  description=('Specifies the number of times the benchmark will be run in a "tight '
                               'loop", i.e. without performing setup/teardown in between.')),
]
@property
def activity(self):
return self.versions[self.version]['activity']
@property
def package(self):
return self.versions[self.version]['package']
def __init__(self, device, **kwargs):
super(Geekbench, self).__init__(device, **kwargs)
self.uiauto_params['version'] = self.version
self.uiauto_params['times'] = self.times
self.run_timeout = 5 * 60 * self.times
def initialize(self, context):
if self.version == '3' and not self.device.is_rooted:
raise WorkloadError('Geekbench workload only works on rooted devices.')
def init_resources(self, context):
self.apk_file = context.resolver.get(wlauto.common.android.resources.ApkFile(self), version=self.version)
self.uiauto_file = context.resolver.get(wlauto.common.android.resources.JarFile(self))
self.device_uiauto_file = self.device.path.join(self.device.working_directory,
os.path.basename(self.uiauto_file))
if not self.uiauto_package:
self.uiauto_package = os.path.splitext(os.path.basename(self.uiauto_file))[0]
def update_result(self, context):
super(Geekbench, self).update_result(context)
update_method = getattr(self, 'update_result_{}'.format(self.version))
update_method(context)
def validate(self):
if (self.times > 1) and (self.version == '2'):
raise ConfigError('times parameter is not supported for version 2 of Geekbench.')
def update_result_2(self, context):
score_calculator = GBScoreCalculator()
score_calculator.parse(self.logcat_log)
score_calculator.update_results(context)
def update_result_3(self, context):
outfile_glob = self.device.path.join(self.device.package_data_directory, self.package, 'files', '*gb3')
on_device_output_files = [f.strip() for f in
self.device.execute('ls {}'.format(outfile_glob), as_root=True).split('\n')]
for i, on_device_output_file in enumerate(on_device_output_files):
host_temp_file = tempfile.mktemp()
self.device.pull_file(on_device_output_file, host_temp_file)
host_output_file = os.path.join(context.output_directory, os.path.basename(on_device_output_file))
with open(host_temp_file) as fh:
data = json.load(fh)
os.remove(host_temp_file)
with open(host_output_file, 'w') as wfh:
json.dump(data, wfh, indent=4)
context.iteration_artifacts.append(Artifact('geekout', path=os.path.basename(on_device_output_file),
kind='data',
description='Geekbench 3 output from device.'))
context.result.add_metric(namemify('score', i), data['score'])
context.result.add_metric(namemify('multicore_score', i), data['multicore_score'])
for section in data['sections']:
context.result.add_metric(namemify(section['name'] + '_score', i), section['score'])
context.result.add_metric(namemify(section['name'] + '_multicore_score', i),
section['multicore_score'])
class GBWorkload(object):
"""
Geekbench workload (not to be confused with WA's workloads). This is a single test run by
    Geekbench, such as performing compression or generating a Mandelbrot set.
"""
# Index maps onto the hundreds digit of the ID.
categories = [None, 'integer', 'float', 'memory', 'stream']
# 2003 entry-level Power Mac G5 is considered to have a baseline score of
# 1000 for every category.
pmac_g5_base_score = 1000
units_conversion_map = {
'K': 1,
'M': 1000,
'G': 1000000,
}
def __init__(self, wlid, name, pmac_g5_st_score, pmac_g5_mt_score):
"""
:param wlid: A three-digit workload ID. Uniquely identifies a workload and also
determines the category a workload belongs to.
:param name: The name of the workload.
:param pmac_g5_st_score: Score achieved for this workload on 2003 entry-level
Power Mac G5 running in a single thread.
:param pmac_g5_mt_score: Score achieved for this workload on 2003 entry-level
Power Mac G5 running in multiple threads.
"""
self.wlid = wlid
self.name = name
self.pmac_g5_st_score = pmac_g5_st_score
self.pmac_g5_mt_score = pmac_g5_mt_score
self.category = self.categories[int(wlid) // 100]
self.collected_results = []
def add_result(self, value, units):
self.collected_results.append(self.convert_to_kilo(value, units))
def convert_to_kilo(self, value, units):
return value * self.units_conversion_map[units[0]]
def clear(self):
self.collected_results = []
def get_scores(self):
"""
        Returns a tuple (single-threaded score, multi-threaded score) for this workload.
Some workloads only have a single-threaded score, in which case multi-threaded
score will be ``None``.
Geekbench will perform four iterations of each workload in single-threaded and,
for some workloads, multi-threaded configurations. Thus there should always be
either four or eight scores collected for each workload. Single-threaded iterations
are always done before multi-threaded, so the ordering of the scores can be used
to determine which configuration they belong to.
This method should not be called before score collection has finished.
"""
no_of_results = len(self.collected_results)
if no_of_results == 4:
return (self._calculate(self.collected_results[:4], self.pmac_g5_st_score), None)
if no_of_results == 8:
return (self._calculate(self.collected_results[:4], self.pmac_g5_st_score),
self._calculate(self.collected_results[4:], self.pmac_g5_mt_score))
else:
msg = 'Collected {} results for Geekbench {} workload;'.format(no_of_results, self.name)
msg += ' expecting either 4 or 8.'
raise WorkloadError(msg)
def _calculate(self, values, scale_factor):
return max(values) * self.pmac_g5_base_score / scale_factor
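    # Hedged worked example: for 'Blowfish' (pmac_g5_st_score == 43971), a
    # best single-threaded result of 87942 gives
    # 87942 * 1000 / 43971 == 2000.0, i.e. twice the baseline G5's score.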
def __str__(self):
return self.name
__repr__ = __str__
class GBScoreCalculator(object):
"""
Parses logcat output to extract raw Geekbench workload values and converts them into
category and overall scores.
"""
result_regex = re.compile(r'workload (?P<id>\d+) (?P<value>[0-9.]+) '
r'(?P<units>[a-zA-Z/]+) (?P<time>[0-9.]+)s')
# Indicates contribution to the overall score.
category_weights = {
'integer': 0.3357231,
'float': 0.3594,
'memory': 0.1926489,
'stream': 0.1054738,
}
#pylint: disable=C0326
workloads = [
# ID Name Power Mac ST Power Mac MT
GBWorkload(101, 'Blowfish', 43971, 40979),
GBWorkload(102, 'Text Compress', 3202, 3280),
GBWorkload(103, 'Text Decompress', 4112, 3986),
GBWorkload(104, 'Image Compress', 8272, 8412),
GBWorkload(105, 'Image Decompress', 16800, 16330),
GBWorkload(107, 'Lua', 385, 385),
GBWorkload(201, 'Mandelbrot', 665589, 653746),
GBWorkload(202, 'Dot Product', 481449, 455422),
GBWorkload(203, 'LU Decomposition', 889933, 877657),
GBWorkload(204, 'Primality Test', 149394, 185502),
GBWorkload(205, 'Sharpen Image', 2340, 2304),
GBWorkload(206, 'Blur Image', 791, 787),
GBWorkload(302, 'Read Sequential', 1226708, None),
GBWorkload(304, 'Write Sequential', 683782, None),
GBWorkload(306, 'Stdlib Allocate', 3739, None),
GBWorkload(307, 'Stdlib Write', 2070681, None),
GBWorkload(308, 'Stdlib Copy', 1030360, None),
GBWorkload(401, 'Stream Copy', 1367892, None),
GBWorkload(402, 'Stream Scale', 1296053, None),
GBWorkload(403, 'Stream Add', 1507115, None),
GBWorkload(404, 'Stream Triad', 1384526, None),
]
def __init__(self):
self.workload_map = {wl.wlid: wl for wl in self.workloads}
def parse(self, filepath):
"""
Extract results from the specified file. The file should contain a logcat log of Geekbench execution.
Iteration results in the log appear as 'I/geekbench' category entries in the following format::
        | workload ID value units timing
| \------------- | ----/ ---/
| | | | |
| I/geekbench(29026): [....] workload 101 132.9 MB/sec 0.0300939s
| | |
| | -----\
| label random crap we don't care about
"""
for wl in self.workloads:
wl.clear()
with open(filepath) as fh:
for line in fh:
match = self.result_regex.search(line)
if match:
wkload = self.workload_map[int(match.group('id'))]
wkload.add_result(float(match.group('value')), match.group('units'))
def update_results(self, context):
"""
http://support.primatelabs.com/kb/geekbench/interpreting-geekbench-2-scores
From the website:
Each workload's performance is compared against a baseline to determine a score. These
scores are averaged together to determine an overall, or Geekbench, score for the system.
Geekbench uses the 2003 entry-level Power Mac G5 as the baseline with a score of 1,000
points. Higher scores are better, with double the score indicating double the performance.
Geekbench provides three different kinds of scores:
:Workload Scores: Each time a workload is executed Geekbench calculates a score based
on the computer's performance compared to the baseline
performance. There can be multiple workload scores for the
same workload as Geekbench can execute each workload multiple
times with different settings. For example, the "Dot Product"
workload is executed four times (single-threaded scalar code,
multi-threaded scalar code, single-threaded vector code, and
multi-threaded vector code) producing four "Dot Product" scores.
:Section Scores: A section score is the average of all the workload scores for
workloads that are part of the section. These scores are useful
for determining the performance of the computer in a particular
area. See the section descriptions above for a summary on what
each section measures.
:Geekbench Score: The Geekbench score is the weighted average of the four section
scores. The Geekbench score provides a way to quickly compare
performance across different computers and different platforms
without getting bogged down in details.
"""
scores_by_category = defaultdict(list)
for wkload in self.workloads:
st_score, mt_score = wkload.get_scores()
scores_by_category[wkload.category].append(st_score)
context.result.add_metric(wkload.name + ' (single-threaded)', int(st_score))
if mt_score is not None:
scores_by_category[wkload.category].append(mt_score)
context.result.add_metric(wkload.name + ' (multi-threaded)', int(mt_score))
overall_score = 0
for category in scores_by_category:
scores = scores_by_category[category]
category_score = sum(scores) / len(scores)
overall_score += category_score * self.category_weights[category]
context.result.add_metric(capitalize(category) + ' Score', int(category_score))
context.result.add_metric('Geekbench Score', int(overall_score))
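        # Hedged arithmetic check: with category scores integer=2000,
        # float=2000, memory=1500 and stream=1500, the overall score is
        # 2000 * 0.3357231 + 2000 * 0.3594 + 1500 * 0.1926489
        # + 1500 * 0.1054738 ~= 1837.4, reported as 1837.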
def namemify(basename, i):
return basename + (' {}'.format(i) if i else '')
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
import copy
import mistune
from .parsers.poll import PollParser
class BlockGrammar(mistune.BlockGrammar):
# todo: remove all *_link
#link_block = re.compile(
# r'^https?://[^\s]+'
# r'(?:\n+|$)'
#)
audio_link = re.compile(
r'^https?://[^\s]+\.(mp3|ogg|wav)'
r'(\?[^\s]+)?'
r'(?:\n+|$)'
)
image_link = re.compile(
r'^https?://[^\s]+/(?P<image_name>[^\s]+)\.'
r'(?P<extension>png|jpg|jpeg|gif|bmp|tif|tiff)'
r'(\?[^\s]+)?'
r'(?:\n+|$)'
)
video_link = re.compile(
r'^https?://[^\s]+\.(mov|mp4|webm|ogv)'
r'(\?[^\s]+)?'
r'(?:\n+|$)'
)
# Try to get the video ID. Works for URLs of the form:
# * https://www.youtube.com/watch?v=Z0UISCEe52Y
# * http://youtu.be/afyK1HSFfgw
# * https://www.youtube.com/embed/vsF0K3Ou1v0
#
# Also works for timestamps:
# * https://www.youtube.com/watch?v=Z0UISCEe52Y&t=1m30s
# * https://www.youtube.com/watch?v=O1QQajfobPw&t=1h1m38s
# * https://www.youtube.com/watch?v=O1QQajfobPw&feature=youtu.be&t=3698
# * https://youtu.be/O1QQajfobPw?t=3698
# * https://youtu.be/O1QQajfobPw?t=1h1m38s
#
youtube = re.compile(
r'^https?://(www\.)?'
r'(youtube\.com/watch\?v='
r'|youtu\.be/'
r'|youtube\.com/embed/)'
r'(?P<id>[a-zA-Z0-9_\-]{11})'
r'((&|\?)('
r'|(t=(?P<start_hours>[0-9]{1,2}h)?(?P<start_minutes>[0-9]{1,4}m)?(?P<start_seconds>[0-9]{1,5}s?)?)'
r'|([^&\s]+)'
r')){,10}'
r'(?:\n+|$)'
)
# Try to get the video ID. Works for URLs of the form:
# * https://vimeo.com/11111111
# * https://www.vimeo.com/11111111
# * https://player.vimeo.com/video/11111111
# * https://vimeo.com/channels/11111111
# * https://vimeo.com/groups/name/videos/11111111
# * https://vimeo.com/album/2222222/video/11111111
# * https://vimeo.com/11111111?param=value
vimeo = re.compile(
r'^https?://(www\.|player\.)?'
r'vimeo\.com/'
r'(channels/'
r'|groups/[^/]+/videos/'
r'|album/(\d+)/video/'
r'|video/)?'
r'(?P<id>\d+)'
r'(\?[^\s]+)?'
r'(?:\n+|$)'
)
# Try to get the video ID. Works for URLs of the form:
# * https://gfycat.com/videoid
# * https://www.gfycat.com/videoid
# * http://gfycat.com/videoid
# * http://www.gfycat.com/videoid
gfycat = re.compile(
r'^https?://(www\.)?'
r'gfycat\.com/'
r'(?P<id>\w+)'
r'(\?[^\s]+)?'
r'(?:\n+|$)'
)
# Try to get the channel. Works for URLs of the form:
# https://www.twitch.tv/lirik
twitch_channel = re.compile(
r'^https?://(www\.)?'
r'twitch\.tv/'
r'(?P<channel>\w+)'
)
# Try to get the video. Works for URLs of the form:
# https://www.twitch.tv/videos/432540384
twitch_video = re.compile(
r'^https?://(www\.)?'
r'twitch\.tv/videos/'
r'(?P<video_id>\w+)'
)
# Capture polls:
# [poll name=foo min=1 max=1 close=1d mode=default]
# # Which opt you prefer?
# 1. opt 1
# 2. opt 2
# [/poll]
poll = re.compile(
r'^(?:\[poll'
r'((?:\s+name=(?P<name>[\w\-_]+))'
r'(?:\s+min=(?P<min>\d+))?'
r'(?:\s+max=(?P<max>\d+))?'
r'(?:\s+close=(?P<close>\d+)d)?'
r'(?:\s+mode=(?P<mode>(default|secret)))?'
r'|(?P<invalid_params>[^\]]*))'
r'\])\n'
r'((?:#\s*(?P<title>[^\n]+\n))?'
r'(?P<choices>(?:\d+\.\s*[^\n]+\n){2,})'
r'|(?P<invalid_body>(?:[^\n]+\n)*))'
r'(?:\[/poll\])'
)
class BlockLexer(mistune.BlockLexer):
default_rules = copy.copy(mistune.BlockLexer.default_rules)
default_rules.insert(0, 'audio_link')
default_rules.insert(0, 'image_link')
default_rules.insert(0, 'video_link')
default_rules.insert(0, 'youtube')
default_rules.insert(0, 'vimeo')
default_rules.insert(0, 'gfycat')
default_rules.insert(0, 'twitch_channel')
default_rules.insert(0, 'twitch_video')
default_rules.insert(0, 'poll')
def __init__(self, rules=None, **kwargs):
if rules is None:
rules = BlockGrammar()
super(BlockLexer, self).__init__(rules=rules, **kwargs)
self.polls = {
'polls': [],
'choices': []
}
def parse_audio_link(self, m):
self.tokens.append({
'type': 'audio_link',
'link': m.group(0).strip()
})
def parse_image_link(self, m):
link = m.group(0).strip()
title = m.group('image_name').strip()
self.tokens.append({
'type': 'image_link',
'src': link,
'title': title,
'text': title
})
def parse_video_link(self, m):
self.tokens.append({
'type': 'video_link',
'link': m.group(0).strip()
})
def parse_youtube(self, m):
self.tokens.append({
'type': 'youtube',
'video_id': m.group("id"),
'start_hours': m.group("start_hours"),
'start_minutes': m.group("start_minutes"),
'start_seconds': m.group("start_seconds"),
})
def parse_vimeo(self, m):
self.tokens.append({
'type': 'vimeo',
'video_id': m.group("id")
})
def parse_gfycat(self, m):
self.tokens.append({
'type': 'gfycat',
'video_id': m.group("id")
})
def parse_twitch_channel(self, m):
self.tokens.append({
'type': 'twitch_channel',
'channel': m.group("channel")
})
def parse_twitch_video(self, m):
self.tokens.append({
'type': 'twitch_video',
'video_id': m.group("video_id")
})
def parse_poll(self, m):
parser = PollParser(polls=self.polls, data=m.groupdict())
if parser.is_valid():
poll = parser.cleaned_data['poll']
choices = parser.cleaned_data['choices']
self.polls['polls'].append(poll)
self.polls['choices'].extend(choices)
self.tokens.append({
'type': 'poll',
'name': poll['name']
})
else:
self.tokens.append({
'type': 'poll',
'raw': m.group(0)
})
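# Usage sketch (illustrative; not called anywhere in this module). Assumes
# the mistune 0.x BlockLexer API that the class above extends, where parse()
# dispatches each matched rule to the corresponding parse_* method.
def _example_tokenize(text):
    lexer = BlockLexer()
    return lexer.parse(text)
# For instance, a bare YouTube URL such as
#   'https://www.youtube.com/watch?v=Z0UISCEe52Y&t=1m30s\n'
# would yield a token along the lines of
#   {'type': 'youtube', 'video_id': 'Z0UISCEe52Y',
#    'start_hours': None, 'start_minutes': '1m', 'start_seconds': '30s'}
# while a [poll ...]...[/poll] block either registers a validated poll in
# lexer.polls and emits {'type': 'poll', 'name': ...}, or falls back to
# {'type': 'poll', 'raw': <original markup>} when the markup is invalid.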
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Testing clusters
"""
from bigmler.tests.world import (world, common_setup_module,
common_teardown_module, teardown_class)
import bigmler.tests.basic_tst_prediction_steps as test_pred
import bigmler.tests.basic_cluster_prediction_steps as test_cluster
def setup_module():
"""Setup for the module
"""
common_setup_module()
def teardown_module():
"""Teardown for the module
"""
common_teardown_module()
class TestCluster(object):
def teardown(self):
"""Calling generic teardown for every method
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
teardown_class()
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully building test centroids from scratch:
Given I create BigML resources uploading train "<data>" file to create centroids for "<test>" and log predictions in "<output>"
And I check that the source has been created
And I check that the dataset has been created
And I check that the cluster has been created
And I check that the centroids are ready
Then the local centroids file is like "<predictions_file>"
Examples:
| data | test | output | predictions_file |
| ../data/grades.csv | ../data/grades.csv | ./scenario_c_1_r/centroids.csv | ./check_files/centroids_grades.csv |
| ../data/diabetes.csv | ../data/diabetes.csv | ./scenario_c_1/centroids.csv | ./check_files/centroids_diabetes.csv |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/grades.csv', 'data/grades.csv', 'scenario_c_1_r/centroids.csv', 'check_files/centroids_grades.csv'],
['data/diabetes.csv', 'data/diabetes.csv', 'scenario_c_1/centroids.csv', 'check_files/centroids_diabetes.csv']]
for example in examples:
print("\nTesting with:\n", example)
test_cluster.i_create_all_cluster_resources(self, data=example[0], test=example[1], output=example[2])
test_pred.i_check_create_source(self)
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_cluster(self)
test_cluster.i_check_create_centroids(self)
test_pred.i_check_predictions(self, example[3])
def test_scenario2(self):
"""
Scenario: Successfully building test predictions from source
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using source to find centroids for "<test>" and log predictions in "<output>"
And I check that the dataset has been created
And I check that the cluster has been created
And I check that the centroids are ready
Then the local centroids file is like "<predictions_file>"
Examples:
| scenario | kwargs | test | output | predictions_file |
| scenario_c_1| {"data": "../data/diabetes.csv", "output": "./scenario_c_1/centroids.csv", "test": "../data/diabetes.csv"} | ../data/diabetes.csv | ./scenario_c_2/centroids.csv | ./check_files/centroids_diabetes.csv |
"""
print(self.test_scenario2.__doc__)
examples = [
['scenario_c_1', '{"data": "data/diabetes.csv", "output": "scenario_c_1/centroids.csv", "test": "data/diabetes.csv"}', 'data/diabetes.csv', 'scenario_c_2/centroids.csv', 'check_files/centroids_diabetes.csv']]
for example in examples:
print("\nTesting with:\n", example)
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_cluster.i_create_cluster_resources_from_source(self, test=example[2], output=example[3])
test_pred.i_check_create_dataset(self, suffix=None)
test_pred.i_check_create_cluster(self)
test_cluster.i_check_create_centroids(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario3(self):
"""
Scenario: Successfully building test predictions from dataset
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using dataset to find centroids for "<test>" and log predictions in "<output>"
And I check that the cluster has been created
And I check that the centroids are ready
Then the local centroids file is like "<predictions_file>"
Examples:
| scenario | kwargs | test | output | predictions_file |
| scenario_c_1| {"data": "../data/diabetes.csv", "output": "./scenario_c_1/centroids.csv", "test": "../data/diabetes.csv"} | ../data/diabetes.csv | ./scenario_c_3/centroids.csv | ./check_files/centroids_diabetes.csv |
"""
print(self.test_scenario3.__doc__)
examples = [
['scenario_c_1', '{"data": "data/diabetes.csv", "output": "scenario_c_1/centroids.csv", "test": "data/diabetes.csv"}', 'data/diabetes.csv', 'scenario_c_3/centroids.csv', 'check_files/centroids_diabetes.csv']]
for example in examples:
print("\nTesting with:\n", example)
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_cluster.i_create_cluster_resources_from_dataset(self, test=example[2], output=example[3])
test_pred.i_check_create_cluster(self)
test_cluster.i_check_create_centroids(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario4(self):
"""
Scenario: Successfully building test predictions from cluster
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using cluster to find centroids for "<test>" and log predictions in "<output>"
And I check that the centroids are ready
Then the local centroids file is like "<predictions_file>"
Examples:
| scenario | kwargs | test | output | predictions_file |
| scenario_c_1| {"data": "../data/diabetes.csv", "output": "./scenario_c_1/centroids.csv", "test": "../data/diabetes.csv"} | ../data/diabetes.csv | ./scenario_c_4/centroids.csv | ./check_files/centroids_diabetes.csv |
"""
print(self.test_scenario4.__doc__)
examples = [
['scenario_c_1', '{"data": "data/diabetes.csv", "output": "scenario_c_1/centroids.csv", "test": "data/diabetes.csv"}', 'data/diabetes.csv', 'scenario_c_4/centroids.csv', 'check_files/centroids_diabetes.csv']]
for example in examples:
print("\nTesting with:\n", example)
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_cluster.i_create_cluster_resources_from_cluster(self, test=example[2], output=example[3])
test_cluster.i_check_create_centroids(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario5(self):
"""
Scenario: Successfully building test predictions from clusters file
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using clusters in file "<clusters_file>" to find centroids for "<test>" and log predictions in "<output>"
And I check that the centroids are ready
Then the local centroids file is like "<predictions_file>"
Examples:
| scenario | kwargs | clusters_file | test | output | predictions_file |
| scenario_c_1| {"data": "../data/diabetes.csv", "output": "./scenario_c_1/centroids.csv", "test": "../data/diabetes.csv"} | ./scenario_c_1/clusters | ../data/diabetes.csv | ./scenario_c_5/centroids.csv | ./check_files/centroids_diabetes.csv |
"""
print(self.test_scenario5.__doc__)
examples = [
['scenario_c_1', '{"data": "data/diabetes.csv", "output": "scenario_c_1/centroids.csv", "test": "data/diabetes.csv"}', 'scenario_c_1/clusters', 'data/diabetes.csv', 'scenario_c_5/centroids.csv', 'check_files/centroids_diabetes.csv']]
for example in examples:
print("\nTesting with:\n", example)
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_cluster.i_create_cluster_resources_from_clusters_file(self, clusters_file=example[2], test=example[3], output=example[4])
test_cluster.i_check_create_centroids(self)
test_pred.i_check_predictions(self, example[5])
def test_scenario6(self):
"""
Scenario: Successfully generating datasets from cluster centroids
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I generate datasets for "<centroid_names>" centroids and log predictions in "<output>"
Then I check that the <datasets_number> cluster datasets are ready
Examples:
| scenario | kwargs | centroid_names | output | datasets_number |
| scenario_c_1| {"data": "../data/diabetes.csv", "output": "./scenario_c_1/centroids.csv", "test": "../data/diabetes.csv"} | Cluster 1,Cluster 2 | ./scenario_c_6/centroids.csv | 2 |
"""
print(self.test_scenario6.__doc__)
examples = [
['scenario_c_1', '{"data": "data/diabetes.csv", "output": "scenario_c_1/centroids.csv", "test": "data/diabetes.csv"}', 'Cluster 1,Cluster 2', 'scenario_c_6/centroids.csv', '2']]
for example in examples:
print("\nTesting with:\n", example)
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_cluster.i_create_datasets_from_cluster(self, centroids=example[2], output=example[3])
test_cluster.i_check_cluster_datasets(self, datasets_number=example[4])
def test_scenario7(self):
"""
Scenario: Successfully building test predictions from local cluster
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML resources using local cluster in "<scenario>" to find centroids for "<test>" and log predictions in "<output>"
And I check that the centroids are ready
Then the local centroids file is like "<predictions_file>"
Examples:
| scenario | kwargs | test | output | predictions_file |
| scenario_c_1| {"data": "../data/diabetes.csv", "output": "./scenario_c_1/centroids.csv", "test": "../data/diabetes.csv"} | ../data/diabetes.csv | ./scenario_c_7/centroids.csv | ./check_files/centroids_diabetes.csv |
"""
print(self.test_scenario7.__doc__)
examples = [
['scenario_c_1', '{"data": "data/diabetes.csv", "output": "scenario_c_1/centroids.csv", "test": "data/diabetes.csv"}', 'data/diabetes.csv', 'scenario_c_7/centroids.csv', 'check_files/centroids_diabetes.csv']]
for example in examples:
print("\nTesting with:\n", example)
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_cluster.i_create_cluster_resources_from_local_cluster(self, directory=example[0], test=example[2], output=example[3])
test_cluster.i_check_create_centroids(self)
test_pred.i_check_predictions(self, example[4])
def test_scenario8(self):
"""
Scenario: Successfully generating models from cluster centroids
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I generate models for "<centroid_names>" centroids and log results in "<output>"
Then I check that the <model_number> cluster models are ready
Examples:
| scenario | kwargs | centroid_names | output | model_number |
| scenario_c_1| {"data": "../data/diabetes.csv", "output": "./scenario_c_1/centroids.csv", "test": "../data/diabetes.csv"} | Cluster 1,Cluster 2 | ./scenario_c_8/centroids.csv | 2 |
"""
print(self.test_scenario8.__doc__)
examples = [
['scenario_c_1', '{"data": "data/diabetes.csv", "output": "scenario_c_1/centroids.csv", "test": "data/diabetes.csv"}', 'Cluster 1,Cluster 2', 'scenario_c_8/centroids.csv', '2']]
for example in examples:
print("\nTesting with:\n", example)
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_cluster.i_create_models_from_cluster(self, centroids=example[2], output=example[3])
test_cluster.i_check_create_cluster(self)
test_cluster.i_check_cluster_models(self, models_number=example[4])
def test_scenario9(self):
"""
Scenario: Successfully building test predictions from dataset with summary fields
Given I have previously executed "<scenario>" or reproduce it with arguments <kwargs>
And I create BigML cluster using dataset and summary fields "<summary_fields>" and log resources in "<output_dir>"
And I check that the cluster has been created
Then the cluster has summary fields "<summary_fields>"
Examples:
| scenario | kwargs | output_dir | summary_fields |
| scenario_c_1| {"data": "../data/diabetes.csv", "output": "./scenario_c_1/centroids.csv", "test": "../data/diabetes.csv"} | scenario_c_9/ | diabetes,age |
"""
print(self.test_scenario9.__doc__)
examples = [
['scenario_c_1', '{"data": "data/diabetes.csv", "output": "scenario_c_1/centroids.csv", "test": "data/diabetes.csv"}', 'scenario_c_9', '000008,000007']]
for example in examples:
print("\nTesting with:\n", example)
test_pred.i_have_previous_scenario_or_reproduce_it(self, example[0], example[1])
test_cluster.i_create_cluster_from_dataset_with_summary_fields(self, summary_fields=example[3], output_dir=example[2])
test_pred.i_check_create_cluster(self)
test_cluster.i_check_cluster_has_summary_fields(self, example[3])
| |
# Authors:
# Trevor Perrin
# Dave Baggett (Arcode Corporation) - cleanup handling of constants
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
# See the LICENSE file for legal information regarding use of this file.
"""Class for setting handshake parameters."""
from .constants import CertificateType
from .utils import cryptomath
from .utils import cipherfactory
CIPHER_NAMES = ["aes128gcm", "rc4", "aes256", "aes128", "3des"]
MAC_NAMES = ["sha", "sha256", "aead"] # Don't allow "md5" by default.
ALL_MAC_NAMES = MAC_NAMES + ["md5"]
KEY_EXCHANGE_NAMES = ["rsa", "dhe_rsa", "ecdhe_rsa", "srp_sha", "srp_sha_rsa", "dh_anon"]
CIPHER_IMPLEMENTATIONS = ["openssl", "pycrypto", "python"]
CERTIFICATE_TYPES = ["x509"]
TLS_INTOLERANCE_TYPES = ["alert", "close", "reset"]
class HandshakeSettings(object):
"""This class encapsulates various parameters that can be used with
a TLS handshake.
@sort: minKeySize, maxKeySize, cipherNames, macNames, certificateTypes,
minVersion, maxVersion
@type minKeySize: int
@ivar minKeySize: The minimum bit length for asymmetric keys.
If the other party tries to use SRP, RSA, or Diffie-Hellman
parameters smaller than this length, an alert will be
signalled. The default is 1023.
@type maxKeySize: int
@ivar maxKeySize: The maximum bit length for asymmetric keys.
If the other party tries to use SRP, RSA, or Diffie-Hellman
parameters larger than this length, an alert will be signalled.
The default is 8193.
@type cipherNames: list
@ivar cipherNames: The allowed ciphers.
The allowed values in this list are 'aes128gcm', 'aes256', 'aes128',
'3des', and 'rc4'. If these settings are used with a client handshake, they
determine the order of the ciphersuites offered in the ClientHello
message.
If these settings are used with a server handshake, the server will
choose whichever ciphersuite matches the earliest entry in this
list.
NOTE: If '3des' is used in this list, but TLS Lite can't find an
add-on library that supports 3DES, then '3des' will be silently
removed.
The default value is ['aes128gcm', 'rc4', 'aes256', 'aes128', '3des'].
@type macNames: list
@ivar macNames: The allowed MAC algorithms.
The allowed values in this list are 'sha', 'sha256', 'aead', and 'md5'.
The default value is ['sha', 'sha256', 'aead']; 'md5' is not allowed by
default.
@type certificateTypes: list
@ivar certificateTypes: The allowed certificate types.
The only allowed certificate type is 'x509'. This list is only used with a
client handshake. The client will advertise to the server which certificate
types are supported, and will check that the server uses one of the
appropriate types.
@type minVersion: tuple
@ivar minVersion: The minimum allowed SSL/TLS version.
This variable can be set to (3,0) for SSL 3.0, (3,1) for TLS 1.0, (3,2) for
TLS 1.1, or (3,3) for TLS 1.2. If the other party wishes to use a lower
version, a protocol_version alert will be signalled. The default is (3,1).
@type maxVersion: tuple
@ivar maxVersion: The maximum allowed SSL/TLS version.
This variable can be set to (3,0) for SSL 3.0, (3,1) for TLS 1.0, (3,2) for
TLS 1.1, or (3,3) for TLS 1.2. If the other party wishes to use a higher
version, a protocol_version alert will be signalled. The default is (3,3).
(WARNING: Some servers may (improperly) reject clients which offer support
for TLS 1.1. In this case, try lowering maxVersion to (3,1)).
@type tlsIntolerant: tuple
@ivar tlsIntolerant: The TLS ClientHello version which the server
simulates intolerance of.
If tlsIntolerant is not None, the server will simulate TLS version
intolerance by aborting the handshake in response to any ClientHello
version greater than or equal to tlsIntolerant.
@type tlsIntoleranceType: str
@ivar tlsIntoleranceType: How the server should react when simulating TLS
intolerance.
The allowed values are "alert" (return a fatal handshake_failure alert),
"close" (abruptly close the connection), and "reset" (send a TCP reset).
@type useExperimentalTackExtension: bool
@ivar useExperimentalTackExtension: Whether to enable TACK support.
Note that TACK support is not standardized by the IETF and uses a temporary
TLS extension number, so it should NOT be used in production software.
@type alertAfterHandshake: bool
@ivar alertAfterHandshake: If true, the server will send a fatal
alert immediately after the handshake completes.
@type enableExtendedMasterSecret: bool
@ivar enableExtendedMasterSecret: If true, the server supports the extended
master secret TLS extension and will negotiate it with supporting clients.
"""
def __init__(self):
self.minKeySize = 1023
self.maxKeySize = 8193
self.cipherNames = CIPHER_NAMES
self.macNames = MAC_NAMES
self.keyExchangeNames = KEY_EXCHANGE_NAMES
self.cipherImplementations = CIPHER_IMPLEMENTATIONS
self.certificateTypes = CERTIFICATE_TYPES
self.minVersion = (3,1)
self.maxVersion = (3,3)
self.tlsIntolerant = None
self.tlsIntoleranceType = 'alert'
self.useExperimentalTackExtension = False
self.alertAfterHandshake = False
self.enableExtendedMasterSecret = True
# Validates the min/max fields, and certificateTypes
# Filters out unsupported cipherNames and cipherImplementations
def _filter(self):
other = HandshakeSettings()
other.minKeySize = self.minKeySize
other.maxKeySize = self.maxKeySize
other.cipherNames = self.cipherNames
other.macNames = self.macNames
other.keyExchangeNames = self.keyExchangeNames
other.cipherImplementations = self.cipherImplementations
other.certificateTypes = self.certificateTypes
other.minVersion = self.minVersion
other.maxVersion = self.maxVersion
other.tlsIntolerant = self.tlsIntolerant
other.tlsIntoleranceType = self.tlsIntoleranceType
other.alertAfterHandshake = self.alertAfterHandshake
other.enableExtendedMasterSecret = self.enableExtendedMasterSecret
if not cipherfactory.tripleDESPresent:
other.cipherNames = [e for e in self.cipherNames if e != "3des"]
if len(other.cipherNames)==0:
raise ValueError("No supported ciphers")
if len(other.certificateTypes)==0:
raise ValueError("No supported certificate types")
if not cryptomath.m2cryptoLoaded:
other.cipherImplementations = \
[e for e in other.cipherImplementations if e != "openssl"]
if not cryptomath.pycryptoLoaded:
other.cipherImplementations = \
[e for e in other.cipherImplementations if e != "pycrypto"]
if len(other.cipherImplementations)==0:
raise ValueError("No supported cipher implementations")
if other.minKeySize<512:
raise ValueError("minKeySize too small")
if other.minKeySize>16384:
raise ValueError("minKeySize too large")
if other.maxKeySize<512:
raise ValueError("maxKeySize too small")
if other.maxKeySize>16384:
raise ValueError("maxKeySize too large")
for s in other.cipherNames:
if s not in CIPHER_NAMES:
raise ValueError("Unknown cipher name: '%s'" % s)
for s in other.macNames:
if s not in ALL_MAC_NAMES:
raise ValueError("Unknown MAC name: '%s'" % s)
for s in other.keyExchangeNames:
if s not in KEY_EXCHANGE_NAMES:
raise ValueError("Unknown key exchange name: '%s'" % s)
for s in other.cipherImplementations:
if s not in CIPHER_IMPLEMENTATIONS:
raise ValueError("Unknown cipher implementation: '%s'" % s)
for s in other.certificateTypes:
if s not in CERTIFICATE_TYPES:
raise ValueError("Unknown certificate type: '%s'" % s)
if other.tlsIntoleranceType not in TLS_INTOLERANCE_TYPES:
raise ValueError(
"Unknown TLS intolerance type: '%s'" % other.tlsIntoleranceType)
if other.minVersion > other.maxVersion:
raise ValueError("Versions set incorrectly")
if other.minVersion not in ((3, 0), (3, 1), (3, 2), (3, 3)):
raise ValueError("minVersion set incorrectly")
if other.maxVersion not in ((3, 0), (3, 1), (3, 2), (3, 3)):
raise ValueError("maxVersion set incorrectly")
return other
def _getCertificateTypes(self):
l = []
for ct in self.certificateTypes:
if ct == "x509":
l.append(CertificateType.x509)
else:
raise AssertionError()
return l
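# Usage sketch (illustrative; not part of the original module): restrict a
# handshake to TLS 1.2 with AES-based ciphers only, then validate.
def _example_tls12_only_settings():
    settings = HandshakeSettings()
    settings.minVersion = (3, 3)   # TLS 1.2
    settings.maxVersion = (3, 3)
    settings.cipherNames = ["aes128gcm", "aes256", "aes128"]
    # _filter() copies the settings, prunes ciphers/implementations that the
    # available crypto back-ends cannot support, and raises ValueError on
    # inconsistent values (e.g. minVersion > maxVersion).
    return settings._filter()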
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from oslo_utils import units
from nova.compute import arch
from nova.compute import claims
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import exception as exc
from nova import objects
from nova.objects import base as obj_base
from nova.pci import manager as pci_manager
from nova import test
_HOSTNAME = 'fake-host'
_NODENAME = 'fake-node'
_VIRT_DRIVER_AVAIL_RESOURCES = {
'vcpus': 4,
'memory_mb': 512,
'local_gb': 6,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': _NODENAME,
'cpu_info': '',
'numa_topology': None,
}
_COMPUTE_NODE_FIXTURES = [
objects.ComputeNode(
id=1,
host=_HOSTNAME,
vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
memory_mb=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'],
local_gb=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'],
vcpus_used=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus_used'],
memory_mb_used=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used'],
local_gb_used=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used'],
hypervisor_type='fake',
hypervisor_version=0,
hypervisor_hostname=_HOSTNAME,
free_ram_mb=(_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'] -
_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used']),
free_disk_gb=(_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'] -
_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used']),
current_workload=0,
running_vms=0,
cpu_info='{}',
disk_available_least=0,
host_ip='1.1.1.1',
supported_hv_specs=[
objects.HVSpec.from_list([arch.I686, hv_type.KVM, vm_mode.HVM])
],
metrics=None,
pci_device_pools=None,
extra_resources=None,
stats={},
numa_topology=None,
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
),
]
_INSTANCE_TYPE_FIXTURES = {
1: {
'id': 1,
'flavorid': 'fakeid-1',
'name': 'fake1.small',
'memory_mb': 128,
'vcpus': 1,
'root_gb': 1,
'ephemeral_gb': 0,
'swap': 0,
'rxtx_factor': 0,
'vcpu_weight': 1,
'extra_specs': {},
},
2: {
'id': 2,
'flavorid': 'fakeid-2',
'name': 'fake1.medium',
'memory_mb': 256,
'vcpus': 2,
'root_gb': 5,
'ephemeral_gb': 0,
'swap': 0,
'rxtx_factor': 0,
'vcpu_weight': 1,
'extra_specs': {},
},
}
_INSTANCE_TYPE_OBJ_FIXTURES = {
1: objects.Flavor(id=1, flavorid='fakeid-1', name='fake1.small',
memory_mb=128, vcpus=1, root_gb=1,
ephemeral_gb=0, swap=0, rxtx_factor=0,
vcpu_weight=1, extra_specs={}),
2: objects.Flavor(id=2, flavorid='fakeid-2', name='fake1.medium',
memory_mb=256, vcpus=2, root_gb=5,
ephemeral_gb=0, swap=0, rxtx_factor=0,
vcpu_weight=1, extra_specs={}),
}
_2MB = 2 * units.Mi / units.Ki
_INSTANCE_NUMA_TOPOLOGIES = {
'2mb': objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=_2MB, pagesize=0),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=_2MB, pagesize=0)]),
}
_NUMA_LIMIT_TOPOLOGIES = {
'2mb': objects.NUMATopologyLimits(id=0,
cpu_allocation_ratio=1.0,
ram_allocation_ratio=1.0),
}
_NUMA_PAGE_TOPOLOGIES = {
'2kb*8': objects.NUMAPagesTopology(size_kb=2, total=8, used=0)
}
_NUMA_HOST_TOPOLOGIES = {
'2mb': objects.NUMATopology(cells=[
objects.NUMACell(id=0, cpuset=set([1, 2]), memory=_2MB,
cpu_usage=0, memory_usage=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=_2MB,
cpu_usage=0, memory_usage=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2kb*8']],
siblings=[], pinned_cpus=set([]))]),
}
_INSTANCE_FIXTURES = [
objects.Instance(
id=1,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
instance_type_id=1,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=None,
os_type='fake-os', # Used by the stats collector.
project_id='fake-project', # Used by the stats collector.
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
),
objects.Instance(
id=2,
host=None,
node=None,
uuid='33805b54-dea6-47b8-acb2-22aeb1b57919',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
instance_type_id=2,
vm_state=vm_states.DELETED,
power_state=power_state.SHUTDOWN,
task_state=None,
os_type='fake-os',
project_id='fake-project-2',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
),
]
_MIGRATION_FIXTURES = {
# A migration that has only this compute node as the source host
'source-only': objects.Migration(
id=1,
instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
source_compute=_HOSTNAME,
dest_compute='other-host',
source_node=_NODENAME,
dest_node='other-node',
old_instance_type_id=1,
new_instance_type_id=2,
migration_type='resize',
status='migrating'
),
# A migration that has only this compute node as the dest host
'dest-only': objects.Migration(
id=2,
instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
source_compute='other-host',
dest_compute=_HOSTNAME,
source_node='other-node',
dest_node=_NODENAME,
old_instance_type_id=1,
new_instance_type_id=2,
migration_type='resize',
status='migrating'
),
# A migration that has this compute node as both the source and dest host
'source-and-dest': objects.Migration(
id=3,
instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
source_compute=_HOSTNAME,
dest_compute=_HOSTNAME,
source_node=_NODENAME,
dest_node=_NODENAME,
old_instance_type_id=1,
new_instance_type_id=2,
migration_type='resize',
status='migrating'
),
# A migration that has this compute node as destination and is an evac
'dest-only-evac': objects.Migration(
id=4,
instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
source_compute='other-host',
dest_compute=_HOSTNAME,
source_node='other-node',
dest_node=_NODENAME,
old_instance_type_id=2,
new_instance_type_id=None,
migration_type='evacuation',
status='pre-migrating'
),
}
_MIGRATION_INSTANCE_FIXTURES = {
# source-only
'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.Instance(
id=101,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
instance_type_id=1,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
),
# dest-only
'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.Instance(
id=102,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
instance_type_id=2,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
),
# source-and-dest
'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.Instance(
id=3,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
instance_type_id=2,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
),
# dest-only-evac
'077fb63a-bdc8-4330-90ef-f012082703dc': objects.Instance(
id=102,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
instance_type_id=2,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.REBUILDING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
),
}
_MIGRATION_CONTEXT_FIXTURES = {
'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.MigrationContext(
instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
migration_id=3,
new_numa_topology=None,
old_numa_topology=None),
'c17741a5-6f3d-44a8-ade8-773dc8c29124': objects.MigrationContext(
instance_uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
migration_id=3,
new_numa_topology=None,
old_numa_topology=None),
'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.MigrationContext(
instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
migration_id=1,
new_numa_topology=None,
old_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb']),
'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.MigrationContext(
instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
migration_id=2,
new_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
old_numa_topology=None),
'077fb63a-bdc8-4330-90ef-f012082703dc': objects.MigrationContext(
instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
migration_id=2,
new_numa_topology=None,
old_numa_topology=None),
}
def overhead_zero(instance):
# Emulate that the driver does not adjust the memory
# of the instance...
return {
'memory_mb': 0,
'disk_gb': 0,
}
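# Illustrative alternative estimator (hypothetical; unused by the tests
# below): emulate a driver that reserves 10% of each instance's memory as
# hypervisor overhead. Any callable with this signature can be passed to
# setup_rt() via its estimate_overhead parameter.
def overhead_ten_percent(instance):
    return {
        'memory_mb': instance.memory_mb // 10,
        'disk_gb': 0,
    }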
def setup_rt(hostname, nodename, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
estimate_overhead=overhead_zero):
"""Sets up the resource tracker instance with mock fixtures.
:param virt_resources: Optional override of the resource representation
returned by the virt driver's
`get_available_resource()` method.
:param estimate_overhead: Optional override of a function that should
return overhead of memory given an instance
object. Defaults to returning zero overhead.
"""
sched_client_mock = mock.MagicMock()
notifier_mock = mock.MagicMock()
vd = mock.MagicMock()
# Make sure we don't change any global fixtures during tests
virt_resources = copy.deepcopy(virt_resources)
vd.get_available_resource.return_value = virt_resources
vd.estimate_instance_overhead.side_effect = estimate_overhead
with test.nested(
mock.patch('nova.scheduler.client.SchedulerClient',
return_value=sched_client_mock),
mock.patch('nova.rpc.get_notifier', return_value=notifier_mock)):
rt = resource_tracker.ResourceTracker(hostname, vd, nodename)
return (rt, sched_client_mock, vd)
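# Example (illustrative): a typical call, as used by BaseTestCase below.
#
#     rt, sched_client_mock, driver_mock = setup_rt(_HOSTNAME, _NODENAME)
#     rt.update_available_resource(mock.sentinel.ctx)
#
# update_available_resource() pulls its resource view from driver_mock's
# canned get_available_resource() return value.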
class BaseTestCase(test.NoDBTestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.rt = None
self.flags(my_ip='1.1.1.1')
def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
estimate_overhead=overhead_zero):
(self.rt, self.sched_client_mock,
self.driver_mock) = setup_rt(
_HOSTNAME, _NODENAME, virt_resources, estimate_overhead)
class TestUpdateAvailableResources(BaseTestCase):
def _update_available_resources(self):
# We test RT._update separately, since the complexity
# of the update_available_resource() function is high enough as
# it is, we just want to focus here on testing the resources
# parameter that update_available_resource() eventually passes
# to _update().
with mock.patch.object(self.rt, '_update') as update_mock:
self.rt.update_available_resource(mock.sentinel.ctx)
return update_mock
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,
get_cn_mock, pci_mock,
instance_pci_mock):
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self._setup_rt()
get_mock.return_value = []
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
update_mock = self._update_available_resources()
vd = self.driver_mock
vd.get_available_resource.assert_called_once_with(_NODENAME)
get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME,
expected_attrs=[
'system_metadata',
'numa_topology',
'flavor',
'migration_context'])
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
migr_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': _HOSTNAME,
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': _NODENAME,
'free_disk_gb': 6,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 512,
'memory_mb_used': 0,
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 0,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_reserved_disk_and_ram(
self, get_mock, migr_mock, get_cn_mock, pci_mock,
instance_pci_mock):
self.flags(reserved_host_disk_mb=1024,
reserved_host_memory_mb=512)
self._setup_rt()
get_mock.return_value = []
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': _HOSTNAME,
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': _NODENAME,
'free_disk_gb': 5, # 6GB avail - 1 GB reserved
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 0, # 512MB avail - 512MB reserved
'memory_mb_used': 512, # 0MB used + 512MB reserved
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 1, # 0GB used + 1 GB reserved
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_some_instances_no_migrations(self, get_mock, migr_mock,
get_cn_mock, pci_mock,
instance_pci_mock):
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
# Set up virt resources so that used resources match the number
# of instances defined on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=1,
memory_mb_used=128,
local_gb_used=1)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = _INSTANCE_FIXTURES
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': _HOSTNAME,
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': _NODENAME,
'free_disk_gb': 5, # 6 - 1 used
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 384, # 512 - 128 used
'memory_mb_used': 128,
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 1,
'hypervisor_type': 'fake',
'local_gb_used': 1,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 1 # One active instance
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_orphaned_instances_no_migrations(self, get_mock, migr_mock,
get_cn_mock, pci_mock,
instance_pci_mock):
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
# Set up virt resources so that used resources match the number
# of instances defined on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(memory_mb_used=64)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = []
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
# Orphaned instances are those that the virt driver has on
# record as consuming resources on the compute node, but the
# Nova database has no record of the instance being active
# on the host. For some reason, the resource tracker only
# considers orphaned instances' memory usage in its calculations
# of free resources...
orphaned_usages = {
'71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d': {
# Yes, the return result format of get_per_instance_usage
# is indeed this stupid and redundant. Also note that the
# libvirt driver just returns an empty dict always for this
# method and so who the heck knows whether this stuff
# actually works.
'uuid': '71ed7ef6-9d2e-4c65-9f4e-90bb6b76261d',
'memory_mb': 64
}
}
vd = self.driver_mock
vd.get_per_instance_usage.return_value = orphaned_usages
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': _HOSTNAME,
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': _NODENAME,
'free_disk_gb': 6,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 448, # 512 - 64 orphaned usage
'memory_mb_used': 64,
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 0,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
# Yep, for some reason, orphaned instances are not counted
# as running VMs...
'running_vms': 0
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_source_migration(self, get_mock, get_inst_mock,
migr_mock, get_cn_mock, pci_mock,
instance_pci_mock):
# We test the behavior of update_available_resource() when
# there is an active migration that involves this compute node
# as the source host not the destination host, and the resource
# tracker does not have any instances assigned to it. This is
# the case when a migration from this compute host to another
# has been completed, but the user has not confirmed the resize
# yet, so the resource tracker must continue to keep the resources
# for the original instance type available on the source compute
# node in case of a revert of the resize.
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
# Set up virt resources so that used resources match the number
# of instances defined on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=4,
memory_mb_used=128,
local_gb_used=1)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = []
migr_obj = _MIGRATION_FIXTURES['source-only']
migr_mock.return_value = [migr_obj]
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
# Migration.instance property is accessed in the migration
# processing code, and this property calls
# objects.Instance.get_by_uuid, so we have that call return the
# fixture instance set up below.
inst_uuid = migr_obj.instance_uuid
instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
get_inst_mock.return_value = instance
instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': _HOSTNAME,
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': _NODENAME,
'free_disk_gb': 5,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 384, # 512 total - 128 for possible revert of orig
'memory_mb_used': 128, # 128 possible revert amount
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 1,
'hypervisor_type': 'fake',
'local_gb_used': 1,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_dest_migration(self, get_mock, get_inst_mock,
migr_mock, get_cn_mock, pci_mock,
instance_pci_mock):
# We test the behavior of update_available_resource() when
# there is an active migration that involves this compute node
# as the destination host not the source host, and the resource
# tracker does not yet have any instances assigned to it. This is
# the case when a migration to this compute host from another host
# is in progress, but the user has not confirmed the resize
# yet, so the resource tracker must reserve the resources
# for the possibly-to-be-confirmed instance's instance type
# on this node in case the resize is confirmed.
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
# Set up virt resources so that used resources match the number
# of instances defined on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=2,
memory_mb_used=256,
local_gb_used=5)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = []
migr_obj = _MIGRATION_FIXTURES['dest-only']
migr_mock.return_value = [migr_obj]
inst_uuid = migr_obj.instance_uuid
instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
get_inst_mock.return_value = instance
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': _HOSTNAME,
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': _NODENAME,
'free_disk_gb': 1,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 256, # 512 total - 256 for possible confirm of new
'memory_mb_used': 256, # 256 possible confirmed amount
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 2,
'hypervisor_type': 'fake',
'local_gb_used': 5,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_dest_evacuation(self, get_mock, get_inst_mock,
migr_mock, get_cn_mock, pci_mock,
instance_pci_mock):
# We test the behavior of update_available_resource() when
# there is an active evacuation that involves this compute node
# as the destination host not the source host, and the resource
# tracker does not yet have any instances assigned to it. This is
# the case when a migration to this compute host from another host
# is in progress, but not finished yet.
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
# Set up virt resources so that used resources match the number
# of instances defined on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=2,
memory_mb_used=256,
local_gb_used=5)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = []
migr_obj = _MIGRATION_FIXTURES['dest-only-evac']
migr_mock.return_value = [migr_obj]
inst_uuid = migr_obj.instance_uuid
instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
get_inst_mock.return_value = instance
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': _HOSTNAME,
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': _NODENAME,
'free_disk_gb': 1,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 256, # 512 total - 256 for possible confirm of new
'memory_mb_used': 256, # 256 possible confirmed amount
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 2,
'hypervisor_type': 'fake',
'local_gb_used': 5,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
return_value=None)
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_some_instances_source_and_dest_migration(self, get_mock,
get_inst_mock, migr_mock,
get_cn_mock,
get_mig_ctxt_mock,
pci_mock,
instance_pci_mock):
# We test the behavior of update_available_resource() when
# there is an active migration that involves this compute node
# as the destination host AND the source host, and the resource
# tracker has a few instances assigned to it, including the
# instance that is resizing to this same compute node. The tracking
# of resource amounts takes into account both the old and new
# resize instance types as taking up space on the node.
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
# Set up virt resources so that used resources match the number
# of instances defined on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=4,
memory_mb_used=512,
local_gb_used=7)
self._setup_rt(virt_resources=virt_resources)
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
migr_mock.return_value = [migr_obj]
inst_uuid = migr_obj.instance_uuid
# The resizing instance has already had its instance type
# changed to the *new* instance type (the bigger one, instance type 2)
resizing_instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
resizing_instance.migration_context = (
_MIGRATION_CONTEXT_FIXTURES[resizing_instance.uuid])
all_instances = _INSTANCE_FIXTURES + [resizing_instance]
get_mock.return_value = all_instances
get_inst_mock.return_value = resizing_instance
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': _HOSTNAME,
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': _NODENAME,
# 6 total - 1G existing - 5G new flav - 1G old flav
'free_disk_gb': -1,
'hypervisor_version': 0,
'local_gb': 6,
# 512 total - 128 existing - 256 new flav - 128 old flav
'free_ram_mb': 0,
'memory_mb_used': 512, # 128 exist + 256 new flav + 128 old flav
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 4,
'hypervisor_type': 'fake',
            'local_gb_used': 7,  # 1G existing + 5G new flav + 1G old flav
'memory_mb': 512,
'current_workload': 1, # One migrating instance...
'vcpus': 4,
'running_vms': 2
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
def test_no_op_init_compute_node(self, get_mock, service_mock,
create_mock, pci_mock):
self._setup_rt()
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
self.rt.compute_node = compute_node
self.rt._init_compute_node(mock.sentinel.ctx, resources)
self.assertFalse(service_mock.called)
self.assertFalse(get_mock.called)
self.assertFalse(create_mock.called)
self.assertTrue(pci_mock.called)
self.assertFalse(self.rt.disabled)
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
def test_compute_node_loaded(self, get_mock, create_mock,
pci_mock):
self._setup_rt()
def fake_get_node(_ctx, host, node):
res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
return res
get_mock.side_effect = fake_get_node
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
self.rt._init_compute_node(mock.sentinel.ctx, resources)
get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
self.assertFalse(create_mock.called)
self.assertFalse(self.rt.disabled)
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList(objects=[]))
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
def test_compute_node_created_on_empty(self, get_mock, create_mock,
pci_tracker_mock):
self.flags(cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0)
self._setup_rt()
get_mock.side_effect = exc.NotFound
resources = {
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': _NODENAME,
'free_disk_gb': 6,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 512,
'memory_mb_used': 0,
'pci_device_pools': [],
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 0,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0,
'pci_passthrough_devices': '[]'
}
# The expected compute represents the initial values used
# when creating a compute node.
expected_compute = objects.ComputeNode(
id=42,
host_ip=resources['host_ip'],
vcpus=resources['vcpus'],
memory_mb=resources['memory_mb'],
local_gb=resources['local_gb'],
cpu_info=resources['cpu_info'],
vcpus_used=resources['vcpus_used'],
memory_mb_used=resources['memory_mb_used'],
local_gb_used=resources['local_gb_used'],
numa_topology=resources['numa_topology'],
hypervisor_type=resources['hypervisor_type'],
hypervisor_version=resources['hypervisor_version'],
hypervisor_hostname=resources['hypervisor_hostname'],
# NOTE(sbauza): ResourceTracker adds host field
host=_HOSTNAME,
# NOTE(sbauza): ResourceTracker adds CONF allocation ratios
ram_allocation_ratio=1.0,
cpu_allocation_ratio=1.0,
disk_allocation_ratio=1.0,
stats={},
pci_device_pools=objects.PciDevicePoolList(objects=[])
)
def set_cn_id():
# The PCI tracker needs the compute node's ID when starting up, so
            # make sure that we set the ID value so we don't get a "Cannot
            # load 'id' in base class" error
            self.rt.compute_node.id = 42  # Has to be a number, not a mock
create_mock.side_effect = set_cn_id
self.rt._init_compute_node(mock.sentinel.ctx, resources)
self.assertFalse(self.rt.disabled)
get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
create_mock.assert_called_once_with()
self.assertTrue(obj_base.obj_equal_prims(expected_compute,
self.rt.compute_node))
pci_tracker_mock.assert_called_once_with(mock.sentinel.ctx,
42)
class TestUpdateComputeNode(BaseTestCase):
@mock.patch('nova.objects.Service.get_by_compute_host')
def test_existing_compute_node_updated_same_resources(self, service_mock):
self._setup_rt()
# This is the same set of resources as the fixture, deliberately. We
# are checking below to see that update_resource_stats() is not
# needlessly called when the resources don't actually change.
compute = objects.ComputeNode(
host_ip='1.1.1.1',
numa_topology=None,
metrics='[]',
cpu_info='',
hypervisor_hostname=_NODENAME,
free_disk_gb=6,
hypervisor_version=0,
local_gb=6,
free_ram_mb=512,
memory_mb_used=0,
pci_device_pools=objects.PciDevicePoolList(),
vcpus_used=0,
hypervisor_type='fake',
local_gb_used=0,
memory_mb=512,
current_workload=0,
vcpus=4,
running_vms=0,
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
)
self.rt.compute_node = compute
self.rt._update(mock.sentinel.ctx)
self.assertFalse(self.rt.disabled)
self.assertFalse(service_mock.called)
        # The above call to _update() populates RT.old_resources with the
        # current resources. Here we check that calling _update() again with
        # the same resources does not trigger another scheduler client update
        # for the (unchanged) compute node resources.
self.sched_client_mock.reset_mock()
urs_mock = self.sched_client_mock.update_resource_stats
self.rt._update(mock.sentinel.ctx)
self.assertFalse(urs_mock.called)
@mock.patch('nova.objects.Service.get_by_compute_host')
def test_existing_compute_node_updated_new_resources(self, service_mock):
self._setup_rt()
# Deliberately changing local_gb_used, vcpus_used, and memory_mb_used
# below to be different from the compute node fixture's base usages.
# We want to check that the code paths update the stored compute node
# usage records with what is supplied to _update().
compute = objects.ComputeNode(
host=_HOSTNAME,
host_ip='1.1.1.1',
numa_topology=None,
metrics='[]',
cpu_info='',
hypervisor_hostname=_NODENAME,
free_disk_gb=2,
hypervisor_version=0,
local_gb=6,
free_ram_mb=384,
memory_mb_used=128,
pci_device_pools=objects.PciDevicePoolList(),
vcpus_used=2,
hypervisor_type='fake',
local_gb_used=4,
memory_mb=512,
current_workload=0,
vcpus=4,
running_vms=0,
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
)
self.rt.compute_node = compute
self.rt._update(mock.sentinel.ctx)
self.assertFalse(self.rt.disabled)
self.assertFalse(service_mock.called)
urs_mock = self.sched_client_mock.update_resource_stats
urs_mock.assert_called_once_with(self.rt.compute_node)
class TestInstanceClaim(BaseTestCase):
def setUp(self):
super(TestInstanceClaim, self).setUp()
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self._setup_rt()
self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
        # not using mock.sentinel.ctx because instance_claim calls ctx.elevated()
self.ctx = mock.MagicMock()
self.elevated = mock.MagicMock()
self.ctx.elevated.return_value = self.elevated
self.instance = _INSTANCE_FIXTURES[0].obj_clone()
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def test_claim_disabled(self):
self.rt.compute_node = None
self.assertTrue(self.rt.disabled)
with mock.patch.object(self.instance, 'save'):
claim = self.rt.instance_claim(mock.sentinel.ctx, self.instance,
None)
self.assertEqual(self.rt.host, self.instance.host)
self.assertEqual(self.rt.host, self.instance.launched_on)
self.assertEqual(self.rt.nodename, self.instance.node)
self.assertIsInstance(claim, claims.NopClaim)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_update_usage_with_claim(self, migr_mock, pci_mock):
# Test that RT.update_usage() only changes the compute node
# resources if there has been a claim first.
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
self.rt.update_usage(self.ctx, self.instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
vals = {
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected.local_gb - disk_used,
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
'stats': {
'io_workload': 0,
'num_instances': 1,
'num_task_None': 1,
'num_os_type_' + self.instance.os_type: 1,
'num_proj_' + self.instance.project_id: 1,
'num_vm_' + self.instance.vm_state: 1,
},
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
update_mock.assert_called_once_with(self.elevated)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_update_usage_removed(self, migr_mock, pci_mock):
# Test that RT.update_usage() removes the instance when update is
# called in a removed state
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
vals = {
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected.local_gb - disk_used,
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
'stats': {
'io_workload': 0,
'num_instances': 1,
'num_task_None': 1,
'num_os_type_' + self.instance.os_type: 1,
'num_proj_' + self.instance.project_id: 1,
'num_vm_' + self.instance.vm_state: 1,
},
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
update_mock.assert_called_once_with(self.elevated)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
expected_updated = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'pci_device_pools': objects.PciDevicePoolList(),
'stats': {
'io_workload': 0,
'num_instances': 0,
'num_task_None': 0,
'num_os_type_' + self.instance.os_type: 0,
'num_proj_' + self.instance.project_id: 0,
'num_vm_' + self.instance.vm_state: 0,
},
}
_update_compute_node(expected_updated, **vals)
self.instance.vm_state = vm_states.SHELVED_OFFLOADED
with mock.patch.object(self.rt, '_update') as update_mock:
self.rt.update_usage(self.ctx, self.instance)
self.assertTrue(obj_base.obj_equal_prims(expected_updated,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim(self, migr_mock, pci_mock):
self.assertFalse(self.rt.disabled)
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected.local_gb - disk_used,
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
'stats': {
'io_workload': 0,
'num_instances': 1,
'num_task_None': 1,
'num_os_type_' + self.instance.os_type: 1,
'num_proj_' + self.instance.project_id: 1,
'num_vm_' + self.instance.vm_state: 1,
},
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
update_mock.assert_called_once_with(self.elevated)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
self.assertEqual(self.rt.host, self.instance.host)
self.assertEqual(self.rt.host, self.instance.launched_on)
self.assertEqual(self.rt.nodename, self.instance.node)
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
return_value=True)
@mock.patch('nova.pci.manager.PciDevTracker.claim_instance')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_with_pci(self, migr_mock, pci_mock,
pci_manager_mock, pci_stats_mock):
# Test that a claim involving PCI requests correctly claims
# PCI devices on the host and sends an updated pci_device_pools
# attribute of the ComputeNode object.
self.assertFalse(self.rt.disabled)
# TODO(jaypipes): Remove once the PCI tracker is always created
# upon the resource tracker being initialized...
self.rt.pci_tracker = pci_manager.PciDevTracker(mock.sentinel.ctx)
pci_pools = objects.PciDevicePoolList()
pci_manager_mock.return_value = pci_pools
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
pci_mock.return_value = objects.InstancePCIRequests(requests=[request])
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected.local_gb - disk_used,
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': pci_pools,
'stats': {
'io_workload': 0,
'num_instances': 1,
'num_task_None': 1,
'num_os_type_' + self.instance.os_type: 1,
'num_proj_' + self.instance.project_id: 1,
'num_vm_' + self.instance.vm_state: 1,
},
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
update_mock.assert_called_once_with(self.elevated)
pci_manager_mock.assert_called_once_with(mock.ANY, # context...
pci_mock.return_value,
None)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_abort_context_manager(self, migr_mock, pci_mock):
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
self.assertEqual(0, self.rt.compute_node.local_gb_used)
self.assertEqual(0, self.rt.compute_node.memory_mb_used)
self.assertEqual(0, self.rt.compute_node.running_vms)
mock_save = mock.MagicMock()
mock_clear_numa = mock.MagicMock()
@mock.patch.object(self.instance, 'save', mock_save)
@mock.patch.object(self.instance, 'clear_numa_topology',
mock_clear_numa)
@mock.patch.object(objects.Instance, 'obj_clone',
return_value=self.instance)
def _doit(mock_clone):
with self.rt.instance_claim(self.ctx, self.instance, None):
# Raise an exception. Just make sure below that the abort()
# method of the claim object was called (and the resulting
# resources reset to the pre-claimed amounts)
raise test.TestingException()
self.assertRaises(test.TestingException, _doit)
self.assertEqual(2, mock_save.call_count)
mock_clear_numa.assert_called_once_with()
self.assertIsNone(self.instance.host)
self.assertIsNone(self.instance.node)
# Assert that the resources claimed by the Claim() constructor
# are returned to the resource tracker due to the claim's abort()
# method being called when triggered by the exception raised above.
self.assertEqual(0, self.rt.compute_node.local_gb_used)
self.assertEqual(0, self.rt.compute_node.memory_mb_used)
self.assertEqual(0, self.rt.compute_node.running_vms)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_abort(self, migr_mock, pci_mock):
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
@mock.patch.object(objects.Instance, 'obj_clone',
return_value=self.instance)
@mock.patch.object(self.instance, 'save')
def _claim(mock_save, mock_clone):
return self.rt.instance_claim(self.ctx, self.instance, None)
claim = _claim()
self.assertEqual(disk_used, self.rt.compute_node.local_gb_used)
self.assertEqual(self.instance.memory_mb,
self.rt.compute_node.memory_mb_used)
self.assertEqual(1, self.rt.compute_node.running_vms)
mock_save = mock.MagicMock()
mock_clear_numa = mock.MagicMock()
@mock.patch.object(self.instance, 'save', mock_save)
@mock.patch.object(self.instance, 'clear_numa_topology',
mock_clear_numa)
def _abort():
claim.abort()
_abort()
mock_save.assert_called_once_with()
mock_clear_numa.assert_called_once_with()
self.assertIsNone(self.instance.host)
self.assertIsNone(self.instance.node)
self.assertEqual(0, self.rt.compute_node.local_gb_used)
self.assertEqual(0, self.rt.compute_node.memory_mb_used)
self.assertEqual(0, self.rt.compute_node.running_vms)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_limits(self, migr_mock, pci_mock):
self.assertFalse(self.rt.disabled)
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
good_limits = {
'memory_mb': _COMPUTE_NODE_FIXTURES[0].memory_mb,
'disk_gb': _COMPUTE_NODE_FIXTURES[0].local_gb,
'vcpu': _COMPUTE_NODE_FIXTURES[0].vcpus,
}
for key in good_limits.keys():
bad_limits = copy.deepcopy(good_limits)
bad_limits[key] = 0
self.assertRaises(exc.ComputeResourcesUnavailable,
self.rt.instance_claim,
self.ctx, self.instance, bad_limits)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_claim_numa(self, migr_mock, pci_mock):
self.assertFalse(self.rt.disabled)
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
self.instance.numa_topology = _INSTANCE_NUMA_TOPOLOGIES['2mb']
host_topology = _NUMA_HOST_TOPOLOGIES['2mb']
self.rt.compute_node.numa_topology = host_topology._to_json()
limits = {'numa_topology': _NUMA_LIMIT_TOPOLOGIES['2mb']}
expected_numa = copy.deepcopy(host_topology)
for cell in expected_numa.cells:
cell.memory_usage += _2MB
cell.cpu_usage += 1
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, limits)
update_mock.assert_called_once_with(self.ctx.elevated())
updated_compute_node = self.rt.compute_node
new_numa = updated_compute_node.numa_topology
new_numa = objects.NUMATopology.obj_from_db_obj(new_numa)
self.assertEqualNUMAHostTopology(expected_numa, new_numa)
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
class TestMoveClaim(BaseTestCase):
def setUp(self):
super(TestMoveClaim, self).setUp()
self._setup_rt()
self.instance = _INSTANCE_FIXTURES[0].obj_clone()
self.flavor = _INSTANCE_TYPE_OBJ_FIXTURES[1]
self.limits = {}
        # not using mock.sentinel.ctx because resize_claim calls ctx.elevated()
self.ctx = mock.MagicMock()
self.elevated = mock.MagicMock()
self.ctx.elevated.return_value = self.elevated
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
with test.nested(
mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
return_value=copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])),
mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[])),
mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList()),
mock.patch('nova.objects.InstanceList.get_by_host_and_node',
return_value=objects.InstanceList()),
mock.patch('nova.objects.MigrationList.'
'get_in_progress_by_host_and_node',
return_value=objects.MigrationList())
) as (cn_mock, inst_pci_mock, pci_dev_mock, inst_list_mock, migr_mock):
self.rt.update_available_resource(self.ctx)
def register_mocks(self, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock, inst_save_mock):
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
self.inst_list_mock = inst_list_mock
self.inst_by_uuid = inst_by_uuid
self.migr_mock = migr_mock
self.inst_save_mock = inst_save_mock
def audit(self, rt, instances, migrations, migr_inst):
self.inst_list_mock.return_value = \
objects.InstanceList(objects=instances)
self.migr_mock.return_value = \
objects.MigrationList(objects=migrations)
self.inst_by_uuid.return_value = migr_inst
rt.update_available_resource(self.ctx)
def assertEqual(self, expected, actual):
if type(expected) != dict or type(actual) != dict:
super(TestMoveClaim, self).assertEqual(expected, actual)
return
fail = False
for k, e in expected.items():
a = actual[k]
if e != a:
print("%s: %s != %s" % (k, e, a))
fail = True
if fail:
self.fail()
def adjust_expected(self, expected, flavor):
disk_used = flavor['root_gb'] + flavor['ephemeral_gb']
expected.free_disk_gb -= disk_used
expected.local_gb_used += disk_used
expected.free_ram_mb -= flavor['memory_mb']
expected.memory_mb_used += flavor['memory_mb']
expected.vcpus_used += flavor['vcpus']
@mock.patch('nova.objects.Flavor.get_by_id')
def test_claim(self, flavor_mock, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock, inst_save_mock):
"""Resize self.instance and check that the expected quantities of each
resource have been consumed.
"""
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
self.driver_mock.get_host_ip_addr.return_value = "fake-ip"
flavor_mock.return_value = objects.Flavor(**self.flavor)
mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid]
self.instance.migration_context = mig_context_obj
expected = copy.deepcopy(self.rt.compute_node)
self.adjust_expected(expected, self.flavor)
with test.nested(
mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[])),
mock.patch.object(self.rt, '_create_migration',
return_value=_MIGRATION_FIXTURES['source-only']),
mock.patch('nova.objects.MigrationContext',
return_value=mig_context_obj)
) as (int_pci_mock, migr_mock, ctxt_mock):
claim = self.rt.resize_claim(
self.ctx, self.instance, self.flavor, None)
self.assertEqual(1, ctxt_mock.call_count)
self.assertIsInstance(claim, claims.MoveClaim)
inst_save_mock.assert_called_once_with()
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
def test_claim_abort(self, pci_mock, inst_list_mock,
inst_by_uuid, migr_mock, inst_save_mock):
        # Resize self.instance and check that the expected quantities of each
        # resource have been consumed. Then abort the resize claim and check
        # that the resources have been set back to their original values.
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
self.driver_mock.get_host_ip_addr.return_value = "fake-host"
migr_obj = _MIGRATION_FIXTURES['dest-only']
self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid]
self.instance.migration_context = mig_context_obj
self.flavor = _INSTANCE_TYPE_OBJ_FIXTURES[2]
with mock.patch.object(self.rt, '_create_migration') as migr_mock:
migr_mock.return_value = migr_obj
claim = self.rt.resize_claim(
self.ctx, self.instance, self.flavor, None)
self.assertIsInstance(claim, claims.MoveClaim)
self.assertEqual(5, self.rt.compute_node.local_gb_used)
self.assertEqual(256, self.rt.compute_node.memory_mb_used)
self.assertEqual(1, len(self.rt.tracked_migrations))
with mock.patch('nova.objects.Instance.'
'drop_migration_context') as drop_migr_mock:
claim.abort()
drop_migr_mock.assert_called_once_with()
self.assertEqual(0, self.rt.compute_node.local_gb_used)
self.assertEqual(0, self.rt.compute_node.memory_mb_used)
self.assertEqual(0, len(self.rt.tracked_migrations))
def test_same_host(self, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock, inst_save_mock):
"""Resize self.instance to the same host but with a different flavor.
Then abort the claim. Check that the same amount of resources are
available afterwards as we started with.
"""
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
self.instance._context = self.ctx
mig_context_obj = _MIGRATION_CONTEXT_FIXTURES[self.instance.uuid]
self.instance.migration_context = mig_context_obj
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
expected = copy.deepcopy(self.rt.compute_node)
create_mig_mock = mock.patch.object(self.rt, '_create_migration')
mig_ctxt_mock = mock.patch('nova.objects.MigrationContext',
return_value=mig_context_obj)
with create_mig_mock as migr_mock, mig_ctxt_mock as ctxt_mock:
migr_mock.return_value = migr_obj
claim = self.rt.resize_claim(self.ctx, self.instance,
_INSTANCE_TYPE_OBJ_FIXTURES[1], None)
self.assertEqual(1, ctxt_mock.call_count)
self.audit(self.rt, [self.instance], [migr_obj], self.instance)
inst_save_mock.assert_called_once_with()
self.assertNotEqual(expected, self.rt.compute_node)
claim.instance.migration_context = mig_context_obj
with mock.patch.object(claim.instance,
'drop_migration_context') as drop_mig_ctxt:
claim.abort()
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
drop_mig_ctxt.assert_called_once_with()
def test_revert_reserve_source(
self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock):
"""Check that the source node of an instance migration reserves
resources until the migration has completed, even if the migration is
reverted.
"""
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
# Get our migrations, instances and itypes in a row
src_migr = _MIGRATION_FIXTURES['source-only']
src_instance = (
_MIGRATION_INSTANCE_FIXTURES[src_migr['instance_uuid']].obj_clone()
)
src_instance.migration_context = (
_MIGRATION_CONTEXT_FIXTURES[src_instance.uuid])
old_itype = _INSTANCE_TYPE_FIXTURES[src_migr['old_instance_type_id']]
dst_migr = _MIGRATION_FIXTURES['dest-only']
dst_instance = (
_MIGRATION_INSTANCE_FIXTURES[dst_migr['instance_uuid']].obj_clone()
)
new_itype = _INSTANCE_TYPE_FIXTURES[dst_migr['new_instance_type_id']]
dst_instance.migration_context = (
_MIGRATION_CONTEXT_FIXTURES[dst_instance.uuid])
        # Set up the destination resource tracker and call
        # update_available_resource() to initialise it
src_rt = self.rt
(dst_rt, _, _) = setup_rt("other-host", "other-node")
inst_list_mock.return_value = objects.InstanceList(objects=[])
with test.nested(
mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
return_value=copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])),
mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[])),
mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList()),
mock.patch('nova.objects.InstanceList.get_by_host_and_node',
return_value=objects.InstanceList()),
mock.patch('nova.objects.MigrationList.'
'get_in_progress_by_host_and_node',
return_value=objects.MigrationList())
) as (cn_mock, inst_pci_mock, pci_dev_mock, inst_list_mock, migr_mock):
dst_rt.update_available_resource(self.ctx)
# Register the instance with dst_rt
expected = copy.deepcopy(dst_rt.compute_node)
with mock.patch.object(dst_instance, 'save'):
dst_rt.instance_claim(self.ctx, dst_instance)
self.adjust_expected(expected, new_itype)
expected.stats = {'num_task_resize_migrating': 1,
'io_workload': 1,
'num_instances': 1,
'num_proj_fake-project': 1,
'num_vm_active': 1,
'num_os_type_fake-os': 1}
expected.current_workload = 1
expected.running_vms = 1
self.assertTrue(obj_base.obj_equal_prims(expected,
dst_rt.compute_node))
# Provide the migration via a mock, then audit dst_rt to check that
# the instance + migration resources are not double-counted
self.audit(dst_rt, [dst_instance], [dst_migr], dst_instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
dst_rt.compute_node))
# Audit src_rt with src_migr
expected = copy.deepcopy(src_rt.compute_node)
self.adjust_expected(expected, old_itype)
self.audit(src_rt, [], [src_migr], src_instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
src_rt.compute_node))
# Flag the instance as reverting and re-audit
src_instance['vm_state'] = vm_states.RESIZED
src_instance['task_state'] = task_states.RESIZE_REVERTING
self.audit(src_rt, [], [src_migr], src_instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
src_rt.compute_node))
def test_update_available_resources_migration_no_context(self, pci_mock,
inst_list_mock, inst_by_uuid, migr_mock, inst_save_mock):
"""When migrating onto older nodes - it is possible for the
migration_context record to be missing. Confirm resource audit works
regardless.
"""
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
self.instance.migration_context = None
expected = copy.deepcopy(self.rt.compute_node)
self.adjust_expected(expected, self.flavor)
self.audit(self.rt, [], [migr_obj], self.instance)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
def test_dupe_filter(self, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock, inst_save_mock):
self.register_mocks(pci_mock, inst_list_mock, inst_by_uuid, migr_mock,
inst_save_mock)
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
# This is good enough to prevent a lazy-load; value is unimportant
migr_obj['updated_at'] = None
self.instance = _MIGRATION_INSTANCE_FIXTURES[migr_obj['instance_uuid']]
self.instance.migration_context = (
_MIGRATION_CONTEXT_FIXTURES[self.instance.uuid])
self.audit(self.rt, [], [migr_obj, migr_obj], self.instance)
self.assertEqual(1, len(self.rt.tracked_migrations))
class TestInstanceInResizeState(test.NoDBTestCase):
def test_active_suspending(self):
instance = objects.Instance(vm_state=vm_states.ACTIVE,
task_state=task_states.SUSPENDING)
self.assertFalse(resource_tracker._instance_in_resize_state(instance))
def test_resized_suspending(self):
instance = objects.Instance(vm_state=vm_states.RESIZED,
task_state=task_states.SUSPENDING)
self.assertTrue(resource_tracker._instance_in_resize_state(instance))
def test_resized_resize_migrating(self):
instance = objects.Instance(vm_state=vm_states.RESIZED,
task_state=task_states.RESIZE_MIGRATING)
self.assertTrue(resource_tracker._instance_in_resize_state(instance))
def test_resized_resize_finish(self):
instance = objects.Instance(vm_state=vm_states.RESIZED,
task_state=task_states.RESIZE_FINISH)
self.assertTrue(resource_tracker._instance_in_resize_state(instance))
class TestSetInstanceHostAndNode(BaseTestCase):
def setUp(self):
super(TestSetInstanceHostAndNode, self).setUp()
self._setup_rt()
@mock.patch('nova.objects.Instance.save')
def test_set_instance_host_and_node(self, save_mock):
inst = objects.Instance()
self.rt._set_instance_host_and_node(inst)
save_mock.assert_called_once_with()
self.assertEqual(self.rt.host, inst.host)
self.assertEqual(self.rt.nodename, inst.node)
self.assertEqual(self.rt.host, inst.launched_on)
@mock.patch('nova.objects.Instance.save')
def test_unset_instance_host_and_node(self, save_mock):
inst = objects.Instance()
self.rt._set_instance_host_and_node(inst)
self.rt._unset_instance_host_and_node(inst)
self.assertEqual(2, save_mock.call_count)
self.assertIsNone(inst.host)
self.assertIsNone(inst.node)
self.assertEqual(self.rt.host, inst.launched_on)
def _update_compute_node(node, **kwargs):
    """Apply the given attribute overrides to a ComputeNode object."""
    for key, value in kwargs.items():
setattr(node, key, value)
from __future__ import unicode_literals
import re
try:
from collections import OrderedDict
except ImportError:
OrderedDict = None
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib import auth
from emis_account.compat import get_user_model, get_user_lookup_kwargs
from emis_account.conf import settings
from emis_account.models import Account
from emis_account.models import EmailAddress
from emis_account.models import User_Category
from captcha.fields import CaptchaField
alnum_re = re.compile(r"^\w+$")
class SignupForm(forms.Form):
username = forms.CharField(
label=_("Username"),
max_length=30,
widget=forms.TextInput(),
required=True
)
password = forms.CharField(
label=_("Password"),
initial='optimist',
widget=forms.HiddenInput()
)
password_confirm = forms.CharField(
label=_("Password (again)"),
initial='optimist',
widget=forms.HiddenInput()
)
email = forms.EmailField(
label=_("Email"),
widget=forms.TextInput(), required=True)
code = forms.CharField(
max_length=64,
required=False,
widget=forms.HiddenInput()
)
mobile_number = forms.IntegerField(
label=_("Mobile Number"),
min_value=6000000000,
max_value=9999999999,
required=True,
widget=forms.TextInput()
)
    # category = forms.ModelChoiceField(Zone, widget=Select(), required=True)
category = forms.ModelChoiceField(queryset=User_Category.objects.all())
associated = forms.CharField(
        label=_("Associated With"),
required=True,
max_length=20,
widget=forms.TextInput()
)
def clean_username(self):
if not alnum_re.search(self.cleaned_data["username"]):
raise forms.ValidationError(_("Usernames can only contain letters, numbers and underscores."))
User = get_user_model()
lookup_kwargs = get_user_lookup_kwargs({
"{username}__iexact": self.cleaned_data["username"]
})
qs = User.objects.filter(**lookup_kwargs)
if not qs.exists():
return self.cleaned_data["username"]
raise forms.ValidationError(_("This username is already taken. Please choose another."))
def clean_email(self):
value = self.cleaned_data["email"]
qs = EmailAddress.objects.filter(email__iexact=value)
if not qs.exists() or not settings.ACCOUNT_EMAIL_UNIQUE:
return value
raise forms.ValidationError(_("A user is registered with this email address."))
def clean(self):
if "password" in self.cleaned_data and "password_confirm" in self.cleaned_data:
if self.cleaned_data["password"] != self.cleaned_data["password_confirm"]:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data
class LoginForm(forms.Form):
password = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput(render_value=False)
)
remember = forms.BooleanField(
label=_("Remember Me"),
required=False
)
user = None
def clean(self):
if self._errors:
return
user = auth.authenticate(**self.user_credentials())
if user:
if user.is_active:
self.user = user
else:
raise forms.ValidationError(_("This account is inactive."))
else:
raise forms.ValidationError(self.authentication_fail_message)
return self.cleaned_data
def user_credentials(self):
return {
"username": self.cleaned_data[self.identifier_field],
"password": self.cleaned_data["password"],
}
class LoginUsernameForm(LoginForm):
username = forms.CharField(label=_("Username"), max_length=30)
captcha = CaptchaField()
authentication_fail_message = _("The username and/or password you specified are not correct.")
identifier_field = "username"
def __init__(self, *args, **kwargs):
super(LoginUsernameForm, self).__init__(*args, **kwargs)
field_order = ["username", "password", "captcha"]
if not OrderedDict or hasattr(self.fields, "keyOrder"):
self.fields.keyOrder = field_order
else:
self.fields = OrderedDict((k, self.fields[k]) for k in field_order)
class LoginEmailForm(LoginForm):
email = forms.EmailField(label=_("Email"))
authentication_fail_message = _("The email address and/or password you specified are not correct.")
identifier_field = "email"
def __init__(self, *args, **kwargs):
super(LoginEmailForm, self).__init__(*args, **kwargs)
field_order = ["email", "password", "remember"]
if not OrderedDict or hasattr(self.fields, "keyOrder"):
self.fields.keyOrder = field_order
else:
self.fields = OrderedDict((k, self.fields[k]) for k in field_order)
class ChangePasswordForm(forms.Form):
password_current = forms.CharField(
label=_("Current Password"),
widget=forms.PasswordInput(render_value=False)
)
password_new = forms.CharField(
label=_("New Password"),
widget=forms.PasswordInput(render_value=False)
)
password_new_confirm = forms.CharField(
label=_("New Password (again)"),
widget=forms.PasswordInput(render_value=False)
)
def __init__(self, *args, **kwargs):
self.user = kwargs.pop("user")
super(ChangePasswordForm, self).__init__(*args, **kwargs)
def clean_password_current(self):
if not self.user.check_password(self.cleaned_data.get("password_current")):
raise forms.ValidationError(_("Please type your current password."))
return self.cleaned_data["password_current"]
def clean_password_new_confirm(self):
if "password_new" in self.cleaned_data and "password_new_confirm" in self.cleaned_data:
if self.cleaned_data["password_new"] != self.cleaned_data["password_new_confirm"]:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data["password_new_confirm"]
class PasswordResetForm(forms.Form):
email = forms.EmailField(label=_("Email"), required=True)
def clean_email(self):
value = self.cleaned_data["email"]
if not EmailAddress.objects.filter(email__iexact=value).exists():
raise forms.ValidationError(_("Email address can not be found."))
return value
class PasswordResetTokenForm(forms.Form):
password = forms.CharField(
label=_("New Password"),
widget=forms.PasswordInput(render_value=False)
)
password_confirm = forms.CharField(
label=_("New Password (again)"),
widget=forms.PasswordInput(render_value=False)
)
def clean_password_confirm(self):
if "password" in self.cleaned_data and "password_confirm" in self.cleaned_data:
if self.cleaned_data["password"] != self.cleaned_data["password_confirm"]:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data["password_confirm"]
class SettingsForm(forms.Form):
email = forms.EmailField(label=_("Email"), required=True)
mobile_number = forms.IntegerField(label=_("Mobile"), required=True)
def clean_email(self):
value = self.cleaned_data["email"]
if self.initial.get("email") == value:
return value
qs = EmailAddress.objects.filter(email__iexact=value)
if not qs.exists() or not settings.ACCOUNT_EMAIL_UNIQUE:
return value
raise forms.ValidationError(_("A user is registered with this email address."))
    def clean_mobile_number(self):
        # Named clean_<fieldname> so Django's validation actually invokes it
        # for the mobile_number field.
        value = self.cleaned_data["mobile_number"]
if self.initial.get("mobile_number") == value:
return value
qs = Account.objects.filter(mobile_number__iexact=value)
if not qs.exists():
return value
raise forms.ValidationError(_("A user is registered with this mobile number."))
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_config import cfg
from oslo_log import handlers
from oslo_log import log as oslogging
from rally.common.i18n import _
log = __import__("logging")  # the stdlib logging module
DEBUG_OPTS = [cfg.BoolOpt(
"rally-debug",
default=False,
help="Print debugging output only for Rally. "
"Off-site components stay quiet.")]
CONF = cfg.CONF
CONF.register_cli_opts(DEBUG_OPTS)
oslogging.register_options(CONF)
log.RDEBUG = log.DEBUG + 1
log.addLevelName(log.RDEBUG, "RALLYDEBUG")
CRITICAL = log.CRITICAL
DEBUG = log.DEBUG
ERROR = log.ERROR
FATAL = log.FATAL
INFO = log.INFO
NOTSET = log.NOTSET
RDEBUG = log.RDEBUG
WARN = log.WARN
WARNING = log.WARNING
def setup(product_name, version="unknown"):
    # Reuse the DEBUG color for the custom RDEBUG level before oslo.log
    # initialises its handlers.
    dbg_color = handlers.ColorHandler.LEVEL_COLORS[log.DEBUG]
    handlers.ColorHandler.LEVEL_COLORS[log.RDEBUG] = dbg_color
oslogging.setup(CONF, product_name, version)
if CONF.rally_debug:
oslogging.getLogger(
project=product_name).logger.setLevel(log.RDEBUG)
class RallyContextAdapter(oslogging.KeywordArgumentAdapter):
    def debug(self, msg, *args, **kwargs):
        # Route .debug() calls to the custom RDEBUG level so they only show
        # up when rally-debug is enabled.
        self.log(log.RDEBUG, msg, *args, **kwargs)
def getLogger(name="unknown", version="unknown"):
if name not in oslogging._loggers:
oslogging._loggers[name] = RallyContextAdapter(log.getLogger(name),
{"project": "rally",
"version": version})
return oslogging._loggers[name]
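# getLogger() caches one adapter per name, so this module-level LOG is shared
# with any later getLogger(__name__) call for the same name.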
LOG = getLogger(__name__)
class ExceptionLogger(object):
"""Context that intercepts and logs exceptions.
Usage::
LOG = logging.getLogger(__name__)
...
def foobar():
with ExceptionLogger(LOG, "foobar warning") as e:
return house_of_raising_exception()
if e.exception:
raise e.exception # remove if not required
"""
def __init__(self, logger, warn=None):
self.logger = logger
self.warn = warn
self.exception = None
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
if value:
self.exception = value
if self.warn:
self.logger.warning(self.warn)
self.logger.debug(value)
if is_debug():
self.logger.exception(value)
return True
class CatcherHandler(log.handlers.BufferingHandler):
def __init__(self):
log.handlers.BufferingHandler.__init__(self, 0)
def shouldFlush(self):
return False
def emit(self, record):
self.buffer.append(record)
class LogCatcher(object):
"""Context manager that catches log messages.
User can make an assertion on their content or fetch them all.
Usage::
LOG = logging.getLogger(__name__)
...
def foobar():
with LogCatcher(LOG) as catcher_in_rye:
LOG.warning("Running Kids")
catcher_in_rye.assertInLogs("Running Kids")
"""
def __init__(self, logger):
self.logger = getattr(logger, "logger", logger)
self.handler = CatcherHandler()
def __enter__(self):
self.logger.addHandler(self.handler)
return self
def __exit__(self, type_, value, traceback):
self.logger.removeHandler(self.handler)
def assertInLogs(self, msg):
"""Assert that `msg' is a substring at least of one logged message.
:param msg: Substring to look for.
:return: Log messages where the `msg' was found.
Raises AssertionError if none.
"""
in_logs = [record.msg
for record in self.handler.buffer if msg in record.msg]
if not in_logs:
raise AssertionError("Expected `%s' is not in logs" % msg)
return in_logs
def fetchLogRecords(self):
"""Returns all logged Records."""
return self.handler.buffer
def fetchLogs(self):
"""Returns all logged messages."""
return [record.msg for record in self.handler.buffer]
def _log_wrapper(obj, log_function, msg, **kw):
"""A logging wrapper for any method of a class.
    Class instances that use this decorator should have a self.task or
    self.deployment attribute. The wrapper produces log messages both
before and after the method execution, in the following format
(example for tasks):
"Task <Task UUID> | Starting: <Logging message>"
[Method execution...]
"Task <Task UUID> | Completed: <Logging message>"
    :param obj: task or deployment which must be an attribute of "self"
:param log_function: Logging method to be used, e.g. LOG.info
:param msg: Text message (possibly parameterized) to be put to the log
:param **kw: Parameters for msg
"""
def decorator(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
params = {"msg": msg % kw, "obj_name": obj.title(),
"uuid": getattr(self, obj)["uuid"]}
log_function(_("%(obj_name)s %(uuid)s | Starting: %(msg)s") %
params)
result = f(self, *args, **kwargs)
log_function(_("%(obj_name)s %(uuid)s | Completed: %(msg)s") %
params)
return result
return wrapper
return decorator
def log_task_wrapper(log_function, msg, **kw):
return _log_wrapper("task", log_function, msg, **kw)
def log_deploy_wrapper(log_function, msg, **kw):
return _log_wrapper("deployment", log_function, msg, **kw)
def log_verification_wrapper(log_function, msg, **kw):
return _log_wrapper("verification", log_function, msg, **kw)
def log_deprecated(message, rally_version, log_function=None, once=False):
"""A wrapper marking a certain method as deprecated.
:param message: Message that describes why the method was deprecated
:param rally_version: version of Rally when the method was deprecated
:param log_function: Logging method to be used, e.g. LOG.info
:param once: Show only once (default is each)
"""
log_function = log_function or LOG.warning
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
if (not once) or (not getattr(f, "_warned_dep_method", False)):
log_function("'%(func)s' is deprecated in Rally v%(version)s: "
"%(msg)s" % {"msg": message,
"version": rally_version,
"func": f.__name__})
setattr(f, "_warned_dep_method", once)
return f(*args, **kwargs)
return wrapper
return decorator
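# Minimal illustrative sketch (hypothetical function): with once=True only
# the first call emits the deprecation warning.
@log_deprecated("use shiny_new() instead", rally_version="0.4.2", once=True)
def _dusty_old():
    return 42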
def log_deprecated_args(message, rally_version, deprecated_args,
log_function=None, once=False):
"""A wrapper marking certain arguments as deprecated.
:param message: Message that describes why the arguments were deprecated
:param rally_version: version of Rally when the arguments were deprecated
:param deprecated_args: List of deprecated args.
:param log_function: Logging method to be used, e.g. LOG.info
:param once: Show only once (default is each)
"""
log_function = log_function or LOG.warning
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
if (not once) or (not getattr(f, "_warned_dep_args", False)):
deprecated = ", ".join([
"`%s'" % x for x in deprecated_args if x in kwargs])
if deprecated:
log_function(
"%(msg)s (args %(args)s deprecated in Rally "
"v%(version)s)" %
{"msg": message, "version": rally_version,
"args": deprecated})
setattr(f, "_warned_dep_args", once)
result = f(*args, **kwargs)
return result
return wrapper
return decorator
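# Illustrative sketch (hypothetical function): calling
# _do_work(data, legacy=True) logs the deprecation notice for the `legacy`
# keyword argument.
@log_deprecated_args("'legacy' is ignored", "0.4.2", ["legacy"])
def _do_work(data, legacy=False):
    return data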
def is_debug():
return CONF.debug or CONF.rally_debug
from config import config
from controller import Controller
import web
from web import form
import numpy as np
import model.regels
import model.ksgroup
import model.orders
import model.budget.order
import model.users
from functions import moneyfmt
from matplotlib import cm
class View(Controller):
def __init__(self):
Controller.__init__(self)
# subclass specific
self.title = 'View'
self.module = 'view'
self.webrender = web.template.render('webpages/view/')
# View specific:
order = int(web.input(order=0)['order'])
order_inst = model.orders.load(orders_load=[order]).orders
if order_inst:
self.order = order_inst[0]
else:
self.order = model.budget.order.Order()
self.order.load_unknown(order)
self.year = int(web.input(year=self.config["currentYear"])['year'])
self.ksgroup_name = web.input(ksgroup_name=config['ksgroup']['default'])['ksgroup_name']
self.periode = (web.input(periode='ALL')['periode'])
subgroups = False
        if 'subgroups' in web.input():
subgroups = True
# Forms
dropdown_options = self.dropdown_options()
self.form_settings_simple = form.Form(
form.Dropdown('year', dropdown_options['years'], value=self.year, class_="btn btn-default btn-sm"),
form.Dropdown('periode', dropdown_options['periode_all'], value=self.periode, class_="btn btn-default btn-sm"),
form.Dropdown('ksgroup_name', model.ksgroup.available(), value=self.ksgroup_name, class_="btn btn-default btn-sm"),
form.Checkbox('subgroups', description='Show subgroups:', checked=subgroups),
form.Button('Update', 'update', class_="btn btn-default btn-sm"),
)
# Load the ksgroup
self.ksgroup_root = model.ksgroup.load(self.ksgroup_name)
self.ksgroups = {}
for batenlasten in ['baten', 'lasten']:
self.ksgroups[batenlasten] = []
for ksgroup_name in list(config['ksgroup']['ksgroups'][self.ksgroup_name][batenlasten]):
ksgroup = self.ksgroup_root.find(ksgroup_name)
if ksgroup.children:
if subgroups:
self.ksgroups[batenlasten].extend(ksgroup.get_end_children([]))
else:
for ksgroup in ksgroup.children:
self.ksgroups[batenlasten].append(ksgroup)
else:
self.ksgroups[batenlasten].append(ksgroup)
    def authorized(self):
        if model.users.check_permission(['view']):
            if self.order.ordernummer in model.users.orders_allowed():
                return True
        return False
def process_sub(self):
data, totals = self.construct_data()
view = {}
view['title'] = '%s - %s' % (self.order.ordernummer,self.order.ordernaam)
view['summary'] = self.render_summary(totals)
view['settings'] = self.render_settings()
view['javaScripts'] = self.render_java_scripts(data)
self.convert_data_to_str(data, totals)
view['tables'] = self.render_tables(data, totals)
self.body = self.webrender.view(view)
return
def construct_data(self):
        # data = {<ksgroup name>: {'kosten': <regellist>, 'begroot': <regellist>}}
        # totals = {'total' or <ksgroup name>: {'geboekt'/'obligo'/'plan': <amount>}}
        regels = model.regels.load(['geboekt', 'obligo'], years_load=[self.year], orders_load=[self.order.ordernummer], periods_load=self.periode)
regels.extend(model.regels.load(['plan'], years_load=[self.year], orders_load=[self.order.ordernummer]))
regels_dict = regels.split(['tiepe', 'kostensoort'])
data = {}
totals = {}
totals['total'] = {}
totals['total']['geboekt'] = 0
totals['total']['obligo'] = 0
totals['total']['plan'] = 0
for batenlasten, ksgroups in self.ksgroups.iteritems():
for ksgroup in ksgroups:
for ks, descr in ksgroup.get_ks_recursive().items():
for tiepe in ['geboekt', 'obligo', 'plan']:
if tiepe in regels_dict:
if ks in regels_dict[tiepe]:
if ksgroup.name not in totals:
totals[ksgroup.name] = {}
totals[ksgroup.name]['geboekt'] = 0
totals[ksgroup.name]['obligo'] = 0
totals[ksgroup.name]['plan'] = 0
data[ksgroup.name] = {}
data[ksgroup.name]['kosten'] = None
data[ksgroup.name]['begroot'] = None
                                # 'plan' rows feed 'begroot'; geboekt/obligo rows feed 'kosten'
                                key = ('kosten', 'begroot')[tiepe == 'plan']
if data[ksgroup.name][key] is None:
data[ksgroup.name][key] = regels_dict[tiepe][ks]
else:
data[ksgroup.name][key].extend(regels_dict[tiepe][ks])
totals[ksgroup.name][tiepe] += regels_dict[tiepe][ks].total()
totals['total'][tiepe] += regels_dict[tiepe][ks].total()
# sort regels in regellist by 'periode' for view:
for ks_group in data.keys():
for key in data[ks_group]:
if data[ks_group][key] is not None:
data[ks_group][key].sort('periode')
return data, totals
def render_tables(self, data, totals):
tables = []
for batenlasten, ksgroups in self.ksgroups.iteritems():
for ksgroup in ksgroups:
if ksgroup.name in data:
header = {}
header['descr'] = ksgroup.descr
header['name'] = ksgroup.name
header['id'] = hash(ksgroup.name)
regels = []
if data[ksgroup.name]['kosten'] is not None:
regels = data[ksgroup.name]['kosten'].regels
table = self.webrender.table(regels, header, totals)
tables.append(table)
return tables
def convert_data_to_str(self, data, totals):
for ks_group, data_dict in data.iteritems():
for tiepe, regels in data_dict.iteritems():
if regels is not None:
for regel in regels.regels:
regel.kosten = moneyfmt(regel.kosten)
for ks_group, data_dict in totals.iteritems():
for tiepe, total in data_dict.iteritems():
totals[ks_group][tiepe] = moneyfmt(total)
return data, totals
def render_summary(self, totals):
summary = {}
summary['begroting'] = totals['total']['plan']
summary['baten'] = 0
for ksgroup in self.ksgroups['baten']:
if ksgroup.name in totals:
summary['baten'] = summary['baten'] + totals[ksgroup.name]['geboekt']
summary['lasten'] = totals['total']['geboekt'] - summary['baten']
summary['obligo'] = totals['total']['obligo'] - summary['baten']
summary['ruimte'] = summary['begroting'] - summary['baten'] - summary['lasten']
for key in summary.keys():
summary[key] = moneyfmt(summary[key])
summary['bh'] = self.order.budgethouder
summary['subact'] = self.order.subactiviteitencode
summary['graph_realisatie'] = self.url_graph(self.year, self.ksgroup_name, 'realisatie', self.order.ordernummer)
return self.webrender.summary(summary)
def render_settings(self):
form_settings = self.form_settings_simple
return self.webrender.settings(form_settings)
def render_java_scripts(self, data):
expand_items = []
for ks_group in data.keys():
expand_items.append(hash(ks_group))
return self.webrender.javascripts(expand_items)
"""Exercises for eager loading.
Derived from mailing list-reported problems and issue tracker issues.
These are generally very old 0.1-era tests and at some point should
be cleaned up and modernized.
"""
import datetime
import sqlalchemy as sa
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy.orm import backref
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
class EagerTest(fixtures.MappedTest):
run_deletes = None
run_inserts = "once"
run_setup_mappers = "once"
@classmethod
def define_tables(cls, metadata):
Table(
"owners",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(30)),
)
Table(
"categories",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(20)),
)
Table(
"tests",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column(
"owner_id", Integer, ForeignKey("owners.id"), nullable=False
),
Column(
"category_id",
Integer,
ForeignKey("categories.id"),
nullable=False,
),
)
Table(
"options",
metadata,
Column(
"test_id", Integer, ForeignKey("tests.id"), primary_key=True
),
Column(
"owner_id", Integer, ForeignKey("owners.id"), primary_key=True
),
Column(
"someoption",
sa.Boolean,
server_default=sa.false(),
nullable=False,
),
)
@classmethod
def setup_classes(cls):
class Owner(cls.Basic):
pass
class Category(cls.Basic):
pass
class Thing(cls.Basic):
pass
class Option(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Category, owners, Option, tests, Thing, Owner, options, categories = (
cls.classes.Category,
cls.tables.owners,
cls.classes.Option,
cls.tables.tests,
cls.classes.Thing,
cls.classes.Owner,
cls.tables.options,
cls.tables.categories,
)
cls.mapper_registry.map_imperatively(Owner, owners)
cls.mapper_registry.map_imperatively(Category, categories)
cls.mapper_registry.map_imperatively(
Option,
options,
properties=dict(
owner=relationship(Owner, viewonly=True),
test=relationship(Thing, viewonly=True),
),
)
cls.mapper_registry.map_imperatively(
Thing,
tests,
properties=dict(
owner=relationship(Owner, backref="tests"),
category=relationship(Category),
owner_option=relationship(
Option,
primaryjoin=sa.and_(
tests.c.id == options.c.test_id,
tests.c.owner_id == options.c.owner_id,
),
foreign_keys=[options.c.test_id, options.c.owner_id],
uselist=False,
),
),
)
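        # The composite primaryjoin above matches Option rows on both test_id
        # and owner_id, so each Thing sees only the option created by its own
        # owner; uselist=False maps it as a scalar attribute.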
@classmethod
def insert_data(cls, connection):
Owner, Category, Option, Thing = (
cls.classes.Owner,
cls.classes.Category,
cls.classes.Option,
cls.classes.Thing,
)
session = Session(connection)
o = Owner()
c = Category(name="Some Category")
session.add_all(
(
Thing(owner=o, category=c),
Thing(
owner=o, category=c, owner_option=Option(someoption=True)
),
Thing(owner=o, category=c, owner_option=Option()),
)
)
session.flush()
def test_noorm(self, connection):
"""test the control case"""
tests, options, categories = (
self.tables.tests,
self.tables.options,
self.tables.categories,
)
        # We want to display a list of tests owned by owner 1 if someoption
        # is false or not yet specified (null), but not if it is set to true
        # (e.g. someoption could mean "hidden").
        # Desired output for owner 1:
        #   test_id  cat_name
        #   1        'Some Category'
        #   3        'Some Category'
        # The correct query, without using the ORM:
print("Obtaining correct results without orm")
result = connection.execute(
sa.select(tests.c.id, categories.c.name)
.where(
sa.and_(
tests.c.owner_id == 1,
sa.or_(
options.c.someoption == None, # noqa
options.c.someoption == False,
),
)
)
.order_by(tests.c.id)
.select_from(
tests.join(categories).outerjoin(
options,
sa.and_(
tests.c.id == options.c.test_id,
tests.c.owner_id == options.c.owner_id,
),
)
)
).fetchall()
eq_(result, [(1, "Some Category"), (3, "Some Category")])
def test_withoutjoinedload(self):
Thing, tests, options = (
self.classes.Thing,
self.tables.tests,
self.tables.options,
)
s = fixture_session()
result = (
s.query(Thing)
.select_from(
tests.outerjoin(
options,
sa.and_(
tests.c.id == options.c.test_id,
tests.c.owner_id == options.c.owner_id,
),
)
)
.filter(
sa.and_(
tests.c.owner_id == 1,
sa.or_(
options.c.someoption == None, # noqa
options.c.someoption == False,
),
)
)
)
result_str = ["%d %s" % (t.id, t.category.name) for t in result]
eq_(result_str, ["1 Some Category", "3 Some Category"])
def test_withjoinedload(self):
"""
        Test that a joinedload locates the correct "from" clause to attach
        to, when presented with a query that already has a complicated from
        clause.
"""
Thing, tests, options = (
self.classes.Thing,
self.tables.tests,
self.tables.options,
)
s = fixture_session()
q = s.query(Thing).options(sa.orm.joinedload("category"))
result = q.select_from(
tests.outerjoin(
options,
sa.and_(
tests.c.id == options.c.test_id,
tests.c.owner_id == options.c.owner_id,
),
)
).filter(
sa.and_(
tests.c.owner_id == 1,
sa.or_(
options.c.someoption == None,
options.c.someoption == False, # noqa
),
)
)
result_str = ["%d %s" % (t.id, t.category.name) for t in result]
eq_(result_str, ["1 Some Category", "3 Some Category"])
def test_dslish(self):
"""test the same as withjoinedload except using generative"""
Thing, tests, options = (
self.classes.Thing,
self.tables.tests,
self.tables.options,
)
s = fixture_session()
q = s.query(Thing).options(sa.orm.joinedload("category"))
result = q.filter(
sa.and_(
tests.c.owner_id == 1,
sa.or_(
options.c.someoption == None,
options.c.someoption == False, # noqa
),
)
).outerjoin("owner_option")
result_str = ["%d %s" % (t.id, t.category.name) for t in result]
eq_(result_str, ["1 Some Category", "3 Some Category"])
@testing.crashes("sybase", "FIXME: unknown, verify not fails_on")
def test_without_outerjoin_literal(self):
Thing, tests = (self.classes.Thing, self.tables.tests)
s = fixture_session()
q = s.query(Thing).options(sa.orm.joinedload("category"))
result = q.filter(
(tests.c.owner_id == 1)
& text(
"options.someoption is null or options.someoption=:opt"
).bindparams(opt=False)
).join("owner_option")
result_str = ["%d %s" % (t.id, t.category.name) for t in result]
eq_(result_str, ["3 Some Category"])
def test_withoutouterjoin(self):
Thing, tests, options = (
self.classes.Thing,
self.tables.tests,
self.tables.options,
)
s = fixture_session()
q = s.query(Thing).options(sa.orm.joinedload("category"))
result = q.filter(
(tests.c.owner_id == 1)
& (
(options.c.someoption == None)
| (options.c.someoption == False)
) # noqa
).join("owner_option")
result_str = ["%d %s" % (t.id, t.category.name) for t in result]
eq_(result_str, ["3 Some Category"])
class EagerTest2(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"left",
metadata,
Column("id", Integer, ForeignKey("middle.id"), primary_key=True),
Column("data", String(50), primary_key=True),
)
Table(
"middle",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
)
Table(
"right",
metadata,
Column("id", Integer, ForeignKey("middle.id"), primary_key=True),
Column("data", String(50), primary_key=True),
)
@classmethod
def setup_classes(cls):
class Left(cls.Basic):
def __init__(self, data):
self.data = data
class Middle(cls.Basic):
def __init__(self, data):
self.data = data
class Right(cls.Basic):
def __init__(self, data):
self.data = data
@classmethod
def setup_mappers(cls):
Right, Middle, middle, right, left, Left = (
cls.classes.Right,
cls.classes.Middle,
cls.tables.middle,
cls.tables.right,
cls.tables.left,
cls.classes.Left,
)
# set up bi-directional eager loads
cls.mapper_registry.map_imperatively(Left, left)
cls.mapper_registry.map_imperatively(Right, right)
cls.mapper_registry.map_imperatively(
Middle,
middle,
properties=dict(
left=relationship(
Left,
lazy="joined",
backref=backref("middle", lazy="joined"),
),
right=relationship(
Right,
lazy="joined",
backref=backref("middle", lazy="joined"),
),
),
        )
def test_eager_terminate(self):
"""Eager query generation does not include the same mapper's table twice.
Or, that bi-directional eager loads don't include each other in eager
query generation.
"""
Middle, Right, Left = (
self.classes.Middle,
self.classes.Right,
self.classes.Left,
)
p = Middle("m1")
p.left.append(Left("l1"))
p.right.append(Right("r1"))
session = fixture_session()
session.add(p)
session.flush()
session.expunge_all()
session.query(Left).filter_by(data="l1").one()
class EagerTest3(fixtures.MappedTest):
"""Eager loading combined with nested SELECT statements, functions, and
aggregates."""
@classmethod
def define_tables(cls, metadata):
Table(
"datas",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("a", Integer, nullable=False),
)
Table(
"foo",
metadata,
Column(
"data_id", Integer, ForeignKey("datas.id"), primary_key=True
),
Column("bar", Integer),
)
Table(
"stats",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data_id", Integer, ForeignKey("datas.id")),
Column("somedata", Integer, nullable=False),
)
@classmethod
def setup_classes(cls):
class Data(cls.Basic):
pass
class Foo(cls.Basic):
pass
class Stat(cls.Basic):
pass
def test_nesting_with_functions(self):
Stat, Foo, stats, foo, Data, datas = (
self.classes.Stat,
self.classes.Foo,
self.tables.stats,
self.tables.foo,
self.classes.Data,
self.tables.datas,
)
self.mapper_registry.map_imperatively(Data, datas)
self.mapper_registry.map_imperatively(
Foo,
foo,
properties={
"data": relationship(
Data, backref=backref("foo", uselist=False)
)
},
)
self.mapper_registry.map_imperatively(
Stat, stats, properties={"data": relationship(Data)}
)
session = fixture_session()
data = [Data(a=x) for x in range(5)]
session.add_all(data)
session.add_all(
(
Stat(data=data[0], somedata=1),
Stat(data=data[1], somedata=2),
Stat(data=data[2], somedata=3),
Stat(data=data[3], somedata=4),
Stat(data=data[4], somedata=5),
Stat(data=data[0], somedata=6),
Stat(data=data[1], somedata=7),
Stat(data=data[2], somedata=8),
Stat(data=data[3], somedata=9),
Stat(data=data[4], somedata=10),
)
)
session.flush()
arb_data = (
sa.select(
stats.c.data_id, sa.func.max(stats.c.somedata).label("max")
)
.where(stats.c.data_id <= 5)
.group_by(stats.c.data_id)
)
arb_result = session.connection().execute(arb_data).fetchall()
# order the result list descending based on 'max'
arb_result.sort(key=lambda a: a._mapping["max"], reverse=True)
# extract just the "data_id" from it
arb_result = [row._mapping["data_id"] for row in arb_result]
arb_data = arb_data.alias("arb")
# now query for Data objects using that above select, adding the
# "order by max desc" separately
q = (
session.query(Data)
.options(sa.orm.joinedload("foo"))
.select_from(
datas.join(arb_data, arb_data.c.data_id == datas.c.id)
)
.order_by(sa.desc(arb_data.c.max))
.limit(10)
)
# extract "data_id" from the list of result objects
verify_result = [d.id for d in q]
eq_(verify_result, arb_result)
class EagerTest4(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"departments",
metadata,
Column(
"department_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(50)),
)
Table(
"employees",
metadata,
Column(
"person_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(50)),
Column(
"department_id",
Integer,
ForeignKey("departments.department_id"),
),
)
@classmethod
def setup_classes(cls):
class Department(cls.Basic):
pass
class Employee(cls.Basic):
pass
def test_basic(self):
Department, Employee, employees, departments = (
self.classes.Department,
self.classes.Employee,
self.tables.employees,
self.tables.departments,
)
self.mapper_registry.map_imperatively(Employee, employees)
self.mapper_registry.map_imperatively(
Department,
departments,
properties=dict(
employees=relationship(
Employee, lazy="joined", backref="department"
)
),
)
d1 = Department(name="One")
for e in "Jim", "Jack", "John", "Susan":
d1.employees.append(Employee(name=e))
d2 = Department(name="Two")
for e in "Joe", "Bob", "Mary", "Wally":
d2.employees.append(Employee(name=e))
sess = fixture_session()
sess.add_all((d1, d2))
sess.flush()
q = (
sess.query(Department)
.join("employees")
.filter(Employee.name.startswith("J"))
.distinct()
.order_by(sa.desc(Department.name))
)
eq_(q.count(), 2)
assert q[0] is d2
class EagerTest5(fixtures.MappedTest):
"""Construction of AliasedClauses for the same eager load property but
different parent mappers, due to inheritance."""
@classmethod
def define_tables(cls, metadata):
Table(
"base",
metadata,
Column("uid", String(30), primary_key=True),
Column("x", String(30)),
)
Table(
"derived",
metadata,
Column(
"uid", String(30), ForeignKey("base.uid"), primary_key=True
),
Column("y", String(30)),
)
Table(
"derivedII",
metadata,
Column(
"uid", String(30), ForeignKey("base.uid"), primary_key=True
),
Column("z", String(30)),
)
Table(
"comments",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("uid", String(30), ForeignKey("base.uid")),
Column("comment", String(30)),
)
@classmethod
def setup_classes(cls):
class Base(cls.Basic):
def __init__(self, uid, x):
self.uid = uid
self.x = x
class Derived(Base):
def __init__(self, uid, x, y):
self.uid = uid
self.x = x
self.y = y
class DerivedII(Base):
def __init__(self, uid, x, z):
self.uid = uid
self.x = x
self.z = z
class Comment(cls.Basic):
def __init__(self, uid, comment):
self.uid = uid
self.comment = comment
def test_basic(self):
(
Comment,
Derived,
derived,
comments,
DerivedII,
Base,
base,
derivedII,
) = (
self.classes.Comment,
self.classes.Derived,
self.tables.derived,
self.tables.comments,
self.classes.DerivedII,
self.classes.Base,
self.tables.base,
self.tables.derivedII,
)
self.mapper_registry.map_imperatively(Comment, comments)
baseMapper = self.mapper_registry.map_imperatively(
Base,
base,
properties=dict(
comments=relationship(
Comment, lazy="joined", cascade="all, delete-orphan"
)
),
)
self.mapper_registry.map_imperatively(
Derived, derived, inherits=baseMapper
)
self.mapper_registry.map_imperatively(
DerivedII, derivedII, inherits=baseMapper
)
sess = fixture_session()
d = Derived("uid1", "x", "y")
d.comments = [Comment("uid1", "comment")]
d2 = DerivedII("uid2", "xx", "z")
d2.comments = [Comment("uid2", "comment")]
sess.add_all((d, d2))
sess.flush()
sess.expunge_all()
# this eager load sets up an AliasedClauses for the "comment"
# relationship, then stores it in clauses_by_lead_mapper[mapper for
# Derived]
d = sess.query(Derived).get("uid1")
sess.expunge_all()
assert len([c for c in d.comments]) == 1
# this eager load sets up an AliasedClauses for the "comment"
# relationship, and should store it in clauses_by_lead_mapper[mapper
# for DerivedII]. the bug was that the previous AliasedClause create
# prevented this population from occurring.
d2 = sess.query(DerivedII).get("uid2")
sess.expunge_all()
        # the object is not in the session; therefore the lazy load can't
        # trigger here, so the eager load had to succeed
assert len([c for c in d2.comments]) == 1
class EagerTest6(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"design_types",
metadata,
Column(
"design_type_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
)
Table(
"design",
metadata,
Column(
"design_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column(
"design_type_id",
Integer,
ForeignKey("design_types.design_type_id"),
),
)
Table(
"parts",
metadata,
Column(
"part_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("design_id", Integer, ForeignKey("design.design_id")),
Column(
"design_type_id",
Integer,
ForeignKey("design_types.design_type_id"),
),
)
Table(
"inherited_part",
metadata,
Column(
"ip_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("part_id", Integer, ForeignKey("parts.part_id")),
Column("design_id", Integer, ForeignKey("design.design_id")),
)
@classmethod
def setup_classes(cls):
class Part(cls.Basic):
pass
class Design(cls.Basic):
pass
class DesignType(cls.Basic):
pass
class InheritedPart(cls.Basic):
pass
def test_one(self):
(
Part,
inherited_part,
design_types,
DesignType,
parts,
design,
Design,
InheritedPart,
) = (
self.classes.Part,
self.tables.inherited_part,
self.tables.design_types,
self.classes.DesignType,
self.tables.parts,
self.tables.design,
self.classes.Design,
self.classes.InheritedPart,
)
p_m = self.mapper_registry.map_imperatively(Part, parts)
self.mapper_registry.map_imperatively(
InheritedPart,
inherited_part,
properties=dict(part=relationship(Part, lazy="joined")),
)
d_m = self.mapper_registry.map_imperatively(
Design,
design,
properties=dict(
inheritedParts=relationship(
InheritedPart,
cascade="all, delete-orphan",
backref="design",
)
),
)
self.mapper_registry.map_imperatively(DesignType, design_types)
d_m.add_property(
"type", relationship(DesignType, lazy="joined", backref="designs")
)
p_m.add_property(
"design",
relationship(
Design,
lazy="joined",
backref=backref("parts", cascade="all, delete-orphan"),
),
)
d = Design()
sess = fixture_session()
sess.add(d)
sess.flush()
sess.expunge_all()
x = sess.query(Design).get(1)
x.inheritedParts
class EagerTest7(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"companies",
metadata,
Column(
"company_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("company_name", String(40)),
)
Table(
"addresses",
metadata,
Column(
"address_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("company_id", Integer, ForeignKey("companies.company_id")),
Column("address", String(40)),
)
Table(
"phone_numbers",
metadata,
Column(
"phone_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("address_id", Integer, ForeignKey("addresses.address_id")),
Column("type", String(20)),
Column("number", String(10)),
)
Table(
"invoices",
metadata,
Column(
"invoice_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("company_id", Integer, ForeignKey("companies.company_id")),
Column("date", sa.DateTime),
)
@classmethod
def setup_classes(cls):
class Company(cls.Comparable):
pass
class Address(cls.Comparable):
pass
class Phone(cls.Comparable):
pass
class Invoice(cls.Comparable):
pass
    def test_load_m2o_attached_to_o2m(self):
"""
        Tests eager load of a many-to-one attached to a one-to-many. This
        test case illustrated the bug: when the single Company was loaded,
        no further processing of the rows occurred to load the Company's
        second Address object.
"""
addresses, invoices, Company, companies, Invoice, Address = (
self.tables.addresses,
self.tables.invoices,
self.classes.Company,
self.tables.companies,
self.classes.Invoice,
self.classes.Address,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(
Company,
companies,
properties={"addresses": relationship(Address, lazy="joined")},
)
self.mapper_registry.map_imperatively(
Invoice,
invoices,
properties={"company": relationship(Company, lazy="joined")},
)
a1 = Address(address="a1 address")
a2 = Address(address="a2 address")
c1 = Company(company_name="company 1", addresses=[a1, a2])
i1 = Invoice(date=datetime.datetime.now(), company=c1)
session = fixture_session()
session.add(i1)
session.flush()
company_id = c1.company_id
invoice_id = i1.invoice_id
session.expunge_all()
c = session.query(Company).get(company_id)
session.expunge_all()
i = session.query(Invoice).get(invoice_id)
def go():
eq_(c, i.company)
eq_(c.addresses, i.company.addresses)
self.assert_sql_count(testing.db, go, 0)
class EagerTest8(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
Table(
"prj",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("created", sa.DateTime),
Column("title", sa.String(100)),
)
Table(
"task",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column(
"status_id",
Integer,
ForeignKey("task_status.id"),
nullable=False,
),
Column("title", sa.String(100)),
Column(
"task_type_id",
Integer,
ForeignKey("task_type.id"),
nullable=False,
),
Column("prj_id", Integer, ForeignKey("prj.id"), nullable=False),
)
Table(
"task_status",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
Table(
"task_type",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
)
Table(
"msg",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("posted", sa.DateTime, index=True),
Column("type_id", Integer, ForeignKey("msg_type.id")),
Column("task_id", Integer, ForeignKey("task.id")),
)
Table(
"msg_type",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", sa.String(20)),
Column("display_name", sa.String(20)),
)
@classmethod
def fixtures(cls):
return dict(
prj=(("id",), (1,)),
task_status=(("id",), (1,)),
task_type=(("id",), (1,)),
task=(
("title", "task_type_id", "status_id", "prj_id"),
("task 1", 1, 1, 1),
),
)
@classmethod
def setup_classes(cls):
class Task_Type(cls.Comparable):
pass
class Joined(cls.Comparable):
pass
def test_nested_joins(self):
task, Task_Type, Joined, task_type, msg = (
self.tables.task,
self.classes.Task_Type,
self.classes.Joined,
self.tables.task_type,
self.tables.msg,
)
# this is testing some subtle column resolution stuff,
# concerning corresponding_column() being extremely accurate
# as well as how mapper sets up its column properties
self.mapper_registry.map_imperatively(Task_Type, task_type)
j = sa.outerjoin(task, msg, task.c.id == msg.c.task_id)
jj = (
sa.select(
task.c.id.label("task_id"),
sa.func.count(msg.c.id).label("props_cnt"),
)
.select_from(j)
.group_by(task.c.id)
.alias("prop_c_s")
)
jjj = sa.join(task, jj, task.c.id == jj.c.task_id)
self.mapper_registry.map_imperatively(
Joined,
jjj,
properties=dict(type=relationship(Task_Type, lazy="joined")),
)
session = fixture_session()
eq_(
session.query(Joined)
.order_by(Joined.id)
.limit(10)
.offset(0)
.one(),
Joined(id=1, title="task 1", props_cnt=0),
)
class EagerTest9(fixtures.MappedTest):
"""Test the usage of query options to eagerly load specific paths.
This relies upon the 'path' construct used by PropertyOption to relate
LoaderStrategies to specific paths, as well as the path state maintained
throughout the query setup/mapper instances process.
"""
@classmethod
def define_tables(cls, metadata):
Table(
"accounts",
metadata,
Column(
"account_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(40)),
)
Table(
"transactions",
metadata,
Column(
"transaction_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(40)),
)
Table(
"entries",
metadata,
Column(
"entry_id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
),
Column("name", String(40)),
Column("account_id", Integer, ForeignKey("accounts.account_id")),
Column(
"transaction_id",
Integer,
ForeignKey("transactions.transaction_id"),
),
)
@classmethod
def setup_classes(cls):
class Account(cls.Basic):
pass
class Transaction(cls.Basic):
pass
class Entry(cls.Basic):
pass
@classmethod
def setup_mappers(cls):
Account, Transaction, transactions, accounts, entries, Entry = (
cls.classes.Account,
cls.classes.Transaction,
cls.tables.transactions,
cls.tables.accounts,
cls.tables.entries,
cls.classes.Entry,
)
cls.mapper_registry.map_imperatively(Account, accounts)
cls.mapper_registry.map_imperatively(Transaction, transactions)
cls.mapper_registry.map_imperatively(
Entry,
entries,
properties=dict(
account=relationship(
Account,
uselist=False,
backref=backref(
"entries", lazy="select", order_by=entries.c.entry_id
),
),
transaction=relationship(
Transaction,
uselist=False,
backref=backref(
"entries", lazy="joined", order_by=entries.c.entry_id
),
),
),
)
def test_joinedload_on_path(self):
Entry, Account, Transaction = (
self.classes.Entry,
self.classes.Account,
self.classes.Transaction,
)
session = fixture_session()
tx1 = Transaction(name="tx1")
tx2 = Transaction(name="tx2")
acc1 = Account(name="acc1")
Entry(name="ent11", account=acc1, transaction=tx1)
Entry(name="ent12", account=acc1, transaction=tx2)
acc2 = Account(name="acc2")
Entry(name="ent21", account=acc2, transaction=tx1)
Entry(name="ent22", account=acc2, transaction=tx2)
session.add(acc1)
session.flush()
session.expunge_all()
def go():
# load just the first Account. eager loading will actually load
# all objects saved thus far, but will not eagerly load the
# "accounts" off the immediate "entries"; only the "accounts" off
# the entries->transaction->entries
acc = (
session.query(Account)
.options(
sa.orm.joinedload("entries")
.joinedload("transaction")
.joinedload("entries")
.joinedload("account")
)
.order_by(Account.account_id)
).first()
# no sql occurs
eq_(acc.name, "acc1")
eq_(acc.entries[0].transaction.entries[0].account.name, "acc1")
eq_(acc.entries[0].transaction.entries[1].account.name, "acc2")
# lazyload triggers but no sql occurs because many-to-one uses
# cached query.get()
for e in acc.entries:
assert e.account is acc
self.assert_sql_count(testing.db, go, 1)
# Derived from keras-rl
import opensim as osim
import numpy as np
import sys
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Input, concatenate, BatchNormalization
from keras.optimizers import Adam
from rl.agents import DDPGAgent
from rl.memory import SequentialMemory
from rl.random import OrnsteinUhlenbeckProcess
from osim.env import *
from osim.http.client import Client
from keras.optimizers import RMSprop
import argparse
import math
import scipy.io
# Command line parameters
parser = argparse.ArgumentParser(description='Train or test neural net motor controller')
parser.add_argument('--train', dest='train', action='store_true', default=True)
parser.add_argument('--test', dest='train', action='store_false', default=True)
parser.add_argument('--steps', dest='steps', action='store', default=10000, type=int)
parser.add_argument('--visualize', dest='visualize', action='store_true', default=False)
parser.add_argument('--model', dest='model', action='store', default="example.h5f")
parser.add_argument('--token', dest='token', action='store', required=False)
args = parser.parse_args()
# Load gait parameters
gait_path = '/home/ubuntu/still_params.mat'
gait_params = scipy.io.loadmat(gait_path)
tau_coeffs = gait_params['tau_coeffs_wrt_time'][0]
qd_coeffs = gait_params['qd_coeffs_wrt_tau']
qd_coeffs_wrt_time = gait_params['qd_coeffs_wrt_time']
dot_qd_coeffs = gait_params['dot_qd_coeffs_wrt_tau']
# order of these coeffs (according to Wen-Loong) is:
# 0. nonstance ankle
# 1. nonstance knee
# 2. nonstance hip
# 3. stance hip
# 4. stance knee
# 5. stance ankle
torso_coeffs = gait_params['torso_coeffs_wrt_tau'][0]
sf_talus_X_coeffs = gait_params['sf_talus_X_coeffs_wrt_tau'][0]
sf_talus_Y_coeffs = gait_params['sf_talus_Y_coeffs_wrt_tau'][0]
nsf_talus_X_coeffs = gait_params['nsf_talus_X_coeffs_wrt_tau'][0]
nsf_talus_Y_coeffs = gait_params['nsf_talus_Y_coeffs_wrt_tau'][0]
sf_toe_X_coeffs = gait_params['sf_toe_X_coeffs_wrt_tau'][0]
sf_toe_Y_coeffs = gait_params['sf_toe_Y_coeffs_wrt_tau'][0]
nsf_toe_X_coeffs = gait_params['nsf_toe_X_coeffs_wrt_tau'][0]
nsf_toe_Y_coeffs = gait_params['nsf_toe_Y_coeffs_wrt_tau'][0]
torso_com_X_coeffs = gait_params['torso_com_X_coeffs_wrt_tau'][0]
torso_com_Y_coeffs = gait_params['torso_com_Y_coeffs_wrt_tau'][0]
com_X_coeffs = gait_params['com_X_coeffs_wrt_tau'][0]
com_Y_coeffs = gait_params['com_Y_coeffs_wrt_tau'][0]
def eval_poly(coeffs, x):
"""
    Evaluate the polynomial with the given coefficients (ordered highest
    degree first) at x.
"""
res = 0
for i, c in enumerate(reversed(coeffs)):
res += c * np.power(x, i)
return res
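# Sanity check (hypothetical values): coefficients are ordered highest degree
# first, matching numpy's np.polyval convention, e.g.
#   eval_poly([2, 0, 1], 3.0) == 2 * 3.0**2 + 0 * 3.0 + 1 == 19.0
#   np.polyval([2, 0, 1], 3.0) == 19.0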
cycle_length = 71
# nonzero_indices = [0, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
# 16, 17, 18, 19, 26, 27, 28, 29, 30, 31,
# 32, 33, 34, 35, 36, 37]
# nonzero_indices = [0, 6, 7, 8, 9, 10, 11, 18, 19, 26, 27, 28, 29, 30, 31,
# 32, 33, 34, 35]
nonzero_indices = [18, 19, 28, 29, 30, 31]
x_indices = [18, 26, 28, 30, 32, 34]
left_stance_obs = []
right_stance_obs = []
# construct observation maps for LEFT foot being stance foot
for i in range(cycle_length):
t = i * 0.01
tau = eval_poly(tau_coeffs, t)
obs = np.zeros(41)
# 0: rotation of pelvis
obs[0] = eval_poly(torso_coeffs, tau)
# 1, 2: x, y position of pelvis
# obs[1] = eval_poly(torso_com_X_coeffs, tau)
# obs[2] = eval_poly(torso_com_Y_coeffs, tau)
# 3, 4, 5: velocity (rotation, x, y) of pelvis
# 6-11: rotation of each ankle, knee, hip
# order is: ['hip_r','knee_r','ankle_r','hip_l','knee_l','ankle_l']
obs[6] = eval_poly(qd_coeffs[2], tau)
obs[7] = eval_poly(qd_coeffs[1], tau)
obs[8] = eval_poly(qd_coeffs[0], tau)
obs[9] = eval_poly(qd_coeffs[3], tau)
obs[10] = eval_poly(qd_coeffs[4], tau)
obs[11] = eval_poly(qd_coeffs[5], tau)
# 12-17: angular velocity of each ankle, knee, hip
obs[12] = eval_poly(dot_qd_coeffs[2], tau)
obs[13] = eval_poly(dot_qd_coeffs[1], tau)
obs[14] = eval_poly(dot_qd_coeffs[0], tau)
obs[15] = eval_poly(dot_qd_coeffs[3], tau)
obs[16] = eval_poly(dot_qd_coeffs[4], tau)
obs[17] = eval_poly(dot_qd_coeffs[5], tau)
# 18, 19: x, y of COM
obs[18] = eval_poly(com_X_coeffs, tau) - (-0.00388174517697 - -0.0697193532044)
obs[19] = eval_poly(com_Y_coeffs, tau) - (0.998920554125 - 0.970765639028)
# 20, 21: velocity of COM
# 22, 23: position of head
# 24, 25: position of pelvis
# 26, 27: position of torso
obs[26] = eval_poly(torso_com_X_coeffs, tau) - (-0.00162897940695 + 0.0965008489262)
obs[27] = eval_poly(torso_com_Y_coeffs, tau) - (1.26664398555 - 0.996431048568)
# 28, 29: position of left toe
obs[28] = eval_poly(sf_toe_X_coeffs, tau) - (0.13 - 0.00798)
obs[29] = eval_poly(sf_toe_Y_coeffs, tau) - 0.0274
# 30, 31: position of right toe
obs[30] = eval_poly(nsf_toe_X_coeffs, tau) - (0.13 - 0.00798)
obs[31] = eval_poly(nsf_toe_Y_coeffs, tau) - 0.0274
# 32, 33: position of left talus
obs[32] = eval_poly(sf_talus_X_coeffs, tau) - 0.119683331742
obs[33] = eval_poly(sf_talus_Y_coeffs, tau) - (0.0417232320769 - 0.0229523985286)
# 34, 35: position of right talus
obs[34] = eval_poly(nsf_talus_X_coeffs, tau) - 0.119683331742
obs[35] = eval_poly(nsf_talus_Y_coeffs, tau) - (0.0417232320769 - 0.0229523985286)
# 36, 37: strength of left and right psoas
obs[36] = 1
obs[37] = 1
# 38, 39: distance of next obstacle
# 40: radius of obstacle
left_stance_obs.append(obs)
# construct observation maps for RIGHT foot being stance foot
for i in range(cycle_length):
t = i * 0.01
tau = eval_poly(tau_coeffs, t)
obs = np.zeros(41)
# 0: rotation of pelvis
obs[0] = eval_poly(torso_coeffs, tau)
# 1, 2: x, y position of pelvis
# obs[1] = eval_poly(torso_com_X_coeffs, tau)
# obs[2] = eval_poly(torso_com_Y_coeffs, tau)
# 3, 4, 5: velocity (rotation, x, y) of pelvis
# 6-11: rotation of each ankle, knee, hip
# order is: ['hip_r','knee_r','ankle_r','hip_l','knee_l','ankle_l']
obs[6] = eval_poly(qd_coeffs[3], tau)
obs[7] = eval_poly(qd_coeffs[4], tau)
obs[8] = eval_poly(qd_coeffs[5], tau)
obs[9] = eval_poly(qd_coeffs[2], tau)
obs[10] = eval_poly(qd_coeffs[1], tau)
obs[11] = eval_poly(qd_coeffs[0], tau)
# 12-17: angular velocity of each ankle, knee, hip
obs[12] = eval_poly(dot_qd_coeffs[3], tau)
obs[13] = eval_poly(dot_qd_coeffs[4], tau)
obs[14] = eval_poly(dot_qd_coeffs[5], tau)
obs[15] = eval_poly(dot_qd_coeffs[2], tau)
obs[16] = eval_poly(dot_qd_coeffs[1], tau)
obs[17] = eval_poly(dot_qd_coeffs[0], tau)
# 18, 19: x, y of COM
obs[18] = eval_poly(com_X_coeffs, tau) - (-0.00388174517697 - -0.0697193532044)
obs[19] = eval_poly(com_Y_coeffs, tau) - (0.998920554125 - 0.970765639028)
# 20, 21: velocity of COM
# 22, 23: position of head
# 24, 25: position of pelvis
# 26, 27: position of torso
obs[26] = eval_poly(torso_com_X_coeffs, tau) - (-0.00162897940695 + 0.0965008489262)
obs[27] = eval_poly(torso_com_Y_coeffs, tau) - (1.26664398555 - 0.996431048568)
# 28, 29: position of left toe
obs[28] = eval_poly(nsf_toe_X_coeffs, tau) - (0.13 - 0.00798)
obs[29] = eval_poly(nsf_toe_Y_coeffs, tau) - 0.0274
# 30, 31: position of right toe
obs[30] = eval_poly(sf_toe_X_coeffs, tau) - (0.13 - 0.00798)
obs[31] = eval_poly(sf_toe_Y_coeffs, tau) - 0.0274
# 32, 33: position of left talus
obs[32] = eval_poly(nsf_talus_X_coeffs, tau) - 0.119683331742
obs[33] = eval_poly(nsf_talus_Y_coeffs, tau) - (0.0417232320769 - 0.0229523985286)
# 34, 35: position of right talus
obs[34] = eval_poly(sf_talus_X_coeffs, tau) - 0.119683331742
obs[35] = eval_poly(sf_talus_Y_coeffs, tau) - (0.0417232320769 - 0.0229523985286)
# 36, 37: strength of left and right psoas
obs[36] = 1
obs[37] = 1
# 38, 39: distance of next obstacle
# 40: radius of obstacle
right_stance_obs.append(obs)
# Load walking environment
env = RunEnv(args.visualize)
env.reset()
env.set_imitation(left_stance_obs, right_stance_obs, cycle_length, nonzero_indices, x_indices, 5)
nb_actions = env.action_space.shape[0]
# Total number of steps in training
nallsteps = args.steps
# ts1 = []
# ts2 = []
# training_set = []
# for i in range(1, 101):
# t = 0.005 * i
# tau = eval_poly(tau_coeffs, t)
# obs = np.zeros(env.observation_space.shape[0])
# # rotation of pelvis/torso
# obs[0] = eval_poly(torso_coeffs, t)
# # angular velocity of each ankle, knee, hip
# for j in range(6):
# obs[6+j] = eval_poly(qd_coeffs[j], t)
# obs[12+j] = eval_poly(dot_qd_coeffs[j], t)
# training_set.append(np.concatenate([np.random.random(size=nb_actions), obs]))
# for j in range(100):
# ts1.append(0.25 * np.random.random(size=nb_actions))
# ts2.append(obs)
# training_set = np.array(training_set)
# ts1 = np.array(ts1)
# ts2 = np.array(ts2)
# labels = 2 * np.ones(100 * len(training_set))
# Create networks for DDPG
# Next, we build a very simple model.
actor = Sequential()
actor.add(Flatten(input_shape=(1,) + env.observation_space.shape))
actor.add(Dense(32))
actor.add(Activation('relu'))
actor.add(Dense(32))
actor.add(Activation('relu'))
actor.add(Dense(32))
actor.add(Activation('relu'))
actor.add(Dense(nb_actions))
actor.add(Activation('sigmoid'))
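# The final sigmoid keeps each of the nb_actions outputs in [0, 1], which
# matches the muscle-excitation bounds of the RunEnv action space.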
#print(actor.summary())
action_input = Input(shape=(nb_actions,), name='action_input')
observation_input = Input(shape=(1,) + env.observation_space.shape, name='observation_input')
flattened_observation = Flatten()(observation_input)
x = concatenate([action_input, flattened_observation])
x = Dense(64)(x)
x = Activation('relu')(x)
x = Dense(64)(x)
x = Activation('relu')(x)
x = Dense(64)(x)
x = Activation('relu')(x)
x = Dense(1)(x)
x = Activation('linear')(x)
critic = Model(inputs=[action_input, observation_input], outputs=x)
#print(critic.summary())
# ts2 = ts2.reshape([10000, 1, 41])
# critic.compile(Adam(lr=0.001), loss='mean_squared_error', metrics=['accuracy'])
# critic.fit([ts1, ts2], labels, epochs=10)
# Set up the agent for training
memory = SequentialMemory(limit=1000000, window_length=1)
random_process = OrnsteinUhlenbeckProcess(theta=.15, mu=0., sigma=.15, size=env.noutput)
agent = DDPGAgent(nb_actions=nb_actions, actor=actor, critic=critic, critic_action_input=action_input,
memory=memory, nb_steps_warmup_critic=1000, nb_steps_warmup_actor=1000,
random_process=random_process, gamma=.99, target_model_update=1e-3,
delta_clip=1.)
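# In keras-rl, a target_model_update < 1 selects soft ("Polyak") target
# updates: each step the target networks follow the online networks as
# target_weights = 1e-3 * weights + (1 - 1e-3) * target_weights.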
agent.compile(Adam(lr=.001, clipnorm=1.), metrics=['mae'])
# Okay, now it's time to learn something! Visualizing training slows things
# down quite a lot, so it is disabled here (visualize=False). You can always
# safely abort the training prematurely using Ctrl + C.
if args.train:
#agent.load_weights(args.model)
agent.fit(env, nb_steps=nallsteps, action_repetition=1, visualize=False, verbose=2, nb_max_episode_steps=env.timestep_limit, log_interval=10000)
# After training is done, we save the final weights.
agent.save_weights(args.model, overwrite=True)
# If TEST and TOKEN, submit to crowdAI
if not args.train and args.token:
agent.load_weights(args.model)
# Settings
remote_base = 'http://grader.crowdai.org:1729'
client = Client(remote_base)
# Create environment
observation = client.env_create(args.token)
    # The grader runs 3 simulations of at most 1000 steps each; step through
    # them and stop after the last one.
while True:
v = np.array(observation).reshape((env.observation_space.shape[0]))
action = agent.forward(v)
[observation, reward, done, info] = client.env_step(action.tolist())
if done:
observation = client.env_reset()
if not observation:
break
client.submit()
# If TEST and no TOKEN, run some test experiments
if not args.train and not args.token:
agent.load_weights(args.model)
# Finally, evaluate our algorithm for 1 episode.
agent.test(env, nb_episodes=1, visualize=False, nb_max_episode_steps=500)
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 10 22:05:56 2016
@author: Tobias Jachowski
"""
import collections.abc
import numpy as np
import operator
from .region import Region
from .record import Record
from ..modification import Modification
class View(Region):
"""
Defines a View on a given Region (Record or View), the parent of that view.
    The parent is an instance of a MultiRegion, which provides the option to
    add further parents (see MultiRegion).
    The region of data a View refers to can (in contrast to a Record) be
    chosen via the setter methods (start, stop, tmin, tmax).
"""
def __init__(self, parent=None, sticky_start=True, sticky_stop=True,
**kwargs):
"""
Parameters
----------
parent : Region
sticky_start : bool
sticky_stop : bool
**kwargs
Attributes
----------
records
record
views
modifications_apply
modifications_based
group
parent
calibration
num_traces
traces
samplingrate
start
stop
sticky_start : bool
If True and `self.start` has the same position as the parent's
start, `self.start` follows parent's start automatically, if it
is changed.
If False, only shrinkage of the parent's region (increasing start)
updates `self.start`, if necessary.
`self.sticky_start` is automatically set to False, if `self.start`
is set manually.
sticky_stop : bool
If True and `self.stop` has the same position as the parent's
stop, `self.stop` follows parent's stop automatically, if it
is changed.
If False, only shrinkage of the parent's region (decreasing stop)
updates `self.stop`, if necessary.
`self.sticky_stop` is automatically set to False, if `self.stop`
is set manually.
"""
super().__init__(**kwargs)
if parent is None:
raise TypeError("View missing 1 required positional argument: "
"`parent`")
# Create a new MultiRegion with `parent` as parent and add it to `self`
# as parent
multiregion = MultiRegion(parent=parent, name=self.name)
if not self.add_parent(multiregion):
raise ValueError("Could not add `self` as a child, most probably "
"circular reference detected.")
self.start = 0
self.stop = self.parent.datapoints
        # Determine whether start and stop (tmin and tmax) should follow an
        # increase of the parent region or compensate for it to keep pointing
        # at the same region.
self.sticky_start = sticky_start
self.sticky_stop = sticky_stop
@property
def records(self):
return self.parent.records
@property
def record(self):
return self.parent.record
@property
def views(self):
return self.parent.views
@property
def modifications_apply(self):
return self.modifications(key='apply')
@property
def modifications_based(self):
return self.modifications(key='based')
def modifications(self, key='apply'):
if key == 'apply':
return self.parent_instances(Modification)
else:
return self.child_instances(Modification)
@property
def group(self):
modifications = list(self.modifications_based)
if len(modifications) >= 1:
# one or more modification_based -> same group as first
# modification
return modifications[0].group
else:
# no modification_based -> own group, which could be the same but
# doesn't have to
return self._group
@group.setter
def group(self, group):
modifications = list(self.modifications_based)
if len(modifications) >= 1:
# one or more modification_based -> set group of first modification
modifications[0].group = group
else:
# no modification_based -> set own group
# self.group = None (or given group) is called in
# super().__init__(), which ensures initialisation of self._group
self._group = group
@property
def parent(self):
# MultiRegion is the parent of View
return self.first_ancestor_instance(MultiRegion, level=1, dft=False)
@property
def calibration(self):
return self.parent.calibration
@property
def num_traces(self):
if self.parent is None:
return 0
return self.parent.num_traces
@property
def traces(self):
if self.parent is None:
return []
return self.parent.traces
@property
def samplingrate(self):
if self.parent is None:
return 0.0
return self.parent.samplingrate
@property
def start(self):
return self._start
@property
def stop(self):
return self._stop
@start.setter
def start(self, start):
if not hasattr(self, '_start'):
# Initialise start, when called for the first time
self._start = start
else:
if start != self._start:
self.sticky_start = False
            # Validate start to be between 0 and self.parent.datapoints
start = max(start, 0)
start = min(start, self.parent.datapoints)
# create shift for auto shift update of children
shift = (self, 'start', 0, start - self._start)
self._start = start
# inform children - views and modifications based on this
# view about change
self.set_changed(level=1, index_shift=shift)
@stop.setter
def stop(self, stop):
if not hasattr(self, '_stop'):
# Initialise stop, when called for the first time
self._stop = stop
else:
if stop != self._stop:
self.sticky_stop = False
            # Validate stop to be between 0 and self.parent.datapoints
stop = max(stop, 0)
stop = min(stop, self.parent.datapoints)
# create shift for auto shift update of children
shift = (self,
'stop',
self._stop - self._start,
stop - self._stop)
self._stop = stop
# inform children - views and modifications based on this
# view about change
self.set_changed(level=1, index_shift=shift)
def member_changed(self, ancestor=True, calledfromself=False,
index_shift=None, **kwargs):
        # If a change of an ancestor View or MultiRegion was triggered by an
        # index_shift, check whether this index_shift of the ancestor
        # necessitates an update of self.stop or self.start. If yes, set
        # self.stop or self.start accordingly and implicitly inform further
        # descendants of the change (the children of a View or a MultiRegion
        # are informed upon an index_shift; see the call of set_changed() in
        # the start/stop properties).
        # A change of descendants should be ignored.
if index_shift is not None and not calledfromself and ancestor:
if not self._update_index(index_shift):
                # It was not necessary to change start or stop; this means
                # the data this View offers has not changed and, in turn, the
                # cached data does not have to be recalculated -> return and
                # do not trigger recaching nor set self.updated to False
                # (-> do not call super().member_changed())
return
# Set or unset cached data and update update status
super().member_changed(ancestor=ancestor,
calledfromself=calledfromself, **kwargs)
def _update_index(self, index_shift):
"""
        If the parent has changed its indices, check whether our own indices
        need to be adjusted too, and inform the child about the index_shift
        if necessary.
Parameters
----------
index_shift : tuple of (Region, str, int, int)
            The tuple contains: the caller Region that caused the index shift,
            the name of the index that was changed ('start'/'stop'), the
            original index of the position where the change took place, and
            the amount by which start or stop was shifted
            (caller, name, index, shift).
Returns
-------
bool
When `self.start` or `self.stop` had to be updated, return True,
otherwise False.
"""
# Get the start/stop shift from the parent
caller, name, index, shift = index_shift
# print("%s -> %s" %(caller.name, self.name))
# print("%s, %s, %s" % (name, index, shift))
        # Initialize start and stop, in case there is no shift
start = self._start
stop = self._stop
returnvalue = False
if name == 'start':
# There are three possible cases for self._start:
# 1. start > index
# change start
# if shift > start - index:
# change start AND inform child
# 2. start == index
# if shift is positive:
# leave start, inform child
# if shift is negative:
# if sticky: leave start, inform child
# else: change start
# 3. start < index
# leave start, inform child
#
# There is one case, where self._stop is changed:
# 1. stop > index
# change stop
# Change self._start
if self.sticky_start:
op = operator.gt # start > index is sticky
else:
op = operator.ge # start >= index is non sticky
if op(self._start, index):
# Validate start to be at least as great as start of parent
start = max(self._start - shift, index)
# Change self._stop
# and keep self._stop > index (start)
if self._stop > index:
                # Validate stop to be at least as great as start + 1 of parent
stop = max(self._stop - shift, index + 1)
# Calculate index_shift for child
_index = max(index - self._start, 0)
_shift = max(shift - max(self._start - index, 0), 0)
if self._start == index and self.sticky_start:
_shift = shift
# Set changed start and stop
if self._start != start or self._stop != stop:
# print(" Setting start in %s to %s" % (self.name, start))
# print(" Setting stop in %s to %s" % (self.name, stop))
# Set start and/or stop
self._start = start
self._stop = stop
returnvalue = True
# Inform child about 'start' index shift, if self._start and
# self._stop do not compensate for the index shift
if _shift != 0:
i_shift = (self, name, _index, _shift)
self.set_changed(level=1, index_shift=i_shift,
includeself=False)
else: # name == 'stop':
# Change self._start
if self._start >= index:
# This start follows the parent's stop index change
start = self._start + shift
            else:  # self._start < index
# Keep self._start < index (stop)
                # Validate start to be at most as great as stop - 1 of parent
start = min(self._start, index + shift - 1)
# Change self._stop
if self.sticky_stop:
op = operator.ge # >= is sticky
else:
op = operator.gt # > is non sticky
if op(self._stop, index):
                # This stop follows the parent's stop index change
stop = self._stop + shift
else: # self._stop still could be after parent's start, therefore:
stop = min(self._stop, index + shift)
# Calculate index_shift for child
_index = max(index - self._start, 0)
_shift = min(shift + max(index - self._stop, 0), 0)
if self._stop == index and self.sticky_stop:
_shift = shift
# Set changed start and stop
if self._start != start or self._stop != stop:
# print(" Setting start in %s to %s" % (self.name, start))
# print(" Setting stop in %s to %s" % (self.name, stop))
# Set start and/or stop
self._start = start
self._stop = stop
returnvalue = True
# Inform child about 'stop' index shift, if self._start and
# self._stop do not (fully) compensate for the index shift
if _index != 0 and _shift != 0:
i_shift = (self, name, _index, _shift)
self.set_changed(level=1, index_shift=i_shift,
includeself=False)
return returnvalue
def _get_data_uncached(self, samples, traces_idx, copy):
"""
Return uncached data, i.e.:
Correct samples such that they relate to self.parent.datapoints, rather
than self.datapoints, i.e. shift index by self.start.
Get data from parent.
Apply all modifications registered at this View.
Return data.
"""
# parentize the requested samples, i.e. correct for self.start
if isinstance(samples, slice):
p_start = samples.start + self.start
p_stop = samples.stop + self.start
p_step = samples.step
p_samples = slice(p_start, p_stop, p_step)
else:
# samples is an np.array
p_samples = samples + self.start
data = self.parent._get_data(p_samples, traces_idx, copy)
# modify data by applying all modifications to the unmodified data
for mod in self.modifications():
data = mod.modify(data, samples, traces_idx)
return data
class MultiRegion(Region):
"""
Holds references to multi parent instances (Region), offers methods
to add and remove a parent and a method to serve the data of all parents
concatenated. Inherits from class Region.
"""
def __init__(self, parent=None, **kwargs):
"""
Parameters
----------
parent : Region or Iterable of Regions
**kwargs
Attributes
----------
group
start
stop
records
views
record
calibration
num_traces
traces
samplingrate
"""
super().__init__(max_children=1, **kwargs)
if parent is None:
raise TypeError("Parent missing 1 required positional argument: "
"'parent'")
        if isinstance(parent, collections.abc.Iterable):
for p in parent:
self.add_parent(p)
else:
self.add_parent(parent)
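        # max_children=1 above means each MultiRegion is private to exactly
        # one child View, while any number of parents may be attached via
        # add_parent() / remove_parent().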
def member_changed(self, ancestor=True, calledfromself=False,
index_shift=None, **kwargs):
# Possible causes of an index shift:
# 1. index shift of parent
# 2. adding or removing a parent
# This method is called in case 1. For handling of case 2 see
# methods self.add_parent() and self.remove_parent().
#
        # If a change of an ancestor View or MultiRegion was triggered by an
        # index_shift, check whether this index_shift of the ancestor
        # necessitates an update of self.stop or self.start. If yes, set
        # self.stop or self.start accordingly and implicitly inform further
        # descendants of the change (only the children of a View or a
        # MultiRegion (level=1) are informed upon an index_shift; see the call
        # of set_changed() in the start/stop properties).
        # A change of descendants should be ignored.
if index_shift is not None and not calledfromself and ancestor:
caller, name, index, shift = index_shift
# print("%s -> %s" %(caller.name, self.name))
# print("%s, %s, %s" % (name, index, shift))
# Find the start index of the changed caller
start = 0
for parent in self.parents:
if parent is caller:
break
start += parent.datapoints
# Adjust index according to start index of parent/caller
index = start + index
# Create new index_shift based on old one
index_shift = (self, name, index, shift)
# Inform children - usually views - based on this MultiRegion about
# index shift change
self.set_changed(level=1, index_shift=index_shift,
includeself=False)
# Set or unset cached data and update update status
super().member_changed(ancestor=ancestor,
calledfromself=calledfromself, **kwargs)
@property
def group(self):
try:
return self.child.group
        except AttributeError:
return None
@group.setter
def group(self, group):
try:
self.child.group = group
        except AttributeError:
pass
@property
def start(self):
return 0
@property
def stop(self):
# the sum of all parent.datapoints
datapoints = 0
for parent in self.parents:
datapoints += parent.datapoints
return datapoints
@property
def records(self):
"""
        Return all parents that are Record instances, as a generator.
"""
return self.parent_instances(Record)
@property
def views(self):
"""
        Return all parents that are View instances, as a generator.
"""
return self.parent_instances(View)
@property
def record(self):
"""
        Return the first Record that this view and, transitively, this
        view's parents are based on.
"""
# TODO: implement better decision, which record to return, depending on
# the parents; this decision influences the properties: calibration,
# samplingrate, and num_traces
# Presently, only views/records can be added as parent, whose
# calibration, samplingrate, and num_traces are equal to this view
return self.first_ancestor_instance(Record)
@property
def calibration(self):
# TODO: Views combine the raw signals -> no handling of different
# calibration in View meaningful -> Create higher grouping system which
        # gives access to calibrated distances/extensions/forces only, but not
        # the raw signal -> groups of Results? -> shift Calibration of Record
        # to Results
return self.record.calibration
@property
def num_traces(self):
if self.record is None:
return 0
        # TODO: Implement handling of different num_traces: Allow only the
# overlap of different traces to be used?
return self.record.num_traces
@property
def traces(self):
if self.record is None:
return []
return self.record.traces
@property
def samplingrate(self):
if self.record is None:
return 0.0
# TODO: Implement handling of different samplingrate: up/down sampling?
# Pandas? Every parent has its own samplingrate.
return self.record.samplingrate
def add_parent(self, region, index=None, after=None, before=None,
update_indices=True):
"""
Priority of inserting the new region at a specific index in descending
order:
#. index
#. after
#. before
Returns
-------
bool
True if parent could be added or parent is already present in
self._parents. Otherwise return False.
"""
if region is None:
return False
# TODO: Implement possibility of handling different calibrations,
# samplingrate, and num_traces
        # Check if there is already at least one parent present and, if so,
        # make sure the new parent is based on the same calibration and has
        # the same samplingrate and the same number of traces
if (next(self.parents, None) is not None
and (self.calibration is not region.calibration
or self.samplingrate != region.samplingrate
or self.num_traces != region.num_traces)):
return False
# Possible causes of an index shift:
# 1. index shift of parent
# 2. adding or removing a parent
# This method is called in case 2. For handling of case 1 see method
# self.member_changed().
# Add parent.
# Adding a parent usually causes a change of the children. Therefore,
# super().add_parent() would call set_changed() itself, without any
# parameters. This means, if a parent was added, set_changed() would
# trigger a recalculation of Modification, regardless of whether the
# change affected the region selected by the child View, or not.
# However, adding a parent to a View only causes an index shift.
# Therefore, disable generic set_changed() mechanism and initiate a
# specific one with the index_shift information.
if region in self.parents:
# Region is already in parents.
# Avoid informing the children about index_shift.
# This is necessary, because further down super().add_parent()
# would return True, and would therefore not prevent the
# information about the index_shift.
print("Region %s already referenced." % region)
return True
if not super().add_parent(region, index=index, after=after,
before=before, set_changed=False):
return False
index_shift = None
if update_indices:
# Determine index, where insertion took place.
index = 0
for parent in self.parents:
if parent is region:
break
index += parent.datapoints
# Create index_shift: (caller, name, index, shift)
            # Virtually, adding a region is as if one would shift the stop of
            # the preceding region by region.datapoints.
index_shift = (self, 'stop', index, region.datapoints)
# Inform children about index_shift
self.set_changed(level=1, index_shift=index_shift)
return True
def remove_parent(self, region, update_indices=True):
if region is None:
return False
# Possible causes of an index shift:
# 1. index shift of parent
# 2. adding or removing a parent
# This function is called in case 2. For handling of case 1 see method
# self.member_changed().
# Determine index, where removal will take place.
index = 0
for parent in self.parents:
if parent is region:
break
index += parent.datapoints
# Remove parent
# super().remove_parent() would call set_changed() itself, without any
# parameters. This means, if a parent was removed, set_changed() would
# trigger a recalculation of Modification, regardless of whether the
# change affected the region selected by the child View, or not.
# Therefore, disable generic set_changed() mechanism and initiate a
# specific one with the index_shift information.
if not super().remove_parent(region, set_changed=False):
return False
index_shift = None
if update_indices:
# Create index shift
# Virtually, removing a region is as if one would shift the start of a
# following region by region.datapoints.
index_shift = (self, 'start', index, region.datapoints)
        # Inform children about index_shift
self.set_changed(level=1, index_shift=index_shift)
return True
def _get_data_uncached(self, samples, traces_idx, copy=True):
"""
Return concatenated data from all parents.
"""
# determine num_traces
if isinstance(traces_idx, slice):
stop = traces_idx.stop
if stop < 0:
stop = stop + len(self.traces)
num_traces = int(np.ceil((stop - traces_idx.start)
/ traces_idx.step))
else:
# traces is an np.array
num_traces = len(traces_idx)
# determine the global boundaries of requested samples and datapoints
if isinstance(samples, slice):
s_start = samples.start
s_stop = samples.stop
s_step = samples.step
datapoints = int(np.ceil((s_stop - s_start) / s_step))
else:
# samples is an np.array
s_start = samples.min()
s_stop = samples.max() + 1
# datapoints = len(samples)
# datapoints from every single parent need to be appended on the
# fly (np.vstack)
datapoints = 0
# Go through all parents and get data from all parents, whose
# datapoints fall within the requested samples region.
# Initialize running indices start and stop, which keep track of
# samples served by every single parent.
start = 0
stop = 0
# initialize array, which will hold all data from parents
data = np.empty((datapoints, num_traces))
for parent in self.parents:
# set lower bound of index of data served by current parent
# -> start of current parent is stop of previous parent
start = stop
# set upper bound of index of data served by current parent
# -> stop of current parent is:
stop = start + parent.datapoints
            # Check whether data of the current parent is requested by samples
if start < s_stop and stop > s_start:
# create sub fraction of samples for current parent
if isinstance(samples, slice):
# treat steps/decimate
if start <= s_start:
# requested data served by the first parent -> s_start
# is index of first datapoint -> no shift, correct
# sub_start by start
req_start = 0
sub_start = s_start - start
else:
# requested data served by a subsequent parent -> start
# is index of first datapoint -> shift start of data
# according to steps left over by num_samples % s_step
# either some steps left over -> shift is ->
# shift = s_step - steps_left_over
                        # or if no steps left over -> no shift ->
# shift = s_step - s_step
shift = s_step - ((start - s_start) % s_step or s_step)
req_start = (start - s_start + shift) / s_step
sub_start = shift
req_stop = np.ceil((min(stop, s_stop) - s_start) / s_step)
sub_stop = min(stop, s_stop) - start
requested = slice(int(req_start), int(req_stop))
sub_samples = slice(sub_start, sub_stop, s_step)
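                    # Worked example (hypothetical sizes): with s_start=0,
                    # s_step=4 and a first parent of 10 datapoints, the second
                    # parent (start=10) has (10 - 0) % 4 == 2 steps left over,
                    # so shift = 4 - 2 = 2; its first served sample is local
                    # index 2 (global sample 12), written to global row
                    # (10 - 0 + 2) / 4 == 3.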
# get data from parent
data[requested] = parent._get_data(sub_samples, traces_idx,
copy)
else:
# samples is an np.array:
# minimum index can be equal to start (0) or greater
# maximum index has to be lesser than stop (datapoints)
requested = np.logical_and(samples >= start,
samples < stop)
sub_samples = samples[requested] - start
# get data from parent and append to data
p_data = parent._get_data(sub_samples, traces_idx, copy)
data = np.vstack((data, p_data))
return data
import re
from operator import attrgetter
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.core.exceptions import PermissionDenied
from django.db.models import Q
from django.forms import inlineformset_factory
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404, render, redirect
from django.views import generic
from requests import HTTPError
try:
    import urlparse  # Python 2
except ImportError:
    from urllib import parse as urlparse  # Python 3
from django.urls import reverse
from social.app.forms.author import FindRemoteAuthorForm
from social.app.forms.user_profile import UserFormUpdate
from social.app.models.author import Author
from social.app.models.node import Node
from social.app.models.post import Post
from social.app.models.post import (get_all_public_posts, get_all_friend_posts, get_all_foaf_posts,
get_all_remote_node_posts, get_all_local_private_posts)
def create_author_uri(author):
author_host = author.node.host
author_service_url = author.node.service_url
protocol = urlparse.urlparse(author_service_url).scheme + "://"
author_path = reverse('app:authors:detail', kwargs={'pk': author.id})
author_uri = protocol + author_host + author_path
return author_uri
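# For example (hypothetical values): with node.host == "example.org", a
# service_url of "https://example.org/api/" and the 'app:authors:detail'
# pattern resolving to "/authors/<pk>/", this returns
# "https://example.org/authors/<pk>/".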
def get_posts_by_author(request, pk):
"""
Get /authors/<author_guid>/posts/
"""
current_user = request.user
author = Author.objects.get(id=pk)
author_guid = str(pk)
current_author = Author.objects.get(user=request.user.id)
current_author_guid = str(current_author.id)
context = dict()
context['show_add_post_button'] = "false"
# Current user views their own posts
if current_user.is_authenticated() and current_author_guid == author_guid:
context['user_posts'] = \
(Post.objects
.filter(author__id=current_user.profile.id)
.filter(content_type__in=[x[0] for x in Post.TEXT_CONTENT_TYPES])
.filter(unlisted=False)
.order_by('-published'))
context['show_add_post_button'] = "true"
return render(request, 'app/index.html', context)
# Current user views another author's posts
elif current_user.is_authenticated():
# Case V: Get other node posts
# TODO: need to filter these based on remote author's relationship to current user.
get_all_remote_node_posts()
# case I: posts.visibility=public
public_posts = get_all_public_posts()
public_posts = public_posts.filter(author__id=author.id)
# case II: posts.visibility=friends
friend_posts = get_all_friend_posts(current_author) \
.filter(author__id=author.id) \
.filter(~Q(author__id=current_user.profile.id))
# case III: posts.visibility=foaf
foaf_posts = get_all_foaf_posts(current_author) \
.filter(~Q(author__id=current_user.profile.id)) \
.filter(author__id=author.id)
# case IV: posts.visibility=private
author_uri = create_author_uri(current_author)
private_local_posts = get_all_local_private_posts() \
.filter(Q(visible_to_author__uri=author_uri))
posts = ((public_posts |
friend_posts |
foaf_posts |
private_local_posts)
.filter(Q(author__node__local=False) | Q(content_type__in=[x[0] for x in Post.TEXT_CONTENT_TYPES]))
.filter(unlisted=False)
.distinct())
context["user_posts"] = sorted(posts, key=attrgetter('published'), reverse=True)
return render(request, 'app/index.html', context)
# Not authenticated
else:
context['user_posts'] = Post.objects \
.filter(author__id=author.id) \
.filter(visibility="PUBLIC").order_by('-published')
return render(request, 'app/index.html', context)
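# A condensed sketch of the visibility pattern used above: querysets combined
# with "|" OR their filters together and distinct() keeps each post once. The
# "PRIVATE" visibility value is an assumption for illustration; Post and the
# field names come from the query above.
def _demo_visible_posts(author_id, viewer_uri):
    public = Post.objects.filter(visibility="PUBLIC")
    private = Post.objects.filter(visibility="PRIVATE",
                                  visible_to_author__uri=viewer_uri)
    return ((public | private)
            .filter(author__id=author_id, unlisted=False)
            .distinct()
            .order_by('-published'))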
@login_required
def edit_user(request, pk):
user = get_object_or_404(User, pk=pk)
user_form = UserFormUpdate(instance=user)
profile_inline_formset = inlineformset_factory(
User, Author,
fields=('displayName', 'github', 'bio'))
formset = profile_inline_formset(instance=user)
formset.can_delete = False
if request.user.is_authenticated() and request.user.id == user.id:
if request.method == "POST":
user_form = UserFormUpdate(request.POST, request.FILES, instance=user)
formset = profile_inline_formset(request.POST, request.FILES, instance=user)
if user_form.is_valid():
created_user = user_form.save(commit=False)
formset = profile_inline_formset(request.POST, request.FILES, instance=created_user)
if formset.is_valid():
created_user.save()
formset.save()
messages.success(request, 'Your profile has been updated successfully!', extra_tags='alert-success')
return HttpResponseRedirect('/accounts/' + str(user.id))
else:
messages.error(request, 'Oops! There was a problem updating your profile!',
extra_tags='alert-danger')
return render(request, "account/account_update.html", {
"noodle": pk,
"noodle_form": user_form,
"formset": formset,
})
else:
raise PermissionDenied
class AuthorListView(generic.ListView):
model = Author
template_name = 'app/authors_list.html'
context_object_name = 'all_authors'
def get_queryset(self):
return Author.objects.all().order_by('-displayName')
def get_context_data(self, **kwargs):
context = super(AuthorListView, self).get_context_data(**kwargs)
context['show_remote_find_link'] = Node.objects.filter(local=False)
return context
class AuthorDetailView(generic.DetailView):
model = Author
def get_object(self, queryset=None):
try:
author = super(AuthorDetailView, self).get_object(queryset)
except Http404:
author = None
author_id = self.kwargs["pk"]
fetched_new_author = False
if author is None:
# No Author found -- so let's go ask our remote Nodes if they've got it
for node in Node.objects.filter(local=False):
try:
author = node.create_or_update_remote_author(author_id)
except HTTPError:
# Something's wrong with this node, so let's skip it
continue
if author is not None:
# Found it!
fetched_new_author = True
break
if author is None:
# If we got here, no one has it
raise Http404()
if not author.node.local and not fetched_new_author:
try:
# Let's go get the latest version if we didn't already fetch it above
updated_author = author.node.create_or_update_remote_author(author_id)
except HTTPError:
# Remote server failed in a way that wasn't a 404, so let's just display our cached version
return author
if updated_author is None:
# Well, looks like they deleted this author. Awkward.
author.delete()
raise Http404()
else:
author = updated_author
return author
def get_context_data(self, **kwargs):
context = super(AuthorDetailView, self).get_context_data(**kwargs)
if self.request.user.is_authenticated():
logged_in_author = self.request.user.profile
detail_author = context["object"]
context['show_follow_button'] = logged_in_author.can_follow(detail_author)
context['show_unfollow_button'] = logged_in_author.follows(detail_author)
context['show_friend_request_button'] = logged_in_author.can_send_a_friend_request_to(detail_author)
context['outgoing_friend_request_for'] = logged_in_author.has_outgoing_friend_request_for(detail_author)
context['incoming_friend_request_from'] = logged_in_author.has_incoming_friend_request_from(detail_author)
context['is_friends'] = logged_in_author.friends_with(detail_author)
else:
context['show_follow_button'] = False
context['show_unfollow_button'] = False
context['show_friend_request_button'] = False
context['outgoing_friend_request_for'] = False
context['incoming_friend_request_from'] = False
context['is_friends'] = False
return context
def find_remote_author(request):
"""
Source: https://docs.djangoproject.com/en/1.10/topics/forms/#the-form-class (2017-04-09)
"""
if request.method == "POST":
form = FindRemoteAuthorForm(request.POST)
if form.is_valid():
uri = form.cleaned_data['uri']
match = re.match(FindRemoteAuthorForm.URI_REGEX, uri)
(host, pk) = (match.group('host'), match.group('pk'))
author = None
try:
node = Node.objects.get(host=host)
if node.local:
author = Author.objects.get(id=pk)
else:
author = node.create_or_update_remote_author(pk)
except Node.DoesNotExist:
form.add_error('uri', "The remote Author's node was not found. Please contact the system "
"administrator and ask them to add their Node.")
except Author.DoesNotExist:
form.add_error('uri', "Author not found.")
except HTTPError:
form.add_error('uri', "Problem connecting to the remote Node. Please try again later.")
if form.is_valid():
# Need to check again to see if any errors got added
return redirect('app:authors:detail', pk=author.id)
else:
form = FindRemoteAuthorForm()
return render(request, 'app/find_remote_author.html', {'form': form})
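# FindRemoteAuthorForm.URI_REGEX is assumed to expose named groups "host" and
# "pk", as consumed by re.match above. A hypothetical pattern of that shape:
_DEMO_URI_REGEX = r"^https?://(?P<host>[^/]+)/(?:service/)?authors?/(?P<pk>[0-9a-f-]+)/?$"
# re.match(_DEMO_URI_REGEX,
#          "https://node.example.com/author/123e4567-e89b-12d3-a456-426655440000")
# would yield host="node.example.com" and
# pk="123e4567-e89b-12d3-a456-426655440000".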
| |
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import print_function, unicode_literals
import re
import requests
import requests.exceptions
from jinja2.exceptions import TemplateSyntaxError
import frappe
from frappe import _
from frappe.utils import get_datetime, now, strip_html, quoted
from frappe.utils.jinja import render_template
from frappe.website.doctype.website_slideshow.website_slideshow import get_slideshow
from frappe.website.router import resolve_route
from frappe.website.utils import (extract_title, find_first_image, get_comment_list,
get_html_content_based_on_type)
from frappe.website.website_generator import WebsiteGenerator
from frappe.utils.safe_exec import safe_exec
class WebPage(WebsiteGenerator):
def validate(self):
self.validate_dates()
self.set_route()
if not self.dynamic_route:
self.route = quoted(self.route)
def get_feed(self):
return self.title
def on_update(self):
super(WebPage, self).on_update()
def on_trash(self):
super(WebPage, self).on_trash()
def get_context(self, context):
context.main_section = get_html_content_based_on_type(self, 'main_section', self.content_type)
context.source_content_type = self.content_type
context.title = self.title
if self.context_script:
_locals = dict(context=frappe._dict())
safe_exec(self.context_script, None, _locals)
context.update(_locals['context'])
self.render_dynamic(context)
# if static page, get static content
if context.slideshow:
context.update(get_slideshow(self))
if self.enable_comments:
context.comment_list = get_comment_list(self.doctype, self.name)
context.update({
"style": self.css or "",
"script": self.javascript or "",
"header": self.header,
"text_align": self.text_align,
})
if not self.show_title:
context["no_header"] = 1
self.set_metatags(context)
self.set_breadcrumbs(context)
self.set_title_and_header(context)
self.set_page_blocks(context)
return context
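# A minimal illustration of the context_script hook above (a standalone
# sketch, not part of the doctype): safe_exec runs the stored script with a
# fresh frappe._dict bound to "context" in its locals, and whatever the
# script assigns is merged into the page context via context.update(...).
def _demo_context_script(script="context.greeting = 'hello'"):
    _locals = dict(context=frappe._dict())
    safe_exec(script, None, _locals)
    return _locals['context']  # -> {'greeting': 'hello'}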
def render_dynamic(self, context):
# dynamic
is_jinja = context.dynamic_template or "<!-- jinja -->" in context.main_section
if is_jinja or ("{{" in context.main_section):
try:
context["main_section"] = render_template(context.main_section, context)
if not "<!-- static -->" in context.main_section:
context["no_cache"] = 1
except TemplateSyntaxError:
if is_jinja:
raise
def set_breadcrumbs(self, context):
"""Set breadcrumb parents from the breadcrumbs field, or disable them via a content marker."""
if self.breadcrumbs:
context.parents = frappe.safe_eval(self.breadcrumbs, {"_": _})
if "no_breadcrumbs" not in context:
if "<!-- no-breadcrumbs -->" in context.main_section:
context.no_breadcrumbs = 1
def set_title_and_header(self, context):
"""Extract and set title and header from content or context."""
if not "no_header" in context:
if "<!-- no-header -->" in context.main_section:
context.no_header = 1
if not context.title:
context.title = extract_title(context.main_section, context.path_name)
# header
if context.no_header and "header" in context:
context.header = ""
if not context.no_header:
# if header not set and no h1 tag in the body, set header as title
if not context.header and "<h1" not in context.main_section:
context.header = context.title
# add h1 tag to header
if context.get("header") and not re.findall("<h.>", context.header):
context.header = "<h1>" + context.header + "</h1>"
# if title not set, set title from header
if not context.title and context.header:
context.title = strip_html(context.header)
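# Illustrative helper (a standalone sketch) mirroring the wrapping rule
# above: a bare <h1>..<h6> tag is respected, anything else is promoted to an
# <h1>. The check is deliberately cheap and does not match headings that
# carry attributes.
def _demo_wrap_header(header="Page Title"):
    if header and not re.findall(r"<h[1-6]>", header):
        header = "<h1>" + header + "</h1>"
    return header  # -> "<h1>Page Title</h1>"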
def set_page_blocks(self, context):
if self.content_type != 'Page Builder':
return
out = get_web_blocks_html(self.page_blocks)
context.page_builder_html = out.html
context.page_builder_scripts = out.scripts
context.page_builder_styles = out.styles
def add_hero(self, context):
"""Add a hero element if specified in content or hooks.
Hero elements get full page width."""
context.hero = ""
if "<!-- start-hero -->" in context.main_section:
parts1 = context.main_section.split("<!-- start-hero -->")
parts2 = parts1[1].split("<!-- end-hero -->")
context.main_section = parts1[0] + parts2[1]
context.hero = parts2[0]
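# Illustrative round trip for the hero extraction above, with made-up markup:
# the text between the markers becomes the hero and the remainder is stitched
# back together.
def _demo_split_hero(main_section="intro <!-- start-hero -->HERO<!-- end-hero --> body"):
    parts1 = main_section.split("<!-- start-hero -->")
    parts2 = parts1[1].split("<!-- end-hero -->")
    return parts1[0] + parts2[1], parts2[0]  # -> ("intro  body", "HERO")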
def check_for_redirect(self, context):
if "<!-- redirect:" in context.main_section:
frappe.local.flags.redirect_location = \
context.main_section.split("<!-- redirect:")[1].split("-->")[0].strip()
raise frappe.Redirect
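# Illustrative parse for the redirect marker above: everything between
# "<!-- redirect:" and the closing "-->" is the target location.
def _demo_redirect_target(main_section="x <!-- redirect: /new-home --> y"):
    return main_section.split("<!-- redirect:")[1].split("-->")[0].strip()  # -> "/new-home"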
def set_metatags(self, context):
if not context.metatags:
context.metatags = {
"name": self.meta_title or self.title,
"description": self.meta_description,
"image": self.meta_image or find_first_image(context.main_section or ""),
"og:type": "website"
}
def validate_dates(self):
if self.end_date:
if self.start_date and get_datetime(self.end_date) < get_datetime(self.start_date):
frappe.throw(_("End Date cannot be before Start Date!"))
# If the current date is past end date, and
# web page is published, empty the end date
if self.published and now() > self.end_date:
self.end_date = None
frappe.msgprint(_("Clearing end date, as it cannot be in the past for published pages."))
def check_publish_status():
# called via daily scheduler
web_pages = frappe.get_all("Web Page", fields=["name", "published", "start_date", "end_date"])
now_date = get_datetime(now())
for page in web_pages:
start_date = page.start_date if page.start_date else ""
end_date = page.end_date if page.end_date else ""
if page.published:
# Unpublish pages that are outside the set date ranges
if (start_date and now_date < start_date) or (end_date and now_date > end_date):
frappe.db.set_value("Web Page", page.name, "published", 0)
else:
# Publish pages that are inside the set date ranges
if start_date:
if not end_date or (end_date and now_date < end_date):
frappe.db.set_value("Web Page", page.name, "published", 1)
def check_broken_links():
cnt = 0
for p in frappe.db.sql("select name, main_section from `tabWeb Page`", as_dict=True):
for link in re.findall('href=["\']([^"\']*)["\']', p.main_section):
if link.startswith("http"):
try:
res = requests.get(link)
except requests.exceptions.SSLError:
res = frappe._dict({"status_code": "SSL Error"})
except requests.exceptions.ConnectionError:
res = frappe._dict({"status_code": "Connection Error"})
if res.status_code != 200:
print("[{0}] {1}: {2}".format(res.status_code, p.name, link))
cnt += 1
else:
link = link[1:] # remove leading /
link = link.split("#")[0]
if not resolve_route(link):
print(p.name + ":" + link)
cnt += 1
print("{0} links broken".format(cnt))
def get_web_blocks_html(blocks):
'''Convert a list of web blocks into raw HTML, extracting their scripts and styles for deduplication'''
out = frappe._dict(html='', scripts=[], styles=[])
extracted_scripts = []
extracted_styles = []
for block in blocks:
web_template = frappe.get_cached_doc('Web Template', block.web_template)
rendered_html = frappe.render_template('templates/includes/web_block.html', context={
'web_block': block,
'web_template_html': web_template.render(block.web_template_values),
'web_template_type': web_template.type
})
html, scripts, styles = extract_script_and_style_tags(rendered_html)
out.html += html
if block.web_template not in extracted_scripts:
out.scripts += scripts
extracted_scripts.append(block.web_template)
if block.web_template not in extracted_styles:
out.styles += styles
extracted_styles.append(block.web_template)
return out
def extract_script_and_style_tags(html):
from bs4 import BeautifulSoup
soup = BeautifulSoup(html, "html.parser")
scripts = []
styles = []
for script in soup.find_all('script'):
scripts.append(script.string)
script.extract()
for style in soup.find_all('style'):
styles.append(style.string)
style.extract()
return str(soup), scripts, styles
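# Usage sketch for extract_script_and_style_tags with illustrative markup:
# the tags are stripped from the returned HTML and their bodies collected in
# document order.
def _demo_extract(html='<div>hi</div><script>x()</script><style>.a{}</style>'):
    return extract_script_and_style_tags(html)
    # -> ('<div>hi</div>', ['x()'], ['.a{}'])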