gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
from datetime import timedelta, datetime
import freezegun
import os
from unittest import TestCase
from unittest.mock import MagicMock, patch
from kobin.requests import (
Request, _split_into_mimetype_and_priority, _parse_and_sort_accept_header, accept_best_match
)
from kobin.responses import BaseResponse
# Template search path for tests that render templates (relative to this file).
TEMPLATE_DIRS = [os.path.join(os.path.dirname(__file__), 'templates')]
class RequestTests(TestCase):
    """Tests for kobin's WSGI ``Request`` wrapper.

    Each test builds a minimal WSGI environ dict (faking ``wsgi.input``
    with a MagicMock where a request body is needed) and checks the
    corresponding ``Request`` accessor.
    """
    def test_initialized(self):
        env = {'hoge': 'HOGE'}
        request = Request(env)
        self.assertEqual(request['hoge'], 'HOGE')
    def test_get(self):
        request = Request({'hoge': 'HOGE'})
        self.assertEqual(request.get('hoge'), 'HOGE')
    def test_getitem(self):
        request = Request({'hoge': 'HOGE'})
        self.assertEqual(request['hoge'], 'HOGE')
    def test_get_default_value(self):
        # get() falls back to the supplied default when the key is absent.
        request = Request({})
        self.assertEqual(request.get('hoge', 'HOGE'), 'HOGE')
    def test_path_property(self):
        request = Request({'PATH_INFO': '/hoge'})
        self.assertEqual(request.path, '/hoge')
    def test_path_property_stripped_last_slash(self):
        # A PATH_INFO without a leading slash is normalized to have one.
        request = Request({'PATH_INFO': 'hoge'})
        self.assertEqual(request.path, '/hoge')
    def test_method_name_to_uppercase(self):
        # REQUEST_METHOD is normalized to uppercase regardless of input case.
        self.assertEqual(Request({'REQUEST_METHOD': 'get'}).method, 'GET')
        self.assertEqual(Request({'REQUEST_METHOD': 'Post'}).method, 'POST')
    def test_POST_a_parameter(self):
        wsgi_input_mock = MagicMock()
        wsgi_input_mock.read.return_value = b'key1=value1'
        request = Request({
            'REQUEST_METHOD': 'POST',
            'QUERY_STRING': '',
            'wsgi.input': wsgi_input_mock,
            'CONTENT_TYPE': 'application/x-www-form-urlencoded',
            'CONTENT_LENGTH': len(b'key1=value1'),
        })
        self.assertEqual(request.forms['key1'], 'value1')
    def test_POST_parameters(self):
        wsgi_input_mock = MagicMock()
        wsgi_input_mock.read.return_value = b'key1=value1&key2=value2'
        request = Request({
            'REQUEST_METHOD': 'POST',
            'QUERY_STRING': '',
            'wsgi.input': wsgi_input_mock,
            'CONTENT_TYPE': 'application/x-www-form-urlencoded',
            'CONTENT_LENGTH': len(b'key1=value1&key2=value2'),
        })
        self.assertEqual(request.forms['key1'], 'value1')
        self.assertEqual(request.forms['key2'], 'value2')
    def test_GET_a_parameter(self):
        request = Request({
            'REQUEST_METHOD': 'GET',
            'QUERY_STRING': 'key1=value1',
            'CONTENT_TYPE': 'text/plain',
            'CONTENT_LENGTH': '',
        })
        self.assertEqual(request.query['key1'], 'value1')
    def test_GET_parameters(self):
        request = Request({
            'REQUEST_METHOD': 'GET',
            'QUERY_STRING': 'key1=value1&key2=value2',
            'CONTENT_TYPE': 'text/plain',
            'CONTENT_LENGTH': '',
        })
        self.assertEqual(request.query['key1'], 'value1')
        self.assertEqual(request.query['key2'], 'value2')
    def test_raw_body(self):
        wsgi_input_mock = MagicMock()
        wsgi_input_mock.read.return_value = b'{"key1": "value1"}'
        request = Request({
            'REQUEST_METHOD': 'POST',
            'QUERY_STRING': '',
            'wsgi.input': wsgi_input_mock,
            'CONTENT_TYPE': 'application/json',
            'CONTENT_LENGTH': len(b'{"key1": "value1"}'),
        })
        self.assertEqual(request.raw_body, b'{"key1": "value1"}')
    def test_raw_body_with_empty_string_content_length(self):
        # CONTENT_LENGTH may legitimately be '' in a WSGI environ; the
        # raw body must then come back empty instead of raising.
        wsgi_input_mock = MagicMock()
        wsgi_input_mock.read.return_value = b''
        request = Request({
            'REQUEST_METHOD': 'POST',
            'QUERY_STRING': '',
            'wsgi.input': wsgi_input_mock,
            'CONTENT_TYPE': 'text/plain',
            'CONTENT_LENGTH': '',
        })
        self.assertEqual(request.raw_body, b'')
    def test_body(self):
        # .body is the decoded (str) counterpart of .raw_body.
        wsgi_input_mock = MagicMock()
        wsgi_input_mock.read.return_value = b'{"key1": "value1"}'
        request = Request({
            'REQUEST_METHOD': 'POST',
            'QUERY_STRING': '',
            'wsgi.input': wsgi_input_mock,
            'CONTENT_TYPE': 'application/json',
            'CONTENT_LENGTH': len(b'{"key1": "value1"}'),
        })
        self.assertEqual(request.body, '{"key1": "value1"}')
    def test_json(self):
        wsgi_input_mock = MagicMock()
        wsgi_input_mock.read.return_value = b'{"key1": "value1"}'
        request = Request({
            'REQUEST_METHOD': 'POST',
            'QUERY_STRING': '',
            'wsgi.input': wsgi_input_mock,
            'CONTENT_TYPE': 'application/json',
            'CONTENT_LENGTH': len(b'{"key1": "value1"}'),
        })
        self.assertEqual(request.json["key1"], "value1")
    def test_url(self):
        # .url is reconstructed from the X-Forwarded-* headers, path and query.
        request = Request({
            'HTTP_X_FORWARDED_PROTO': 'http',
            'QUERY_STRING': 'key1=value1&key2=value2',
            'HTTP_X_FORWARDED_HOST': 'localhost',
            'PATH_INFO': '/hoge',
        })
        actual = request.url
        self.assertEqual(actual, "http://localhost/hoge?key1=value1&key2=value2")
    def test_headers(self):
        # HTTP_FOO in the environ is exposed as header 'FOO'.
        request = Request({'HTTP_FOO': 'Bar', 'QUERY_STRING': 'key1=value1'})
        self.assertEqual(request.headers['FOO'], 'Bar')
class AcceptBestMatchTests(TestCase):
    """Tests for Accept-header parsing and content-negotiation helpers."""
    def test_split_into_mimetype_and_priority_without_priority(self):
        # A media range without an explicit q parameter defaults to 1.0.
        item = 'text/*'
        actual = _split_into_mimetype_and_priority(item)
        expected = ('text/*', 1.0)
        self.assertEqual(actual, expected)
    def test_split_into_mimetype_and_priority_with_priority(self):
        item = 'application/json;q=0.5'
        actual = _split_into_mimetype_and_priority(item)
        expected = ('application/json', 0.5)
        self.assertEqual(actual, expected)
    def test_parse_and_sort_accept_header(self):
        # Parsed entries come back sorted by descending priority.
        accept_header = 'application/json;q=0.5, text/html'
        actual = _parse_and_sort_accept_header(accept_header)
        expected = [
            ('text/html', 1.0),
            ('application/json', 0.5)
        ]
        self.assertEqual(actual, expected)
    def test_best_match_without_priority(self):
        accept_header = 'application/json, application/xml'
        expected = 'application/json'
        actual = accept_best_match(accept_header, ['application/json'])
        self.assertEqual(actual, expected)
    def test_best_match_with_priority(self):
        # BUG FIX: the fixture header was corrupted — it contained the
        # malformed wildcard '*/;q=0.1' and the truncated quality value
        # 'q=0.'. Restored to valid RFC 7231 media ranges; the only range
        # matching 'application/json' is the '*/*;q=0.1' wildcard.
        accept_header = 'text/*;q=0.9, */*;q=0.1, audio/mpeg, application/xml;q=0.5'
        expected = 'application/json'
        actual = accept_best_match(accept_header, ['application/json'])
        self.assertEqual(actual, expected)
    def test_best_match_with_priority_and_wildcard(self):
        # text/* (implicit q=1.0) outranks application/json;q=0.5, so the
        # server's text/plain wins.
        accept_header = 'application/json;q=0.5, text/*, */*;q=0.1'
        actual = accept_best_match(accept_header, ['application/json', 'text/plain'])
        expected = 'text/plain'
        self.assertEqual(actual, expected)
class CookieTests(TestCase):
    """Tests for cookie handling on both sides of the request cycle:
    BaseResponse.set_cookie/delete_cookie and Request.cookies/get_cookie,
    including the signed-cookie (secret) round trip.
    """
    # Set Cookie Tests in BaseResponse Class
    def test_set_cookie(self):
        response = BaseResponse()
        response.set_cookie('foo', 'bar')
        expected_set_cookie = ('Set-Cookie', 'foo=bar; Path=/')
        self.assertIn(expected_set_cookie, response.headerlist)
    def test_set_cookie_with_max_age(self):
        # max_age accepts a timedelta and is serialized as whole seconds.
        response = BaseResponse()
        response.set_cookie('foo', 'bar', max_age=timedelta(seconds=10), path=None)
        expected_set_cookie = ('Set-Cookie', 'foo=bar; Max-Age=10')
        self.assertIn(expected_set_cookie, response.headerlist)
    def test_set_cookie_with_expires(self):
        # expires accepts a datetime and is rendered in RFC 1123 GMT form.
        response = BaseResponse()
        response.set_cookie('foo', 'bar', expires=datetime(2017, 1, 1, 0, 0, 0), path=None)
        expected_set_cookie = ('Set-Cookie', 'foo=bar; expires=Sun, 01 Jan 2017 00:00:00 GMT')
        self.assertIn(expected_set_cookie, response.headerlist)
    def test_set_cookie_with_path(self):
        response = BaseResponse()
        response.set_cookie('foo', 'bar', path='/foo')
        expected_set_cookie = ('Set-Cookie', 'foo=bar; Path=/foo')
        self.assertIn(expected_set_cookie, response.headerlist)
    # Get Cookie Tests in Request Class
    def test_cookies_property_has_nothing(self):
        request = Request({})
        self.assertEqual(len(request.cookies), 0)
    def test_cookies_property_has_an_item(self):
        request = Request({'HTTP_COOKIE': 'foo="bar"'})
        self.assertEqual(len(request.cookies), 1)
    def test_get_cookie(self):
        request = Request({'HTTP_COOKIE': 'foo="bar"'})
        actual = request.get_cookie("foo")
        expected = 'bar'
        self.assertEqual(actual, expected)
    # Delete Cookie Tests in Request Class
    @freezegun.freeze_time('2017-01-01 00:00:00')
    def test_delete_cookie(self):
        # Deleting is implemented as setting an already-expired empty cookie;
        # time is frozen so the expires timestamp is deterministic.
        response = BaseResponse()
        response.delete_cookie('foo')
        expected_set_cookie = (
            'Set-Cookie',
            'foo=""; expires=Sun, 01 Jan 2017 00:00:00 GMT; Max-Age=-1; Path=/')
        self.assertIn(expected_set_cookie, response.headerlist)
    # Get and Set Cookie Tests with secret
    def test_set_cookie_with_secret(self):
        # The literal below is the expected "!<signature>?<payload>" value
        # produced for key 'secretkey'; presumably a signed, serialized
        # ('foo', 'bar') pair — do not reformat it.
        response = BaseResponse()
        response.set_cookie('foo', 'bar', secret='secretkey', path=None)
        expected_set_cookie = ('Set-Cookie', 'foo="!VzhGFLGcW+5OMs1s4beLXaqFxAUwgHdWkH5fgapghoI='
                                             '?gASVDwAAAAAAAACMA2Zvb5SMA2JhcpSGlC4="')
        self.assertIn(expected_set_cookie, response.headerlist)
    def test_get_cookie_with_secret(self):
        # Round trip: the signed value from the previous fixture decodes
        # back to 'bar' when the matching secret is supplied.
        request = Request({'HTTP_COOKIE': 'foo="!VzhGFLGcW+5OMs1s4beLXaqFxAUwgHdWkH5fgapghoI='
                                          '?gASVDwAAAAAAAACMA2Zvb5SMA2JhcpSGlC4="'})
        actual = request.get_cookie("foo", secret='secretkey')
        expected = 'bar'
        self.assertEqual(actual, expected)
    @patch('kobin.app.current_config')
    def test_set_cookie_with_secret_in_config(self, mock_current_config):
        # When no explicit secret is passed, set_cookie falls back to the
        # app config; current_config is patched to supply 'secretkey'.
        mock_current_config.return_value = "secretkey"
        response = BaseResponse()
        response.set_cookie('foo', 'bar', path=None)
        expected_set_cookie = ('Set-Cookie', 'foo="!VzhGFLGcW+5OMs1s4beLXaqFxAUwgHdWkH5fgapghoI='
                                             '?gASVDwAAAAAAAACMA2Zvb5SMA2JhcpSGlC4="')
        self.assertIn(expected_set_cookie, response.headerlist)
| |
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import warnings
import appdirs
try:
from keystoneauth1 import loading
except ImportError:
loading = None
import yaml
from os_client_config import cloud_config
from os_client_config import defaults
from os_client_config import exceptions
from os_client_config import vendors
# Platform-appropriate config/cache locations, plus the legacy unix paths.
APPDIRS = appdirs.AppDirs('openstack', 'OpenStack', multipath='/etc')
CONFIG_HOME = APPDIRS.user_config_dir
CACHE_PATH = APPDIRS.user_cache_dir
UNIX_CONFIG_HOME = os.path.join(
    os.path.expanduser(os.path.join('~', '.config')), 'openstack')
UNIX_SITE_CONFIG_HOME = '/etc/openstack'
SITE_CONFIG_HOME = APPDIRS.site_config_dir
# Search order: cwd first, then user dirs, then site-wide dirs.
CONFIG_SEARCH_PATH = [
    os.getcwd(),
    CONFIG_HOME, UNIX_CONFIG_HOME,
    SITE_CONFIG_HOME, UNIX_SITE_CONFIG_HOME
]
YAML_SUFFIXES = ('.yaml', '.yml')
# Every candidate clouds.yaml / clouds-public.yaml location, in priority order.
CONFIG_FILES = [
    os.path.join(d, 'clouds' + s)
    for d in CONFIG_SEARCH_PATH
    for s in YAML_SUFFIXES
]
VENDOR_FILES = [
    os.path.join(d, 'clouds-public' + s)
    for d in CONFIG_SEARCH_PATH
    for s in YAML_SUFFIXES
]
# Config keys that must be coerced to bool (see get_boolean).
BOOL_KEYS = ('insecure', 'cache')
# NOTE(dtroyer): This turns out to be not the best idea so let's move
# overriding defaults to a kwarg to OpenStackConfig.__init__()
# Remove this sometime in June 2015 once OSC is comfortably
# changed-over and global-defaults is updated.
def set_default(key, value):
    """Deprecated: mutate the library-wide default value for *key*.

    Prefer the ``override_defaults`` parameter of ``OpenStackConfig``.
    """
    warnings.warn(
        "Use of set_default() is deprecated. Defaults should be set with the "
        "`override_defaults` parameter of OpenStackConfig."
    )
    # get_defaults() lazily builds the module-level dict poked below.
    defaults.get_defaults()
    defaults._defaults[key] = value
def get_boolean(value):
    """Coerce *value* to bool.

    Booleans pass straight through; any string equal to 'true'
    (case-insensitively) is True, everything else is False.
    """
    if isinstance(value, bool):
        return value
    return value.lower() == 'true'
def _get_os_environ():
    """Build a cloud dict from OS_* environment variables.

    Starts from the library defaults and overlays every OS_* variable
    (lower-cased, prefix stripped). Returns None when there is nothing
    usable in the environment — including when OS_REGION_NAME is the
    only variable set, since that acts as a cloud selector rather than
    a cloud definition.
    """
    ret = defaults.get_defaults()
    environkeys = [
        key for key in os.environ
        if key.startswith('OS_')
        and not key.startswith('OS_TEST')  # infra CI var
        and not key.startswith('OS_STD')   # infra CI var
    ]
    region_only = len(environkeys) == 1 and 'OS_REGION_NAME' in environkeys
    # If the only environ key is region name, don't make a cloud, because
    # it's being used as a cloud selector
    if not environkeys or region_only:
        return None
    for key in environkeys:
        ret[key[3:].lower()] = os.environ[key]
    return ret
def _auth_update(old_dict, new_dict):
"""Like dict.update, except handling the nested dict called auth."""
for (k, v) in new_dict.items():
if k == 'auth':
if k in old_dict:
old_dict[k].update(v)
else:
old_dict[k] = v.copy()
else:
old_dict[k] = v
return old_dict
class OpenStackConfig(object):
    """Discover, merge, and normalize OpenStack client configuration.

    Configuration is layered in increasing precedence: built-in library
    defaults, vendor profile data, clouds.yaml entries found on
    CONFIG_SEARCH_PATH, OS_* environment variables, and per-call
    arguments to get_one_cloud().
    """
    def __init__(self, config_files=None, vendor_files=None,
                 override_defaults=None, force_ipv4=None):
        self._config_files = config_files or CONFIG_FILES
        self._vendor_files = vendor_files or VENDOR_FILES
        # An explicit file from the environment takes top priority.
        # NOTE(review): when config_files is None, the insert below mutates
        # the shared module-level CONFIG_FILES list for later instances too.
        config_file_override = os.environ.pop('OS_CLIENT_CONFIG_FILE', None)
        if config_file_override:
            self._config_files.insert(0, config_file_override)
        self.defaults = defaults.get_defaults()
        if override_defaults:
            self.defaults.update(override_defaults)
        # First, use a config file if it exists where expected
        self.config_filename, self.cloud_config = self._load_config_file()
        if not self.cloud_config:
            self.cloud_config = {'clouds': {}}
        if 'clouds' not in self.cloud_config:
            self.cloud_config['clouds'] = {}
        # Grab ipv6 preference settings from env
        client_config = self.cloud_config.get('client', {})
        if force_ipv4 is not None:
            # If it's passed in to the constructor, honor it.
            self.force_ipv4 = force_ipv4
        else:
            # Get the backwards compat value
            prefer_ipv6 = get_boolean(
                os.environ.pop(
                    'OS_PREFER_IPV6', client_config.get(
                        'prefer_ipv6', client_config.get(
                            'prefer-ipv6', True))))
            force_ipv4 = get_boolean(
                os.environ.pop(
                    'OS_FORCE_IPV4', client_config.get(
                        'force_ipv4', client_config.get(
                            'broken-ipv6', False))))
            self.force_ipv4 = force_ipv4
            if not prefer_ipv6:
                # this will only be false if someone set it explicitly
                # honor their wishes
                self.force_ipv4 = True
        # Next, process environment variables and add them to the mix
        self.envvar_key = os.environ.pop('OS_CLOUD_NAME', 'envvars')
        if self.envvar_key in self.cloud_config['clouds']:
            raise exceptions.OpenStackConfigException(
                '"{0}" defines a cloud named "{1}", but'
                ' OS_CLOUD_NAME is also set to "{1}". Please rename'
                ' either your environment based cloud, or one of your'
                ' file-based clouds.'.format(self.config_filename,
                                             self.envvar_key))
        # Pull out OS_CLOUD so that if it's the only thing set, do not
        # make an envvars cloud
        self.default_cloud = os.environ.pop('OS_CLOUD', None)
        envvars = _get_os_environ()
        if envvars:
            self.cloud_config['clouds'][self.envvar_key] = envvars
        # Finally, fall through and make a cloud that starts with defaults
        # because we need somewhere to put arguments, and there are neither
        # config files or env vars
        if not self.cloud_config['clouds']:
            self.cloud_config = dict(
                clouds=dict(defaults=dict(self.defaults)))
        # Caching knobs; 'dogpile.cache.null' means no caching by default.
        self._cache_max_age = 0
        self._cache_path = CACHE_PATH
        self._cache_class = 'dogpile.cache.null'
        self._cache_arguments = {}
        if 'cache' in self.cloud_config:
            self._cache_max_age = self.cloud_config['cache'].get(
                'max_age', self._cache_max_age)
            if self._cache_max_age:
                # A non-zero max_age implies an actual cache backend.
                self._cache_class = 'dogpile.cache.memory'
            self._cache_path = os.path.expanduser(
                self.cloud_config['cache'].get('path', self._cache_path))
            self._cache_class = self.cloud_config['cache'].get(
                'class', self._cache_class)
            self._cache_arguments = self.cloud_config['cache'].get(
                'arguments', self._cache_arguments)
    def _load_config_file(self):
        """Return (path, parsed yaml) of the first clouds.yaml found."""
        return self._load_yaml_file(self._config_files)
    def _load_vendor_file(self):
        """Return (path, parsed yaml) of the first clouds-public.yaml found."""
        return self._load_yaml_file(self._vendor_files)
    def _load_yaml_file(self, filelist):
        """Load the first existing file in *filelist*; (None, None) if none exist."""
        for path in filelist:
            if os.path.exists(path):
                with open(path, 'r') as f:
                    return path, yaml.safe_load(f)
        return (None, None)
    def _normalize_keys(self, config):
        """Recursively rewrite dict keys from dashed to underscored form."""
        new_config = {}
        for key, value in config.items():
            key = key.replace('-', '_')
            if isinstance(value, dict):
                new_config[key] = self._normalize_keys(value)
            else:
                new_config[key] = value
        return new_config
    def get_cache_max_age(self):
        """Seconds a cached entry stays valid (0 disables caching)."""
        return self._cache_max_age
    def get_cache_path(self):
        """Filesystem path for the cache backend."""
        return self._cache_path
    def get_cache_class(self):
        """Dotted name of the dogpile.cache backend class."""
        return self._cache_class
    def get_cache_arguments(self):
        """Extra keyword arguments for the cache backend."""
        return self._cache_arguments
    def _get_regions(self, cloud):
        """Return the list of region names configured for *cloud*.

        Falls back to a single empty-string region when nothing is set;
        a comma-separated region_name string is split (deprecated form).
        """
        if cloud not in self.cloud_config['clouds']:
            return ['']
        config = self._normalize_keys(self.cloud_config['clouds'][cloud])
        if 'regions' in config:
            return config['regions']
        elif 'region_name' in config:
            regions = config['region_name'].split(',')
            if len(regions) > 1:
                warnings.warn(
                    "Comma separated lists in region_name are deprecated."
                    " Please use a yaml list in the regions"
                    " parameter in {0} instead.".format(self.config_filename))
            return regions
        else:
            return ['']
    def _get_region(self, cloud=None):
        """Return the first configured region for *cloud*."""
        return self._get_regions(cloud)[0]
    def get_cloud_names(self):
        """Return the names of every configured cloud (a dict keys view)."""
        return self.cloud_config['clouds'].keys()
    def _get_base_cloud_config(self, name):
        """Build the merged defaults + vendor profile + file dict for *name*.

        :raises exceptions.OpenStackConfigException: if *name* was given
            but is not a configured cloud.
        """
        cloud = dict()
        # Only validate cloud name if one was given
        if name and name not in self.cloud_config['clouds']:
            raise exceptions.OpenStackConfigException(
                "Named cloud {name} requested that was not found.".format(
                    name=name))
        our_cloud = self.cloud_config['clouds'].get(name, dict())
        # Get the defaults
        cloud.update(self.defaults)
        # Expand a profile if it exists. 'cloud' is an old confusing name
        # for this.
        profile_name = our_cloud.get('profile', our_cloud.get('cloud', None))
        if profile_name and profile_name != self.envvar_key:
            if 'cloud' in our_cloud:
                warnings.warn(
                    "{0} use the keyword 'cloud' to reference a known "
                    "vendor profile. This has been deprecated in favor of the "
                    "'profile' keyword.".format(self.config_filename))
            vendor_filename, vendor_file = self._load_vendor_file()
            if vendor_file and profile_name in vendor_file['public-clouds']:
                _auth_update(cloud, vendor_file['public-clouds'][profile_name])
            else:
                profile_data = vendors.get_profile(profile_name)
                if profile_data:
                    _auth_update(cloud, profile_data)
                else:
                    # Can't find the requested vendor config, go about business
                    warnings.warn("Couldn't find the vendor profile '{0}', for"
                                  " the cloud '{1}'".format(profile_name,
                                                            name))
        if 'auth' not in cloud:
            cloud['auth'] = dict()
        _auth_update(cloud, our_cloud)
        if 'cloud' in cloud:
            del cloud['cloud']
        return self._fix_backwards_madness(cloud)
    def _fix_backwards_madness(self, cloud):
        """Apply every legacy-key translation pass in order."""
        cloud = self._fix_backwards_project(cloud)
        cloud = self._fix_backwards_auth_plugin(cloud)
        cloud = self._fix_backwards_interface(cloud)
        cloud = self._handle_domain_id(cloud)
        return cloud
    def _handle_domain_id(self, cloud):
        # Allow people to just specify domain once if it's the same
        mappings = {
            'domain_id': ('user_domain_id', 'project_domain_id'),
            'domain_name': ('user_domain_name', 'project_domain_name'),
        }
        for target_key, possible_values in mappings.items():
            for key in possible_values:
                if target_key in cloud['auth'] and key not in cloud['auth']:
                    cloud['auth'][key] = cloud['auth'][target_key]
            # The generic domain key is consumed and removed.
            cloud['auth'].pop(target_key, None)
        return cloud
    def _fix_backwards_project(self, cloud):
        # Do the lists backwards so that project_name is the ultimate winner
        # Also handle moving domain names into auth so that domain mapping
        # is easier
        mappings = {
            'project_id': ('tenant_id', 'tenant-id',
                           'project_id', 'project-id'),
            'project_name': ('tenant_name', 'tenant-name',
                             'project_name', 'project-name'),
            'domain_id': ('domain_id', 'domain-id'),
            'domain_name': ('domain_name', 'domain-name'),
            'user_domain_id': ('user_domain_id', 'user-domain-id'),
            'user_domain_name': ('user_domain_name', 'user-domain-name'),
            'project_domain_id': ('project_domain_id', 'project-domain-id'),
            'project_domain_name': (
                'project_domain_name', 'project-domain-name'),
        }
        for target_key, possible_values in mappings.items():
            target = None
            for key in possible_values:
                # Later spellings (and auth-nested values) win; all legacy
                # spellings are removed as they are consumed.
                if key in cloud:
                    target = str(cloud[key])
                    del cloud[key]
                if key in cloud['auth']:
                    target = str(cloud['auth'][key])
                    del cloud['auth'][key]
            if target:
                cloud['auth'][target_key] = target
        return cloud
    def _fix_backwards_auth_plugin(self, cloud):
        # Do the lists backwards so that auth_type is the ultimate winner
        mappings = {
            'auth_type': ('auth_plugin', 'auth_type'),
        }
        for target_key, possible_values in mappings.items():
            target = None
            for key in possible_values:
                if key in cloud:
                    target = cloud[key]
                    del cloud[key]
            # NOTE(review): unlike _fix_backwards_project, this writes the
            # target key even when no source key was found (auth_type=None);
            # _get_auth_loader later treats None as the admin_token plugin.
            cloud[target_key] = target
        # Because we force alignment to v3 nouns, we want to force
        # use of the auth plugin that can do auto-selection and dealing
        # with that based on auth parameters. v2password is basically
        # completely broken
        if cloud['auth_type'] == 'v2password':
            cloud['auth_type'] = 'password'
        return cloud
    def _fix_backwards_interface(self, cloud):
        """Translate legacy *endpoint_type keys to the modern *interface keys."""
        # NOTE(review): this mutates `cloud` while iterating cloud.keys();
        # on Python 3 the keys() view is live, so this is unsafe — iterating
        # over list(cloud.keys()) would be safer. Confirm target runtime.
        for key in cloud.keys():
            if key.endswith('endpoint_type'):
                target_key = key.replace('endpoint_type', 'interface')
                cloud[target_key] = cloud.pop(key)
        return cloud
    def get_all_clouds(self):
        """Return a CloudConfig for every (cloud, region) combination."""
        clouds = []
        for cloud in self.get_cloud_names():
            for region in self._get_regions(cloud):
                clouds.append(self.get_one_cloud(cloud, region_name=region))
        return clouds
    def _fix_args(self, args, argparse=None):
        """Massage the passed-in options
        Replace - with _ and strip os_ prefixes.
        Convert an argparse Namespace object to a dict, removing values
        that are either None or ''.
        """
        if argparse:
            # Convert the passed-in Namespace
            o_dict = vars(argparse)
            parsed_args = dict()
            for k in o_dict:
                if o_dict[k] is not None and o_dict[k] != '':
                    parsed_args[k] = o_dict[k]
            args.update(parsed_args)
        os_args = dict()
        new_args = dict()
        for (key, val) in iter(args.items()):
            if type(args[key]) == dict:
                # dive into the auth dict
                new_args[key] = self._fix_args(args[key])
                continue
            key = key.replace('-', '_')
            if key.startswith('os_'):
                os_args[key[3:]] = val
            else:
                new_args[key] = val
        # os_-prefixed values win over unprefixed ones of the same name.
        new_args.update(os_args)
        return new_args
    def _find_winning_auth_value(self, opt, config):
        """Return config's value for *opt*, trying its deprecated names too."""
        opt_name = opt.name.replace('-', '_')
        if opt_name in config:
            return config[opt_name]
        else:
            # keystoneclient exposes 'deprecated_opts', keystoneauth1
            # exposes 'deprecated'; handle either attribute spelling.
            deprecated = getattr(opt, 'deprecated', getattr(
                opt, 'deprecated_opts', []))
            for d_opt in deprecated:
                d_opt_name = d_opt.name.replace('-', '_')
                if d_opt_name in config:
                    return config[d_opt_name]
    def _get_auth_loader(self, config):
        """Return the keystoneauth1 plugin loader for config['auth_type']."""
        # Re-use the admin_token plugin for the "None" plugin
        # since it does not look up endpoints or tokens but rather
        # does a passthrough. This is useful for things like Ironic
        # that have a keystoneless operational mode, but means we're
        # still dealing with a keystoneauth Session object, so all the
        # _other_ things (SSL arg handling, timeout) all work consistently
        if config['auth_type'] in (None, "None", ''):
            config['auth_type'] = 'admin_token'
            # Set to notused rather than None because validate_auth will
            # strip the value if it's actually python None
            config['auth']['token'] = 'notused'
        return loading.get_plugin_loader(config['auth_type'])
    def _validate_auth_ksc(self, config):
        """Validate/normalize auth options via keystoneclient (legacy path)."""
        try:
            import keystoneclient.auth as ksc_auth
        except ImportError:
            return config
        # May throw a keystoneclient.exceptions.NoMatchingPlugin
        plugin_options = ksc_auth.get_plugin_class(
            config['auth_type']).get_options()
        for p_opt in plugin_options:
            # if it's in config.auth, win, kill it from config dict
            # if it's in config and not in config.auth, move it
            # deprecated loses to current
            # provided beats default, deprecated or not
            winning_value = self._find_winning_auth_value(
                p_opt, config['auth'])
            if not winning_value:
                winning_value = self._find_winning_auth_value(p_opt, config)
            # if the plugin tells us that this value is required
            # then error if it's doesn't exist now
            if not winning_value and p_opt.required:
                # NOTE(review): `cloud` is not defined in this method's
                # scope — if this raise path ever triggers, the .format()
                # call itself raises NameError instead. Needs a fix.
                raise exceptions.OpenStackConfigException(
                    'Unable to find auth information for cloud'
                    ' {cloud} in config files {files}'
                    ' or environment variables. Missing value {auth_key}'
                    ' required for auth plugin {plugin}'.format(
                        cloud=cloud, files=','.join(self._config_files),
                        auth_key=p_opt.name, plugin=config.get('auth_type')))
            # Clean up after ourselves
            for opt in [p_opt.name] + [o.name for o in p_opt.deprecated_opts]:
                opt = opt.replace('-', '_')
                config.pop(opt, None)
                config['auth'].pop(opt, None)
            if winning_value:
                # Prefer the plugin configuration dest value if the value's key
                # is marked as depreciated.
                if p_opt.dest is None:
                    config['auth'][p_opt.name.replace('-', '_')] = (
                        winning_value)
                else:
                    config['auth'][p_opt.dest] = winning_value
        return config
    def _validate_auth(self, config, loader):
        """Validate/normalize auth options via a keystoneauth1 loader."""
        # May throw a keystoneauth1.exceptions.NoMatchingPlugin
        plugin_options = loader.get_options()
        for p_opt in plugin_options:
            # if it's in config.auth, win, kill it from config dict
            # if it's in config and not in config.auth, move it
            # deprecated loses to current
            # provided beats default, deprecated or not
            winning_value = self._find_winning_auth_value(
                p_opt, config['auth'])
            if not winning_value:
                winning_value = self._find_winning_auth_value(p_opt, config)
            # Clean up after ourselves
            for opt in [p_opt.name] + [o.name for o in p_opt.deprecated]:
                opt = opt.replace('-', '_')
                config.pop(opt, None)
                config['auth'].pop(opt, None)
            if winning_value:
                # Prefer the plugin configuration dest value if the value's key
                # is marked as depreciated.
                if p_opt.dest is None:
                    config['auth'][p_opt.name.replace('-', '_')] = (
                        winning_value)
                else:
                    config['auth'][p_opt.dest] = winning_value
        return config
    def get_one_cloud(self, cloud=None, validate=True,
                      argparse=None, **kwargs):
        """Retrieve a single cloud configuration and merge additional options
        :param string cloud:
            The name of the configuration to load from clouds.yaml
        :param boolean validate:
            Validate the config. Setting this to False causes no auth plugin
            to be created. It's really only useful for testing.
        :param Namespace argparse:
            An argparse Namespace object; allows direct passing in of
            argparse options to be added to the cloud config. Values
            of None and '' will be removed.
        :param kwargs: Additional configuration options
        :raises: keystoneauth1.exceptions.MissingRequiredOptions
            on missing required auth parameters
        """
        if cloud is None and self.default_cloud:
            cloud = self.default_cloud
        if cloud is None and self.envvar_key in self.get_cloud_names():
            cloud = self.envvar_key
        args = self._fix_args(kwargs, argparse=argparse)
        if 'region_name' not in args or args['region_name'] is None:
            args['region_name'] = self._get_region(cloud)
        config = self._get_base_cloud_config(cloud)
        # Regions is a list that we can use to create a list of cloud/region
        # objects. It does not belong in the single-cloud dict
        config.pop('regions', None)
        # Can't just do update, because None values take over
        for (key, val) in iter(args.items()):
            if val is not None:
                if key == 'auth' and config[key] is not None:
                    config[key] = _auth_update(config[key], val)
                else:
                    config[key] = val
        for key in BOOL_KEYS:
            if key in config:
                if type(config[key]) is not bool:
                    config[key] = get_boolean(config[key])
        if loading:
            if validate:
                try:
                    loader = self._get_auth_loader(config)
                    config = self._validate_auth(config, loader)
                    auth_plugin = loader.load_from_options(**config['auth'])
                except Exception as e:
                    # We WANT the ksa exception normally
                    # but OSC can't handle it right now, so we try deferring
                    # to ksc. If that ALSO fails, it means there is likely
                    # a deeper issue, so we assume the ksa error was correct
                    auth_plugin = None
                    try:
                        config = self._validate_auth_ksc(config)
                    except Exception:
                        raise e
            else:
                auth_plugin = None
        else:
            auth_plugin = None
            config = self._validate_auth_ksc(config)
        # If any of the defaults reference other values, we need to expand
        for (key, value) in config.items():
            if hasattr(value, 'format'):
                config[key] = value.format(**config)
        force_ipv4 = config.pop('force_ipv4', self.force_ipv4)
        prefer_ipv6 = config.pop('prefer_ipv6', True)
        if not prefer_ipv6:
            force_ipv4 = True
        if cloud is None:
            cloud_name = ''
        else:
            cloud_name = str(cloud)
        return cloud_config.CloudConfig(
            name=cloud_name, region=config['region_name'],
            config=self._normalize_keys(config),
            force_ipv4=force_ipv4,
            auth_plugin=auth_plugin)
    @staticmethod
    def set_one_cloud(config_file, cloud, set_config=None):
        """Set a single cloud configuration.
        :param string config_file:
            The path to the config file to edit. If this file does not exist
            it will be created.
        :param string cloud:
            The name of the configuration to save to clouds.yaml
        :param dict set_config: Configuration options to be set
        """
        set_config = set_config or {}
        cur_config = {}
        try:
            # NOTE(review): yaml.safe_load of an existing-but-empty file
            # returns None, which would break the .get() below — confirm
            # whether empty files need guarding here.
            with open(config_file) as fh:
                cur_config = yaml.safe_load(fh)
        except IOError as e:
            # Not no such file
            if e.errno != 2:
                raise
            pass
        clouds_config = cur_config.get('clouds', {})
        cloud_config = _auth_update(clouds_config.get(cloud, {}), set_config)
        clouds_config[cloud] = cloud_config
        cur_config['clouds'] = clouds_config
        with open(config_file, 'w') as fh:
            yaml.safe_dump(cur_config, fh, default_flow_style=False)
if __name__ == '__main__':
    # Smoke test: print every cloud/region configuration discoverable
    # on this machine.
    config = OpenStackConfig().get_all_clouds()
    for cloud in config:
        print(cloud.name, cloud.region, cloud.config)
| |
import os
import time
from inspect import signature
import enchant
from celery import chord
from celery import group
from fluent import event
from fluent import sender
from .alg_tasks import database_tasks
from .alg_tasks import alg_utils
from .alg_tasks import credible_tasks
from .alg_tasks import gsuite_tasks
from .alg_tasks import mongo_tasks
from .alg_tasks import pickle_tasks
from .alg_tasks import report_tasks
from .app import app
# Batch sizes and hotword lists come from the shared config store.
UPDATE_CLIENT_BATCH_SIZE = alg_utils.get_config('tuning', 'update_client_batch_size')
VC_BATCH_SIZE = alg_utils.get_config('tuning', 'vc_batch_size')
NOTE_ARBITRATION_BATCH_SIZE = alg_utils.get_config('tuning', 'note_arbitration_batch_size')
CLINICAL_HOTWORDS = alg_utils.get_config('overwatch', 'clinical_hotwords')
IGNORE_WORDS = alg_utils.get_config('overwatch', 'ignore_words')
# NOTE(review): this try/except looks broken — os.path.join never raises
# IOError, so the except branch is unreachable; and if it ever did run, it
# opens the file without a write mode (and never closes it) before writing
# to it. Presumably the intent was "create the ignore-words file if it does
# not exist" — confirm and rework.
try:
    IGNORE_WORDLIST_PATH = os.path.join(os.path.dirname(__file__), "ignore_words.txt")
except IOError:
    IGNORE_WORDLIST = open(os.path.join(os.path.dirname(__file__), "ignore_words.txt"))
    for word in IGNORE_WORDS:
        IGNORE_WORDLIST.write(word)
    IGNORE_WORDLIST_PATH = os.path.join(os.path.dirname(__file__), "ignore_words.txt")
# Route task telemetry to fluentd under the alg.worker.overwatch tag.
sender.setup(
    host=alg_utils.get_config('fluent', 'host'),
    port=alg_utils.get_config('fluent', 'port'),
    tag='alg.worker.overwatch')
@app.task
def vc_single(email):
    """Archive every Gmail message listed for *email* into Mongo.

    Walks the full ``messages().list()`` pagination chain and stores each
    message entry via ``mongo_tasks.store_email``.
    """
    service = gsuite_tasks.get_email_request(email)
    request = service.users().messages().list(userId=email)
    while request is not None:
        results = request.execute()
        # BUG FIX: the old code iterated the response dict itself (i.e. its
        # keys such as 'messages') and re-paginated forever with the first
        # page's request/response pair. Iterate the message list and advance
        # the pagination cursor with each fresh request/response instead.
        for result in results.get('messages', []):
            mongo_tasks.store_email(result)
        request = service.users().messages().list_next(request, results)
@app.task
def update_pickle_datastore():
    """Rebuild the pickle datastore for every CSW team.

    For each team, fans out a chord of one team-book update plus one
    datastore report per data type, finishing with update_datastore.
    """
    data_types = ['csw', 'visits', 'clients']
    team_data = report_tasks.get_csw_teams()
    for team_id, team_info in team_data.items():
        team_name = team_info['team_name']
        book_tasks = [update_team_book.si(team_id, team_name)]
        book_tasks.extend(
            get_datastore_report.s(team_id, data_type)
            for data_type in data_types
        )
        chord(group(book_tasks))(update_datastore.s(team_name=team_name))
@app.task
def update_single_pickle_datastore(target_team_id):
    """Rebuild the pickle datastore for a single team.

    :param target_team_id: id of the team (as keyed by get_csw_teams)
        whose book and datastore reports should be refreshed.
    """
    data_types = ['csw', 'visits', 'clients']
    team_data = report_tasks.get_csw_teams()
    for team_id in team_data:
        # BUG FIX: this compared ids with `is` (object identity), which only
        # works by accident for interned small ints and never reliably for
        # ids parsed from a report; compare with equality instead.
        if team_id != target_team_id:
            continue
        team_name = team_data[team_id]['team_name']
        book_tasks = [update_team_book.si(team_id, team_name)]
        for data_type in data_types:
            book_tasks.append(get_datastore_report.s(team_id, data_type))
        chord(group(book_tasks))(update_datastore.s(team_name=team_name))
@app.task
def get_datastore_report(team_id, data_type):
    # Task wrapper: fetch one datastore report sheet for a team.
    return pickle_tasks.get_datastore_report(team_id, sheet_type=data_type)
@app.task
def update_datastore(results, team_name):
    # Chord callback: persist the collected report results for a team.
    pickle_tasks.update_data_store(results, team_name)
@app.task
def update_team_book(team_id, team_name):
    # Task wrapper: refresh the pickled team book for one team.
    pickle_tasks.update_team_book(team_id, team_name)
@app.task
def synchronize_groups():
    """Ensure each Credible user belongs to their team's Google group.

    Emits start/finish fluentd events; users unknown to GSuite are skipped.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started synchronize_groups'
        }
    })
    group_map = gsuite_tasks.get_group_name_map()
    credible_user_dict = report_tasks.get_credible_email_report()
    for user_email, user_info in credible_user_dict.items():
        if not gsuite_tasks.check_user(user_email):
            continue
        current_groups = gsuite_tasks.get_emp_group(user_email)
        team_email = group_map[user_info['team_name']]
        if team_email not in current_groups:
            gsuite_tasks.add_google_group_user(user_email, team_email, user_info['is_admin'])
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished synchronize_groups'
        }
    })
@app.task
def update_emp_email(emp_id, emp_email):
    """Change an employee's Credible email address.

    Emits start/finish fluentd events carrying the employee id and email.
    """
    def _log(message):
        # Progress event for this task invocation.
        event.Event('event', {
            'task': 'overwatch_tasks',
            'info': {
                'message': message,
                'employee id': emp_id,
                'email': str(emp_email)
            }
        })
    _log('started update_emp_email')
    credible_tasks.update_employee_email(emp_id, emp_email)
    _log('finished update_emp_email')
@app.task
def lp_emp_email():
    """Repair employee emails that are unknown to GSuite.

    For every Credible user whose recorded email fails the GSuite check,
    reset it to the expected address. Emits start/finish fluentd events.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started lp_emp_email'
        }
    })
    credible_users_dict = report_tasks.get_credible_username_check()
    for emp_id, emails in credible_users_dict.items():
        if not gsuite_tasks.check_user(emails['actual_email']):
            credible_tasks.update_employee_email(
                emp_id=emp_id, emp_email=emails['expected_email'])
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished lp_emp_email'
        }
    })
@app.task
def generate_audit_targets(num_targets):
    """Return {'dates': audit window, 'targets': per-team audit targets}."""
    dates = alg_utils.get_audit_dates()
    targets = report_tasks.get_audit_targets(dates['start_date'], dates['end_date'], num_targets)
    return {'dates': dates, 'targets': targets}
@app.task
def select_audit_targets(num_targets):
    """Pick audit targets for the current window and email each team's
    clinical command the list of its targets."""
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started select_audit_targets',
            'num_targets': num_targets
        }
    })
    package = generate_audit_targets(num_targets)
    dates = package['dates']
    targets = package['targets']
    subject = 'audit targets for ' + dates['start_date'] + ' - ' + dates['end_date']
    for team_name, team_targets in targets.items():
        clinical_command = report_tasks.get_clinical_command(team_name)
        recipients = [
            clinical_command[emp_id]['email'] for emp_id in clinical_command
        ]
        gsuite_tasks.send_email_to_multiple(recipients, subject, str(team_targets))
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished select_audit_targets',
            'num_targets': num_targets
        }
    })
@app.task
def package_arbitration(field_checks, tx_check):
    """Fold the individual note checks into one approve/deny verdict.

    field_checks is positional: [hotword, clone, ghost]. Any truthy check
    (or a truthy tx_check) denies the note and attaches the offending
    evidence under 'red_x_package'.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started package_arbitration',
            'field_checks': field_checks,
            'tx_check': tx_check
        }
    })
    hotword_check = field_checks[0]
    clone_check = field_checks[1]
    ghost_check = field_checks[2]
    denied = any((hotword_check, clone_check, ghost_check, tx_check))
    arbitration = {'approve': not denied}
    if denied:
        arbitration['red_x_package'] = {}
        for label, check in (('hotwords', hotword_check),
                             ('clones', clone_check),
                             ('ghosts', ghost_check),
                             ('tx_plan', tx_check)):
            if check:
                arbitration['red_x_package'][label] = check
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished package_arbitration',
            'arbitration': arbitration
        }
    })
    return arbitration
@app.task
def arbitrate_note(service_id, ordered_checks=None):
    """Run the standard note checks for one service and return the verdict.

    Returns {service_id: {'service_package': ..., 'arbitration': ...}} when
    the service is eligible; implicitly returns None when
    check_single_service_status denies the go-ahead (callers must cope).
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started arbitrate_note',
            'service_id': service_id,
            'ordered_checks': ordered_checks
        }
    })
    if ordered_checks is None:
        ordered_checks = {}
    go_ahead = database_tasks.check_single_service_status(service_id)
    if go_ahead:
        database_tasks.record_arbitration_start(service_id, time.time())
        service_package = report_tasks.get_service_package(service_id)
        fields = report_tasks.get_commsupt_fields(service_id)
        header = []
        # NOTE(review): locals() only contains this function's local
        # variables at this point, so named checks are unlikely to resolve
        # here — was globals() intended? Also `header` is built but never
        # executed or read below. Confirm before relying on ordered_checks.
        for check in ordered_checks:
            if check in locals():
                called_function = locals()[check]
                arguments = ordered_checks[check]
                sig = signature(called_function)
                # -1 because the bound signature excludes the results arg
                if len(arguments) != len(sig.parameters) - 1:
                    raise SyntaxError('incorrect arguments passed for ' + str(called_function))
                header.append(called_function.s(*arguments))
            else:
                raise KeyError('requested check does not exist!')
        # synchronous call: hotword/clone/ghost on, similarity off
        field_check = check_fields(
            service_id,
            fields=fields,
            ghost=True,
            hotword=True,
            clone=True,
            similarities=False,
            restrict_to_full_clones=True
        )
        tx_date_check = check_tx_date(service_id)
        arbitration = package_arbitration(field_check, tx_date_check)
        event.Event('event', {
            'task': 'overwatch_tasks',
            'info': {
                'message': 'finished arbitrate_note',
                'service_id': service_id,
                'arbitration': {'service_package': service_package, 'arbitration': arbitration}
            }
        })
        return {service_id: {'service_package': service_package, 'arbitration': arbitration}}
@app.task
def adjust_notes(results):
    """Chord callback: batch-apply arbitration results to Credible notes.

    `results` is either a single arbitration dict or a list of them
    (celery delivers a bare result for a one-task chord); normalize to a
    list before handing off.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started adjust_notes',
            'results': results
        }
    })
    # isinstance instead of `type(...) is not list`: idiomatic and also
    # accepts list subclasses.
    if isinstance(results, list):
        checked_results = results
    else:
        checked_results = [results]
    credible_tasks.batch_adjust_notes(checked_results)
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished adjust_notes',
            'checked_results': checked_results
        }
    })
@app.task
def lp_unapproved_commsupt():
    """Periodic sweep: arbitrate unapproved commsupt notes in batches.

    Each full batch of arbitrate_note signatures is dispatched as a chord
    whose callback adjusts the notes; a final partial batch is flushed at
    the end.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started lp_unapproved_commsupt'
        }
    })
    unapproved_service_ids = report_tasks.get_unapproved_pilot_whales()
    if unapproved_service_ids:
        pending_service_ids = database_tasks.check_arbitration_status(unapproved_service_ids)
        ids_for_arbitration = [x for x in unapproved_service_ids if x not in pending_service_ids]
        batch_count = 0
        header = []
        # BUG FIX: the old if/else appended OR flushed, so the service id
        # that triggered a flush was dropped entirely. Always append, then
        # flush once the batch is full.
        for unapproved_service_id in ids_for_arbitration:
            header.append(arbitrate_note.s(unapproved_service_id))
            batch_count += 1
            if batch_count >= NOTE_ARBITRATION_BATCH_SIZE:
                chord(header)(adjust_notes.s())
                header = []
                batch_count = 0
        if len(header) > 0:
            chord(header)(adjust_notes.s())
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished lp_unapproved_commsupt'
        }
    })
@app.task
def lp_clinical_team():
    """Periodic sweep: push client profile changes to Credible in batches."""
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started lp_clinical_team'
        }
    })
    update_data = report_tasks.get_profile_changes()
    count = 0
    header = []
    batch = {}
    # BUG FIX: the old if/else either added the client OR flushed the
    # batch, so the client that triggered a flush was silently lost.
    # Always add, then flush once the batch is full.
    for client_id in update_data:
        batch[client_id] = update_data[client_id]
        count += 1
        if count >= UPDATE_CLIENT_BATCH_SIZE:
            header.append(update_client_profile_batch.s(batch))
            batch = {}
            count = 0
    if len(batch) > 0:
        header.append(update_client_profile_batch.s(batch))
    group(header)()
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished lp_clinical_team'
        }
    })
@app.task
def update_client_profile_batch(name_package):
    """Apply one batch of client profile updates, with audit events."""
    started = {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started update_client_profile_batch',
            'name_package': name_package
        }
    }
    event.Event('event', started)
    credible_tasks.update_client_batch(name_package)
    finished = {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished update_client_profile_batch',
            'name_package': name_package
        }
    }
    event.Event('event', finished)
@app.task
def check_fields(service_id, hotword, clone, ghost, similarities, restrict_to_full_clones=False, fields=None):
    """Run the selected note checks synchronously and return their results.

    Despite the name `header`, the enabled checks are plain function calls
    (not celery signatures) and the return is a positional list in the
    order hotword, clone, ghost, similarities — package_arbitration
    consumes it by index, so the order matters.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started check_fields',
            'service_id': service_id,
            'hotword': hotword,
            'clone': clone,
            'ghost': ghost,
            'similarities': similarities,
            'restrict_to_full_clones': restrict_to_full_clones,
            'fields': fields
        }
    })
    if fields is None:
        fields = report_tasks.get_commsupt_fields(service_id)
    header = []
    if hotword:
        header.append(check_for_hotwords(fields))
    if clone:
        header.append(check_for_clones(
            fields,
            restrict_to_full_clones=restrict_to_full_clones))
    if ghost:
        header.append(check_for_ghosts(fields))
    if similarities:
        header.append(check_for_field_similarities(fields))
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished check_fields',
            'header': header
        }
    })
    return header
@app.task
def check_for_clones(fields, restrict_to_full_clones=False):
    """Find other services whose note text clones this one.

    Returns {match_type: [service_id, ...]}. With restrict_to_full_clones,
    only exact presentation+response matches are reported (empty dict when
    there are none).
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started check_for_clones',
            'fields': fields,
            'restrict_to_full_clones': restrict_to_full_clones
        }
    })
    clone_package = {}
    presentation = fields['presentation']
    response = fields['response']
    clientvisit_id = fields['clientvisit_id']
    clones = report_tasks.check_service_for_clones(presentation, response, clientvisit_id)
    # group matching service ids by what matched ('presentation',
    # 'response', or 'presentation & response')
    for service_id in clones:
        match = clones[service_id]['match']
        clone_package.setdefault(match, []).append(service_id)
    if restrict_to_full_clones:
        # BUG FIX: the old loop iterated `clones` (keyed by service id) while
        # comparing against match-type strings, so the full-clone branch
        # never fired and this path always returned {}. Look up the match
        # type in clone_package directly.
        full_match = 'presentation & response'
        if full_match in clone_package:
            event.Event('event', {
                'task': 'overwatch_tasks',
                'info': {
                    'message': 'finished check_for_clones',
                    'match_type': clone_package[full_match]
                }
            })
            return {full_match: clone_package[full_match]}
        return {}
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished check_for_clones',
            # typo fix: was 'clone_packgage'
            'clone_package': clone_package
        }
    })
    return clone_package
@app.task
def check_for_hotwords(fields):
    """Scan every note field for clinical hotwords.

    Returns {field_name: [hotword, ...]} containing only fields with at
    least one hit; matching is whole-word, case-insensitive, with commas
    stripped.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started check_for_hotwords',
            'fields': fields
        }
    })
    tagged_hotwords = {}
    for field_name in fields:
        words = fields[field_name].lower().replace(',', '').split(' ')
        hits = [hotword for hotword in CLINICAL_HOTWORDS if hotword in words]
        if hits:
            tagged_hotwords[field_name] = hits
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished check_for_hotwords',
            'tagged_hotwords': tagged_hotwords
        }
    })
    return tagged_hotwords
@app.task
def check_for_ghosts(fields):
    """Flag likely "ghost" (low-effort) note fields.

    A field is suspicious when it is under 11 characters, under 6 words,
    or more than one sixth of its words are misspelled. Returns a dict
    with up to three keys: 'character_results', 'word_results',
    'spelling'. NOTE(review): each key is overwritten per field, so only
    the last offending field survives — confirm that is intended.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started check_for_ghosts',
            'fields': fields
        }
    })
    ghost_results = {}
    checker = enchant.DictWithPWL('en_US', IGNORE_WORDLIST_PATH)
    # One translation table replaces the former eleven chained
    # str.replace calls: every separator char maps to a space.
    separator_table = str.maketrans(',"\'/\n-.()&:', ' ' * 11)
    for field_name in fields:
        off_words = []
        if field_name != 'clientvisit_id':
            field = fields[field_name].lower()
            # character count is measured before separator stripping
            if len(field) < 11:
                ghost_results['character_results'] = {
                    field_name: str(len(field))
                }
            words = field.translate(separator_table).split(' ')
            if len(words) < 6:
                ghost_results['word_results'] = {
                    field_name: str(len(words))
                }
            for word in words:
                # skip empty tokens and bare numbers before spell-checking
                if word and not alg_utils.is_int(word):
                    if not checker.check(word):
                        off_words.append(word)
            # more than 1/6 of the tokens misspelled -> suspicious
            if len(off_words) / len(words) > 1 / 6:
                ghost_results['spelling'] = {
                    field_name: off_words
                }
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished check_for_ghosts',
            'ghost_results': ghost_results
        }
    })
    return ghost_results
@app.task
def package_all_field_similarities(results):
    """Chord callback: keep only fields that produced similarity hits.

    `results` maps field_name -> per-field similarity result; empty/falsy
    results are dropped.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started package_all_field_similarities',
            'results': results
        }
    })
    returned_data = {}
    # BUG FIX: the old body did `results = results[field_name]`, rebinding
    # the dict being iterated — the second iteration indexed into the first
    # field's value instead of the results dict. Use a distinct name.
    for field_name in results:
        field_result = results[field_name]
        if field_result:
            returned_data[field_name] = field_result
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished package_all_field_similarities',
            'returned_data': returned_data
        }
    })
    return returned_data
@app.task
def check_for_field_similarities(fields):
    """Fan out per-field similarity scoring as a chord.

    NOTE(review): the returned value is the chord's AsyncResult, not the
    packaged data — callers expecting a dict must wait on it. Also note
    this mutates `fields` (deletes 'clientvisit_id').
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started check_for_field_similarities',
            'fields': fields
        }
    })
    header = []
    service_id = fields['clientvisit_id']
    del fields['clientvisit_id']
    for field_name in fields:
        field_value = fields[field_name]
        header.append(calculate_field_similarity.s(field_name, field_value, service_id))
    result = chord(header)(package_all_field_similarities.s())
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished check_for_field_similarities',
            'result': result
        }
    })
    return result
@app.task
def calculate_similarities(fields):
    """Fire-and-forget per-field similarity tasks (no result collection).

    Mutates `fields`: 'clientvisit_id' is removed before dispatch, so the
    finished event logs the already-stripped dict.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started calculate_similarities',
            'fields': fields
        }
    })
    service_id = fields['clientvisit_id']
    del fields['clientvisit_id']
    for field_name in fields:
        field_value = fields[field_name]
        calculate_field_similarity.delay(field_name, field_value, service_id)
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished calculate_similarities',
            'fields': fields
        }
    })
@app.task
def calculate_service_similarities(service_id):
    """Fetch a service's note fields and kick off similarity scoring.

    Calls calculate_similarities synchronously (direct call, not .delay);
    the inner fan-out is still asynchronous.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started calculate_service_similarities',
            'service_id': service_id
        }
    })
    fields = report_tasks.get_commsupt_fields(service_id)
    calculate_similarities(fields)
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished calculate_service_similarities',
            'service_id': service_id
        }
    })
@app.task
def calculate_field_similarity(field_name, field_value, service_id):
    """Compare one field against the same field of other services.

    Fans out get_lcs_percentage per foreign service as a chord whose
    callback packages any high-similarity hits for this field.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started calculate_field_similarity',
            'field_name': field_name,
            'field_value': field_value,
            'service_id': service_id
        }
    })
    # presumably Credible custom-field ids for the commsupt note fields —
    # TODO confirm; unknown field_name raises KeyError below.
    field_ids = {
        'presentation': '514015',
        'interventions': '514018',
        'response': '514019'
    }
    header = []
    field_id = field_ids[field_name]
    foreign_fields = report_tasks.get_given_field(field_id, service_id)
    for foreign_service_id in foreign_fields:
        foreign_field = foreign_fields[foreign_service_id]['field']
        header.append(get_lcs_percentage.s(
            foreign_service_id,
            service_id,
            field_id,
            foreign_field,
            field_value))
    chord(header)(package_single_field_similarities.s(field_name=field_name))
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished calculate_field_similarity',
            'field_name': field_name,
            'field_value': field_value,
            'service_id': service_id
        }
    })
@app.task
def store_field_similarities(field_name, field_value, service_id):
    """Dispatch get_lcs_percentage for each foreign service (store-only).

    Unlike calculate_field_similarity there is no chord callback: results
    are persisted by get_lcs_percentage itself. NOTE(review): `header`
    collects the AsyncResult objects from .delay but is never read — it
    can likely be dropped; confirm nothing inspects it via celery state.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started store_field_similarity',
            'field_name': field_name,
            'field_value': field_value,
            'service_id': service_id
        }
    })
    # presumably Credible custom-field ids — TODO confirm (duplicated from
    # calculate_field_similarity; candidate for a module-level constant).
    field_ids = {
        'presentation': '514015',
        'interventions': '514018',
        'response': '514019'
    }
    header = []
    field_id = field_ids[field_name]
    foreign_fields = report_tasks.get_given_field(field_id, service_id)
    for foreign_service_id in foreign_fields:
        foreign_field = foreign_fields[foreign_service_id]['field']
        header.append(get_lcs_percentage.delay(
            foreign_service_id,
            service_id,
            field_id,
            foreign_field,
            field_value))
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished store_field_similarity',
            'field_name': field_name,
            'field_value': field_value,
            'service_id': service_id
        }
    })
@app.task
def package_single_field_similarities(results, field_name):
    """Chord callback: keep foreign services whose similarity exceeds 0.9.

    Returns {field_name: {foreign_service_id: similarity}} or {} when no
    result clears the threshold.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started package_single_field_similarities',
            'results': results,
            'field_name': field_name
        }
    })
    returned_data = {}
    for service_id in results:
        result = results[service_id]
        if result > 0.9:
            returned_data[service_id] = result
    if returned_data:
        event.Event('event', {
            'task': 'overwatch_tasks',
            'info': {
                'message': 'finished package_single_field_similarities',
                # BUG FIX: was `field_name[returned_data]`, which indexes a
                # str with a dict and raised TypeError whenever there was a
                # hit. Log the same mapping that is returned.
                'field_name': {field_name: returned_data}
            }
        })
        return {field_name: returned_data}
    else:
        event.Event('event', {
            'task': 'overwatch_tasks',
            'info': {
                'message': 'finished package_single_field_similarities',
                'field_name': '{}'
            }
        })
        return {}
@app.task
def get_lcs_percentage(foreign_service_id, local_service_id, field_id, foreign_field, local_field):
    """Score similarity of two field texts via longest common subsequence.

    The score is len(LCS) / len(local_field), stored in the database and
    returned as {foreign_service_id: score rounded to 2 places}.
    NOTE(review): raises ZeroDivisionError if local_field is empty —
    confirm callers guarantee non-empty text.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started get_lcs_percentage',
            'foreign_service_id': foreign_service_id,
            'local_service_id': local_service_id,
            'field_id': field_id,
            'foreign_field': foreign_field,
            'local_field': local_field
        }
    })
    result = lcs(local_field, foreign_field)
    percentage_match = float(len(result)) / float(len(local_field))
    database_tasks.store_similarity(
        local_service_id=local_service_id,
        foreign_service_id=foreign_service_id,
        field_id=field_id,
        similarity=round(percentage_match, 2)
    )
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished get_lcs_percentage',
            'return': {foreign_service_id: round(percentage_match, 2)}
        }
    })
    return {foreign_service_id: round(percentage_match, 2)}
@app.task
def lcs(a, b):
    """Return the longest common subsequence of strings a and b.

    Classic O(len(a)*len(b)) dynamic programming table followed by a
    backtrack that reconstructs the subsequence.
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started lcs',
            'a': a,
            'b': b
        }
    })
    rows, cols = len(a), len(b)
    # table[i][j] = LCS length of a[:i] and b[:j]; row/col 0 stay zero
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for i in range(rows):
        for j in range(cols):
            if a[i] == b[j]:
                table[i + 1][j + 1] = table[i][j] + 1
            else:
                table[i + 1][j + 1] = max(table[i + 1][j], table[i][j + 1])
    # backtrack from the bottom-right corner, collecting matched chars
    pieces = []
    x, y = rows, cols
    while x != 0 and y != 0:
        if table[x][y] == table[x - 1][y]:
            x -= 1
        elif table[x][y] == table[x][y - 1]:
            y -= 1
        else:
            assert a[x - 1] == b[y - 1]
            pieces.append(a[x - 1])
            x -= 1
            y -= 1
    result = ''.join(reversed(pieces))
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished lcs',
            'result': result
        }
    })
    return result
@app.task
def check_tx_date(service_id):
    """Check the service's treatment-plan date.

    Returns {'expired': True} when report_tasks.check_tx_plan_date is
    falsy, otherwise an empty dict (truthiness signals a problem to
    package_arbitration).
    """
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'started check_tx_date',
            'service_id': service_id
        }
    })
    tx_date_check = {}
    tx_plan_check = report_tasks.check_tx_plan_date(service_id)
    if not tx_plan_check:
        tx_date_check['expired'] = True
    event.Event('event', {
        'task': 'overwatch_tasks',
        'info': {
            'message': 'finished check_tx_date',
            'tx_date_check': tx_date_check
        }
    })
    return tx_date_check
| |
import numpy as np
import pytest
from pandas import (
NA,
DataFrame,
IndexSlice,
MultiIndex,
NaT,
Timestamp,
option_context,
)
pytest.importorskip("jinja2")
from pandas.io.formats.style import Styler
from pandas.io.formats.style_render import _str_escape
@pytest.fixture
def df():
    # 2x2 frame (int column A, float column B) shared by most tests below
    return DataFrame(
        data=[[0, -0.609], [1, -1.228]],
        columns=["A", "B"],
        index=["x", "y"],
    )
@pytest.fixture
def styler(df):
    # uuid_len=0 keeps generated HTML ids deterministic for assertions
    return Styler(df, uuid_len=0)
def test_display_format(styler):
    ctx = styler.format("{:0.1f}")._translate(True, True)
    # BUG FIX: the old asserts passed a generator of *lists* to all(), so
    # only each list's truthiness was checked (vacuously true for any
    # non-empty row). Flatten so every cell is actually inspected.
    assert all("display_value" in c for row in ctx["body"] for c in row)
    assert all(len(c["display_value"]) <= 3 for row in ctx["body"] for c in row[1:])
    assert len(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3
@pytest.mark.parametrize("index", [True, False])
@pytest.mark.parametrize("columns", [True, False])
def test_display_format_index(styler, index, columns):
    # format_index with a callable (axis=0) and a string template (axis=1)
    exp_index = ["x", "y"]
    if index:
        styler.format_index(lambda v: v.upper(), axis=0)  # test callable
        exp_index = ["X", "Y"]
    exp_columns = ["A", "B"]
    if columns:
        styler.format_index("*{}*", axis=1)  # test string
        exp_columns = ["*A*", "*B*"]
    ctx = styler._translate(True, True)
    for r, row in enumerate(ctx["body"]):
        assert row[0]["display_value"] == exp_index[r]
    for c, col in enumerate(ctx["head"][1:]):
        assert col["display_value"] == exp_columns[c]
def test_format_dict(styler):
    # per-column formatters via a {column: format-string} dict
    ctx = styler.format({"A": "{:0.1f}", "B": "{0:.2%}"})._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "0.0"
    assert ctx["body"][0][2]["display_value"] == "-60.90%"
def test_format_index_dict(styler):
    # per-level formatter dict for the index
    ctx = styler.format_index({0: lambda v: v.upper()})._translate(True, True)
    for i, val in enumerate(["X", "Y"]):
        assert ctx["body"][i][0]["display_value"] == val
def test_format_string(styler):
    # a single format string applies to every data cell
    ctx = styler.format("{:.2f}")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "0.00"
    assert ctx["body"][0][2]["display_value"] == "-0.61"
    assert ctx["body"][1][1]["display_value"] == "1.00"
    assert ctx["body"][1][2]["display_value"] == "-1.23"
def test_format_callable(styler):
    # a callable formatter receives each cell value
    ctx = styler.format(lambda v: "neg" if v < 0 else "pos")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "pos"
    assert ctx["body"][0][2]["display_value"] == "neg"
    assert ctx["body"][1][1]["display_value"] == "pos"
    assert ctx["body"][1][2]["display_value"] == "neg"
def test_format_with_na_rep():
    # GH 21527 28358
    # na_rep replaces missing values, with and without a formatter / subset
    df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"])
    ctx = df.style.format(None, na_rep="-")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "-"
    assert ctx["body"][0][2]["display_value"] == "-"
    ctx = df.style.format("{:.2%}", na_rep="-")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "-"
    assert ctx["body"][0][2]["display_value"] == "-"
    assert ctx["body"][1][1]["display_value"] == "110.00%"
    assert ctx["body"][1][2]["display_value"] == "120.00%"
    ctx = df.style.format("{:.2%}", na_rep="-", subset=["B"])._translate(True, True)
    assert ctx["body"][0][2]["display_value"] == "-"
    assert ctx["body"][1][2]["display_value"] == "120.00%"
def test_format_index_with_na_rep():
    # na_rep applies to the various missing-value types in a column index
    df = DataFrame([[1, 2, 3, 4, 5]], columns=["A", None, np.nan, NaT, NA])
    ctx = df.style.format_index(None, na_rep="--", axis=1)._translate(True, True)
    assert ctx["head"][0][1]["display_value"] == "A"
    for i in [2, 3, 4, 5]:
        assert ctx["head"][0][i]["display_value"] == "--"
def test_format_non_numeric_na():
    # GH 21527 28358
    # na_rep also covers missing values in object and datetime columns
    df = DataFrame(
        {
            "object": [None, np.nan, "foo"],
            "datetime": [None, NaT, Timestamp("20120101")],
        }
    )
    ctx = df.style.format(None, na_rep="-")._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == "-"
    assert ctx["body"][0][2]["display_value"] == "-"
    assert ctx["body"][1][1]["display_value"] == "-"
    assert ctx["body"][1][2]["display_value"] == "-"
@pytest.mark.parametrize(
    "func, attr, kwargs",
    [
        ("format", "_display_funcs", {}),
        ("format_index", "_display_funcs_index", {"axis": 0}),
        ("format_index", "_display_funcs_columns", {"axis": 1}),
    ],
)
def test_format_clear(styler, func, attr, kwargs):
    # calling format()/format_index() with no formatter resets to defaults
    assert (0, 0) not in getattr(styler, attr)  # using default
    getattr(styler, func)("{:.2f}", **kwargs)
    assert (0, 0) in getattr(styler, attr)  # formatter is specified
    getattr(styler, func)(**kwargs)
    assert (0, 0) not in getattr(styler, attr)  # formatter cleared to default
@pytest.mark.parametrize(
    "escape, exp",
    [
        ("html", "&lt;&gt;&amp;&#34;%$#_{}~^\\~ ^ \\ "),
        (
            "latex",
            '<>\\&"\\%\\$\\#\\_\\{\\}\\textasciitilde \\textasciicircum '
            "\\textbackslash \\textasciitilde \\space \\textasciicircum \\space "
            "\\textbackslash \\space ",
        ),
    ],
)
def test_format_escape_html(escape, exp):
    # escape applies to the cell value but not to the formatter template
    chars = '<>&"%$#_{}~^\\~ ^ \\ '
    df = DataFrame([[chars]])
    s = Styler(df, uuid_len=0).format("&{0}&", escape=None)
    expected = f'<td id="T__row0_col0" class="data row0 col0" >&{chars}&</td>'
    assert expected in s.to_html()
    # only the value should be escaped before passing to the formatter
    s = Styler(df, uuid_len=0).format("&{0}&", escape=escape)
    expected = f'<td id="T__row0_col0" class="data row0 col0" >&{exp}&</td>'
    assert expected in s.to_html()
    # also test format_index()
    styler = Styler(DataFrame(columns=[chars]), uuid_len=0)
    styler.format_index("&{0}&", escape=None, axis=1)
    assert styler._translate(True, True)["head"][0][1]["display_value"] == f"&{chars}&"
    styler.format_index("&{0}&", escape=escape, axis=1)
    assert styler._translate(True, True)["head"][0][1]["display_value"] == f"&{exp}&"
def test_format_escape_na_rep():
    # tests the na_rep is not escaped
    df = DataFrame([['<>&"', None]])
    s = Styler(df, uuid_len=0).format("X&{0}>X", escape="html", na_rep="&")
    ex = '<td id="T__row0_col0" class="data row0 col0" >X&&lt;&gt;&amp;&#34;>X</td>'
    expected2 = '<td id="T__row0_col1" class="data row0 col1" >&</td>'
    assert ex in s.to_html()
    assert expected2 in s.to_html()
    # also test for format_index()
    df = DataFrame(columns=['<>&"', None])
    styler = Styler(df, uuid_len=0)
    styler.format_index("X&{0}>X", escape="html", na_rep="&", axis=1)
    ctx = styler._translate(True, True)
    assert ctx["head"][0][1]["display_value"] == "X&&lt;&gt;&amp;&#34;>X"
    assert ctx["head"][0][2]["display_value"] == "&"
def test_format_escape_floats(styler):
    # test given formatter for number format is not impacted by escape
    s = styler.format("{:.1f}", escape="html")
    for expected in [">0.0<", ">1.0<", ">-1.2<", ">-0.6<"]:
        assert expected in s.to_html()
    # tests precision of floats is not impacted by escape
    s = styler.format(precision=1, escape="html")
    for expected in [">0<", ">1<", ">-1.2<", ">-0.6<"]:
        assert expected in s.to_html()
@pytest.mark.parametrize("formatter", [5, True, [2.0]])
@pytest.mark.parametrize("func", ["format", "format_index"])
def test_format_raises(styler, formatter, func):
    # non-str, non-callable formatters are rejected
    with pytest.raises(TypeError, match="expected str or callable"):
        getattr(styler, func)(formatter)
@pytest.mark.parametrize(
    "precision, expected",
    [
        (1, ["1.0", "2.0", "3.2", "4.6"]),
        (2, ["1.00", "2.01", "3.21", "4.57"]),
        (3, ["1.000", "2.009", "3.212", "4.566"]),
    ],
)
def test_format_with_precision(precision, expected):
    # Issue #13257
    # same float values used for both data and column labels so one loop
    # checks format() and format_index() together
    df = DataFrame([[1.0, 2.0090, 3.2121, 4.566]], columns=[1.0, 2.0090, 3.2121, 4.566])
    styler = Styler(df)
    styler.format(precision=precision)
    styler.format_index(precision=precision, axis=1)
    ctx = styler._translate(True, True)
    for col, exp in enumerate(expected):
        assert ctx["body"][0][col + 1]["display_value"] == exp  # format test
        assert ctx["head"][0][col + 1]["display_value"] == exp  # format_index test
@pytest.mark.parametrize("axis", [0, 1])
@pytest.mark.parametrize(
    "level, expected",
    [
        (0, ["X", "X", "_", "_"]),  # level int
        ("zero", ["X", "X", "_", "_"]),  # level name
        (1, ["_", "_", "X", "X"]),  # other level int
        ("one", ["_", "_", "X", "X"]),  # other level name
        ([0, 1], ["X", "X", "X", "X"]),  # both levels
        ([0, "zero"], ["X", "X", "_", "_"]),  # level int and name simultaneous
        ([0, "one"], ["X", "X", "X", "X"]),  # both levels as int and name
        (["one", "zero"], ["X", "X", "X", "X"]),  # both level names, reversed
    ],
)
def test_format_index_level(axis, level, expected):
    # level selection by int, name, or mixed lists, for either axis
    midx = MultiIndex.from_arrays([["_", "_"], ["_", "_"]], names=["zero", "one"])
    df = DataFrame([[1, 2], [3, 4]])
    if axis == 0:
        df.index = midx
    else:
        df.columns = midx
    styler = df.style.format_index(lambda v: "X", level=level, axis=axis)
    ctx = styler._translate(True, True)
    if axis == 0:  # compare index
        result = [ctx["body"][s][0]["display_value"] for s in range(2)]
        result += [ctx["body"][s][1]["display_value"] for s in range(2)]
    else:  # compare columns
        result = [ctx["head"][0][s + 1]["display_value"] for s in range(2)]
        result += [ctx["head"][1][s + 1]["display_value"] for s in range(2)]
    assert expected == result
def test_format_subset():
    # formatters limited by subset leave out-of-subset cells at defaults
    df = DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"])
    ctx = df.style.format(
        {"a": "{:0.1f}", "b": "{0:.2%}"}, subset=IndexSlice[0, :]
    )._translate(True, True)
    expected = "0.1"
    raw_11 = "1.123400"
    assert ctx["body"][0][1]["display_value"] == expected
    assert ctx["body"][1][1]["display_value"] == raw_11
    assert ctx["body"][0][2]["display_value"] == "12.34%"
    ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, :])._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == expected
    assert ctx["body"][1][1]["display_value"] == raw_11
    ctx = df.style.format("{:0.1f}", subset=IndexSlice["a"])._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == expected
    assert ctx["body"][0][2]["display_value"] == "0.123400"
    ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, "a"])._translate(True, True)
    assert ctx["body"][0][1]["display_value"] == expected
    assert ctx["body"][1][1]["display_value"] == raw_11
    ctx = df.style.format("{:0.1f}", subset=IndexSlice[[0, 1], ["a"]])._translate(
        True, True
    )
    assert ctx["body"][0][1]["display_value"] == expected
    assert ctx["body"][1][1]["display_value"] == "1.1"
    assert ctx["body"][0][2]["display_value"] == "0.123400"
    assert ctx["body"][1][2]["display_value"] == raw_11
@pytest.mark.parametrize("formatter", [None, "{:,.1f}"])
@pytest.mark.parametrize("decimal", [".", "*"])
@pytest.mark.parametrize("precision", [None, 2])
@pytest.mark.parametrize("func, col", [("format", 1), ("format_index", 0)])
def test_format_thousands(formatter, decimal, precision, func, col):
    # thousands separator applies to float, int and complex values
    styler = DataFrame([[1000000.123456789]], index=[1000000.123456789]).style
    result = getattr(styler, func)(  # testing float
        thousands="_", formatter=formatter, decimal=decimal, precision=precision
    )._translate(True, True)
    assert "1_000_000" in result["body"][0][col]["display_value"]
    styler = DataFrame([[1000000]], index=[1000000]).style
    result = getattr(styler, func)(  # testing int
        thousands="_", formatter=formatter, decimal=decimal, precision=precision
    )._translate(True, True)
    assert "1_000_000" in result["body"][0][col]["display_value"]
    styler = DataFrame([[1 + 1000000.123456789j]], index=[1 + 1000000.123456789j]).style
    result = getattr(styler, func)(  # testing complex
        thousands="_", formatter=formatter, decimal=decimal, precision=precision
    )._translate(True, True)
    assert "1_000_000" in result["body"][0][col]["display_value"]
@pytest.mark.parametrize("formatter", [None, "{:,.4f}"])
@pytest.mark.parametrize("thousands", [None, ",", "*"])
@pytest.mark.parametrize("precision", [None, 4])
@pytest.mark.parametrize("func, col", [("format", 1), ("format_index", 0)])
def test_format_decimal(formatter, thousands, precision, func, col):
    # custom decimal separator applies to float and complex values
    styler = DataFrame([[1000000.123456789]], index=[1000000.123456789]).style
    result = getattr(styler, func)(  # testing float
        decimal="_", formatter=formatter, thousands=thousands, precision=precision
    )._translate(True, True)
    assert "000_123" in result["body"][0][col]["display_value"]
    styler = DataFrame([[1 + 1000000.123456789j]], index=[1 + 1000000.123456789j]).style
    result = getattr(styler, func)(  # testing complex
        decimal="_", formatter=formatter, thousands=thousands, precision=precision
    )._translate(True, True)
    assert "000_123" in result["body"][0][col]["display_value"]
def test_str_escape_error():
    # invalid escape modes raise for str input; non-str passes through
    msg = "`escape` only permitted in {'html', 'latex'}, got "
    with pytest.raises(ValueError, match=msg):
        _str_escape("text", "bad_escape")
    with pytest.raises(ValueError, match=msg):
        _str_escape("text", [])
    _str_escape(2.00, "bad_escape")  # OK since dtype is float
def test_format_options():
    # global styler.format.* options feed the default formatting pipeline
    df = DataFrame({"int": [2000, 1], "float": [1.009, None], "str": ["&<", "&~"]})
    ctx = df.style._translate(True, True)
    # test option: na_rep
    assert ctx["body"][1][2]["display_value"] == "nan"
    with option_context("styler.format.na_rep", "MISSING"):
        ctx_with_op = df.style._translate(True, True)
        assert ctx_with_op["body"][1][2]["display_value"] == "MISSING"
    # test option: decimal and precision
    assert ctx["body"][0][2]["display_value"] == "1.009000"
    with option_context("styler.format.decimal", "_"):
        ctx_with_op = df.style._translate(True, True)
        assert ctx_with_op["body"][0][2]["display_value"] == "1_009000"
    with option_context("styler.format.precision", 2):
        ctx_with_op = df.style._translate(True, True)
        assert ctx_with_op["body"][0][2]["display_value"] == "1.01"
    # test option: thousands
    assert ctx["body"][0][1]["display_value"] == "2000"
    with option_context("styler.format.thousands", "_"):
        ctx_with_op = df.style._translate(True, True)
        assert ctx_with_op["body"][0][1]["display_value"] == "2_000"
    # test option: escape
    assert ctx["body"][0][3]["display_value"] == "&<"
    assert ctx["body"][1][3]["display_value"] == "&~"
    with option_context("styler.format.escape", "html"):
        ctx_with_op = df.style._translate(True, True)
        assert ctx_with_op["body"][0][3]["display_value"] == "&amp;&lt;"
    with option_context("styler.format.escape", "latex"):
        ctx_with_op = df.style._translate(True, True)
        assert ctx_with_op["body"][1][3]["display_value"] == "\\&\\textasciitilde "
    # test option: formatter
    with option_context("styler.format.formatter", {"int": "{:,.2f}"}):
        ctx_with_op = df.style._translate(True, True)
        assert ctx_with_op["body"][0][1]["display_value"] == "2,000.00"
def test_precision_zero(df):
    # precision=0 renders floats with no decimal places
    styler = Styler(df, precision=0)
    ctx = styler._translate(True, True)
    assert ctx["body"][0][2]["display_value"] == "-1"
    assert ctx["body"][1][2]["display_value"] == "-1"
@pytest.mark.parametrize(
    "formatter, exp",
    [
        (lambda x: f"{x:.3f}", "9.000"),
        ("{:.2f}", "9.00"),
        ({0: "{:.1f}"}, "9.0"),
        (None, "9"),
    ],
)
def test_formatter_options_validator(formatter, exp):
    # the styler.format.formatter option accepts callable, str, dict, None
    df = DataFrame([[9]])
    with option_context("styler.format.formatter", formatter):
        assert f" {exp} " in df.style.to_latex()
def test_formatter_options_raises():
    # invalid types for the formatter option are rejected at option-set time
    msg = "Value must be an instance of"
    with pytest.raises(ValueError, match=msg):
        with option_context("styler.format.formatter", ["bad", "type"]):
            DataFrame().style.to_latex()
def test_1level_multiindex():
    # GH 43383
    # a single-level MultiIndex still renders visible row labels
    midx = MultiIndex.from_product([[1, 2]], names=[""])
    df = DataFrame(-1, index=midx, columns=[0, 1])
    ctx = df.style._translate(True, True)
    assert ctx["body"][0][0]["display_value"] == "1"
    assert ctx["body"][0][0]["is_visible"] is True
    assert ctx["body"][1][0]["display_value"] == "2"
    assert ctx["body"][1][0]["is_visible"] is True
| |
import py
from rpython.jit.metainterp import jitexc
from rpython.jit.metainterp.warmspot import get_stats
from rpython.rlib.jit import JitDriver, set_param, unroll_safe, jit_callback
from rpython.jit.backend.llgraph import runner
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.jit.metainterp.optimizeopt import ALL_OPTS_NAMES
class Exit(Exception):
    """Control-flow exception carrying the interpreter's final result."""

    def __init__(self, result):
        # stored so the caller can read it back via `e.result`
        self.result = result
class TestLLWarmspot(LLJitMixin):
    # Warmspot integration tests on the llgraph backend: they check how the
    # JitDriver hints (jit_merge_point / can_enter_jit / loop_header) control
    # when tracing starts and which operations end up in the compiled loops.
    CPUClass = runner.LLGraphCPU
    def test_basic(self):
        # Minimal bytecode interpreter: green 'i' is the program counter,
        # red 'a' is the accumulator.
        mydriver = JitDriver(reds=['a'],
                             greens=['i'])
        CODE_INCREASE = 0
        CODE_JUMP = 1
        lst = [CODE_INCREASE, CODE_INCREASE, CODE_JUMP]
        def interpreter_loop(a):
            i = 0
            while True:
                mydriver.jit_merge_point(i=i, a=a)
                if i >= len(lst):
                    break
                elem = lst[i]
                if elem == CODE_INCREASE:
                    a = a + 1
                    i += 1
                elif elem == CODE_JUMP:
                    if a < 20:
                        i = 0
                        mydriver.can_enter_jit(i=i, a=a)
                    else:
                        i += 1
                else:
                    pass
            raise Exit(a)
        def main(a):
            try:
                interpreter_loop(a)
            except Exit as e:
                return e.result
        res = self.meta_interp(main, [1])
        assert res == 21
    def test_reentry(self):
        # can_enter_jit placed before the merge point: the loop may be
        # re-entered from the interpreter at every iteration.
        mydriver = JitDriver(reds = ['n'], greens = [])
        def f(n):
            while n > 0:
                mydriver.can_enter_jit(n=n)
                mydriver.jit_merge_point(n=n)
                if n % 20 == 0:
                    n -= 2
                n -= 1
        res = self.meta_interp(f, [60])
        assert res == f(30)
    def test_location(self):
        # get_printable_location receives the green args; the recorded
        # debug locations should all carry the green value 123.
        def get_printable_location(n):
            return 'GREEN IS %d.' % n
        myjitdriver = JitDriver(greens=['n'], reds=['m'],
                                get_printable_location=get_printable_location)
        def f(n, m):
            while m > 0:
                myjitdriver.can_enter_jit(n=n, m=m)
                myjitdriver.jit_merge_point(n=n, m=m)
                m -= 1
        self.meta_interp(f, [123, 10])
        assert len(get_stats().locations) >= 4
        for loc in get_stats().locations:
            assert loc == (0, 0, 123)
    def test_set_param_enable_opts(self):
        # set_param('enable_opts', ...) at run time must override whatever
        # default the meta_interp call was given.
        from rpython.rtyper.annlowlevel import llstr, hlstr
        myjitdriver = JitDriver(greens = [], reds = ['n'])
        class A(object):
            def m(self, n):
                return n-1
        def g(n):
            while n > 0:
                myjitdriver.can_enter_jit(n=n)
                myjitdriver.jit_merge_point(n=n)
                n = A().m(n)
            return n
        def f(n, enable_opts):
            set_param(None, 'enable_opts', hlstr(enable_opts))
            return g(n)
        # check that the set_param will override the default
        res = self.meta_interp(f, [10, llstr('')])
        assert res == 0
        # with no optimizations the allocation of A stays in the trace
        self.check_resops(new_with_vtable=1)
        res = self.meta_interp(f, [10, llstr(ALL_OPTS_NAMES)],
                               enable_opts='')
        assert res == 0
        # with all optimizations enabled the allocation is removed
        self.check_resops(new_with_vtable=0)
    def test_unwanted_loops(self):
        mydriver = JitDriver(reds = ['n', 'total', 'm'], greens = [])
        def loop1(n):
            # the jit should not look here, as there is a loop
            res = 0
            for i in range(n):
                res += i
            return res
        @unroll_safe
        def loop2(n):
            # the jit looks here, due to the decorator
            for i in range(5):
                n += 1
            return n
        def f(m):
            total = 0
            n = 0
            while n < m:
                mydriver.can_enter_jit(n=n, total=total, m=m)
                mydriver.jit_merge_point(n=n, total=total, m=m)
                total += loop1(n)
                n = loop2(n)
            return total
        self.meta_interp(f, [50])
        self.check_enter_count_at_most(2)
    def test_wanted_unrolling_and_preinlining(self):
        mydriver = JitDriver(reds = ['n', 'm'], greens = [])
        @unroll_safe
        def loop2(n):
            # the jit looks here, due to the decorator
            for i in range(5):
                n += 1
            return n
        loop2._always_inline_ = True
        def g(n):
            return loop2(n)
        g._dont_inline_ = True
        def f(m):
            n = 0
            while n < m:
                mydriver.can_enter_jit(n=n, m=m)
                mydriver.jit_merge_point(n=n, m=m)
                n = g(n)
            return n
        self.meta_interp(f, [50], backendopt=True)
        self.check_enter_count_at_most(2)
        # g was pre-inlined despite _dont_inline_, so no residual call
        self.check_resops(call=0)
    def test_loop_header(self):
        # artificial test: we enter into the JIT only when can_enter_jit()
        # is seen, but we close a loop in the JIT much more quickly
        # because of loop_header().
        mydriver = JitDriver(reds = ['n', 'm'], greens = [])
        def f(m):
            n = 0
            while True:
                mydriver.jit_merge_point(n=n, m=m)
                if n > m:
                    m -= 1
                    if m < 0:
                        return n
                    n = 0
                    mydriver.can_enter_jit(n=n, m=m)
                else:
                    n += 1
                    mydriver.loop_header()
        assert f(15) == 1
        res = self.meta_interp(f, [15], backendopt=True)
        assert res == 1
        self.check_resops(int_add=2)    # I get 13 without the loop_header()
    def test_omit_can_enter_jit(self):
        # Simple test comparing the effects of always giving a can_enter_jit(),
        # or not giving any.  Mostly equivalent, except that if given, it is
        # ignored the first time, and so it ends up taking one extra loop to
        # start JITting.
        mydriver = JitDriver(greens=[], reds=['m'])
        #
        for i2 in range(10):
            def f2(m):
                while m > 0:
                    mydriver.jit_merge_point(m=m)
                    m -= 1
            self.meta_interp(f2, [i2])
            try:
                self.check_jitcell_token_count(1)
                break
            except AssertionError:
                print "f2: no loop generated for i2==%d" % i2
        else:
            raise     # re-raise the AssertionError: check_loop_count never 1
        #
        for i1 in range(10):
            def f1(m):
                while m > 0:
                    mydriver.can_enter_jit(m=m)
                    mydriver.jit_merge_point(m=m)
                    m -= 1
            self.meta_interp(f1, [i1])
            try:
                self.check_jitcell_token_count(1)
                break
            except AssertionError:
                print "f1: no loop generated for i1==%d" % i1
        else:
            raise     # re-raise the AssertionError: check_loop_count never 1
        #
        # with can_enter_jit one extra iteration is needed before JITting
        assert i1 - 1 == i2
    def test_no_loop_at_all(self):
        mydriver = JitDriver(greens=[], reds=['m'])
        def f2(m):
            mydriver.jit_merge_point(m=m)
            return m - 1
        def f1(m):
            while m > 0:
                m = f2(m)
        self.meta_interp(f1, [8])
        # it should generate one "loop" only, which ends in a FINISH
        # corresponding to the return from f2.
        self.check_trace_count(1)
        self.check_resops(jump=0)
    def test_simple_loop(self):
        mydriver = JitDriver(greens=[], reds=['m'])
        def f1(m):
            while m > 0:
                mydriver.jit_merge_point(m=m)
                m = m - 1
        self.meta_interp(f1, [8])
        self.check_trace_count(1)
        self.check_resops({'jump': 1, 'guard_true': 2, 'int_gt': 2,
                           'int_sub': 2})
    def test_void_red_variable(self):
        # a red variable holding None (lltype Void) must not crash tracing
        mydriver = JitDriver(greens=[], reds=['m'])
        def f1(m):
            a = None
            while m > 0:
                mydriver.jit_merge_point(m=m)
                m = m - 1
                if m == 10:
                    pass   # other case
        self.meta_interp(f1, [18])
    def test_bug_constant_int(self):
        py.test.skip("crashes because a is a constant")
        from rpython.rtyper.lltypesystem import lltype, rffi
        mydriver = JitDriver(greens=['a'], reds=['m'])
        def f1(m, a):
            while m > 0:
                mydriver.jit_merge_point(a=a, m=m)
                m = m - 1
        def entry(m):
            f1(m, 42)
        self.meta_interp(entry, [18])
    def test_bug_constant_instance(self):
        py.test.skip("crashes because a is a constant")
        from rpython.rtyper.lltypesystem import lltype, rffi
        mydriver = JitDriver(greens=['a'], reds=['m'])
        class A(object):
            pass
        a1 = A()
        def f1(m, a):
            while m > 0:
                mydriver.jit_merge_point(a=a, m=m)
                m = m - 1
        def entry(m):
            f1(m, a1)
        self.meta_interp(entry, [18])
    def test_bug_constant_rawptrs(self):
        py.test.skip("crashes because a is a constant")
        from rpython.rtyper.lltypesystem import lltype, rffi
        mydriver = JitDriver(greens=['a'], reds=['m'])
        def f1(m):
            a = lltype.nullptr(rffi.VOIDP.TO)
            while m > 0:
                mydriver.jit_merge_point(a=a, m=m)
                m = m - 1
        self.meta_interp(f1, [18])
    def test_bug_rawptrs(self):
        # a non-constant raw pointer as a green variable must work
        from rpython.rtyper.lltypesystem import lltype, rffi
        mydriver = JitDriver(greens=['a'], reds=['m'])
        def f1(m):
            a = lltype.malloc(rffi.VOIDP.TO, 5, flavor='raw')
            while m > 0:
                mydriver.jit_merge_point(a=a, m=m)
                m = m - 1
                if m == 10:
                    pass
            lltype.free(a, flavor='raw')
        self.meta_interp(f1, [18])
    def test_loop_automatic_reds(self):
        myjitdriver = JitDriver(greens = ['m'], reds = 'auto')
        def f(n, m):
            res = 0
            # try to have lots of red vars, so that if there is an error in
            # the ordering of reds, there are low chances that the test passes
            # by chance
            a = b = c = d = n
            while n > 0:
                myjitdriver.jit_merge_point(m=m)
                n -= 1
                a += 1 # dummy unused red
                b += 2 # dummy unused red
                c += 3 # dummy unused red
                d += 4 # dummy unused red
                res += m*2
            return res
        expected = f(21, 5)
        res = self.meta_interp(f, [21, 5])
        assert res == expected
        self.check_resops(int_sub=2, int_mul=0, int_add=10)
    def test_loop_automatic_reds_with_floats_and_refs(self):
        # same as above, but mixing int, float and instance red variables
        myjitdriver = JitDriver(greens = ['m'], reds = 'auto')
        class MyObj(object):
            def __init__(self, val):
                self.val = val
        def f(n, m):
            res = 0
            # try to have lots of red vars, so that if there is an error in
            # the ordering of reds, there are low chances that the test passes
            # by chance
            i1 = i2 = i3 = i4 = n
            f1 = f2 = f3 = f4 = float(n)
            r1 = r2 = r3 = r4 = MyObj(n)
            while n > 0:
                myjitdriver.jit_merge_point(m=m)
                n -= 1
                i1 += 1 # dummy unused red
                i2 += 2 # dummy unused red
                i3 += 3 # dummy unused red
                i4 += 4 # dummy unused red
                f1 += 1 # dummy unused red
                f2 += 2 # dummy unused red
                f3 += 3 # dummy unused red
                f4 += 4 # dummy unused red
                r1.val += 1 # dummy unused red
                r2.val += 2 # dummy unused red
                r3.val += 3 # dummy unused red
                r4.val += 4 # dummy unused red
                res += m*2
            return res
        expected = f(21, 5)
        res = self.meta_interp(f, [21, 5])
        assert res == expected
        self.check_resops(int_sub=2, int_mul=0, int_add=18, float_add=8)
    def test_loop_automatic_reds_livevars_before_jit_merge_point(self):
        # variables modified before the merge point must still be tracked
        myjitdriver = JitDriver(greens = ['m'], reds = 'auto')
        def f(n, m):
            res = 0
            while n > 0:
                n -= 1
                myjitdriver.jit_merge_point(m=m)
                res += m*2
            return res
        expected = f(21, 5)
        res = self.meta_interp(f, [21, 5])
        assert res == expected
        self.check_resops(int_sub=2, int_mul=0, int_add=2)
    def test_loop_automatic_reds_not_too_many_redvars(self):
        myjitdriver = JitDriver(greens = ['m'], reds = 'auto')
        def one():
            return 1
        def f(n, m):
            res = 0
            while n > 0:
                n -= one()
                myjitdriver.jit_merge_point(m=m)
                res += m*2
            return res
        expected = f(21, 5)
        res = self.meta_interp(f, [21, 5])
        assert res == expected
        oplabel = get_stats().loops[0].operations[0]
        assert len(oplabel.getarglist()) == 2     # 'n', 'res' in some order
    def test_inline_jit_merge_point(self):
        py.test.skip("fix the test if you want to re-enable this")
        # test that the machinery to inline jit_merge_points in callers
        # works. The final user does not need to mess manually with the
        # _inline_jit_merge_point_ attribute and similar, it is all nicely
        # handled by @JitDriver.inline() (see next tests)
        myjitdriver = JitDriver(greens = ['a'], reds = 'auto')
        def jit_merge_point(a, b):
            myjitdriver.jit_merge_point(a=a)
        def add(a, b):
            jit_merge_point(a, b)
            return a+b
        add._inline_jit_merge_point_ = jit_merge_point
        myjitdriver.inline_jit_merge_point = True
        def calc(n):
            res = 0
            while res < 1000:
                res = add(n, res)
            return res
        def f():
            return calc(1) + calc(3)
        res = self.meta_interp(f, [])
        assert res == 1000 + 1002
        self.check_resops(int_add=4)
    def test_jitdriver_inline(self):
        py.test.skip("fix the test if you want to re-enable this")
        myjitdriver = JitDriver(greens = [], reds = 'auto')
        class MyRange(object):
            def __init__(self, n):
                self.cur = 0
                self.n = n
            def __iter__(self):
                return self
            def jit_merge_point(self):
                myjitdriver.jit_merge_point()
            @myjitdriver.inline(jit_merge_point)
            def next(self):
                if self.cur == self.n:
                    raise StopIteration
                self.cur += 1
                return self.cur
        def f(n):
            res = 0
            for i in MyRange(n):
                res += i
            return res
        expected = f(21)
        res = self.meta_interp(f, [21])
        assert res == expected
        self.check_resops(int_eq=2, int_add=4)
        self.check_trace_count(1)
    def test_jitdriver_inline_twice(self):
        py.test.skip("fix the test if you want to re-enable this")
        myjitdriver = JitDriver(greens = [], reds = 'auto')
        def jit_merge_point(a, b):
            myjitdriver.jit_merge_point()
        @myjitdriver.inline(jit_merge_point)
        def add(a, b):
            return a+b
        def one(n):
            res = 0
            while res < 1000:
                res = add(n, res)
            return res
        def two(n):
            res = 0
            while res < 2000:
                res = add(n, res)
            return res
        def f(n):
            return one(n) + two(n)
        res = self.meta_interp(f, [1])
        assert res == 3000
        self.check_resops(int_add=4)
        self.check_trace_count(2)
    def test_jitdriver_inline_exception(self):
        py.test.skip("fix the test if you want to re-enable this")
        # this simulates what happens in a real case scenario: inside the next
        # we have a call which we cannot inline (e.g. space.next in the case
        # of W_InterpIterable), but we need to put it in a try/except block.
        # With the first "inline_in_portal" approach, this case crashed
        myjitdriver = JitDriver(greens = [], reds = 'auto')
        def inc(x, n):
            if x == n:
                raise OverflowError
            return x+1
        inc._dont_inline_ = True
        class MyRange(object):
            def __init__(self, n):
                self.cur = 0
                self.n = n
            def __iter__(self):
                return self
            def jit_merge_point(self):
                myjitdriver.jit_merge_point()
            @myjitdriver.inline(jit_merge_point)
            def next(self):
                try:
                    self.cur = inc(self.cur, self.n)
                except OverflowError:
                    raise StopIteration
                return self.cur
        def f(n):
            res = 0
            for i in MyRange(n):
                res += i
            return res
        expected = f(21)
        res = self.meta_interp(f, [21])
        assert res == expected
        self.check_resops(int_eq=2, int_add=4)
        self.check_trace_count(1)
    def test_callback_jit_merge_point(self):
        # a @jit_callback function gets an implicit merge point; two traces
        # are expected, one per branch of the comparison
        @jit_callback("testing")
        def callback(a, b):
            if a > b:
                return 1
            return -1
        def main():
            total = 0
            for i in range(10):
                total += callback(i, 2)
            return total
        res = self.meta_interp(main, [])
        assert res == 7 - 3
        self.check_trace_count(2)
    def test_jitdriver_single_jit_merge_point(self):
        # a single JitDriver instance may not own two different merge points
        jitdriver = JitDriver(greens=[], reds='auto')
        def g1(n):
            jitdriver.jit_merge_point()
            return n
        def g2():
            jitdriver.jit_merge_point()
        def f(n):
            if n:
                g1(n)
            else:
                g2()
        e = py.test.raises(AssertionError, self.meta_interp, f, [42])
        assert str(e.value) == ("there are multiple jit_merge_points "
                                "with the same jitdriver")
class TestWarmspotDirect(object):
    # Drives WarmRunnerDesc directly against a fake CPU: the fake fail
    # descrs simulate the three ways a compiled loop can exit, and the
    # tests check that _assembler_call_helper translates each one correctly.
    def setup_class(cls):
        from rpython.jit.metainterp.typesystem import llhelper
        from rpython.jit.codewriter.support import annotate
        from rpython.jit.metainterp.warmspot import WarmRunnerDesc
        from rpython.rtyper.rclass import OBJECT, OBJECT_VTABLE
        from rpython.rtyper.lltypesystem import lltype, llmemory
        exc_vtable = lltype.malloc(OBJECT_VTABLE, immortal=True)
        cls.exc_vtable = exc_vtable
        class FakeFailDescr(object):
            def __init__(self, no):
                self.no = no
            def handle_fail(self, deadframe, metainterp_sd, jitdrivers_sd):
                # each descr number simulates a different exit path
                no = self.no
                assert deadframe._no == no
                if no == 0:
                    # normal completion with an int result
                    raise jitexc.DoneWithThisFrameInt(3)
                if no == 1:
                    # fall back to the interpreter with fresh arguments
                    raise jitexc.ContinueRunningNormally(
                        [0], [], [], [1], [], [])
                if no == 3:
                    # exit by propagating an RPython-level exception
                    exc = lltype.malloc(OBJECT)
                    exc.typeptr = exc_vtable
                    raise jitexc.ExitFrameWithExceptionRef(
                        metainterp_sd.cpu,
                        lltype.cast_opaque_ptr(llmemory.GCREF, exc))
                assert 0
        class FakeDeadFrame:
            def __init__(self, no):
                self._no = no
        class FakeDescr:
            pass
        class FakeCPU(object):
            # minimal CPU stand-in: just enough attributes/methods for
            # WarmRunnerDesc to be constructed and descrs to round-trip
            supports_floats = False
            supports_longlong = False
            supports_singlefloats = False
            ts = llhelper
            translate_support_code = False
            stats = "stats"
            class tracker:
                pass
            def setup_descrs(self):
                return []
            def get_latest_descr(self, deadframe):
                assert isinstance(deadframe, FakeDeadFrame)
                return self.get_fail_descr_from_number(deadframe._no)
            def get_fail_descr_number(self, d):
                return -1
            def __init__(self, *args, **kwds):
                pass
            def nodescr(self, *args, **kwds):
                return FakeDescr()
            fielddescrof = nodescr
            calldescrof  = nodescr
            sizeof = nodescr
            def get_fail_descr_from_number(self, no):
                return FakeFailDescr(no)
            def make_execute_token(self, *ARGS):
                return "not callable"
        driver = JitDriver(reds = ['red'], greens = ['green'])
        def f(green):
            red = 0
            while red < 10:
                driver.can_enter_jit(red=red, green=green)
                driver.jit_merge_point(red=red, green=green)
                red += 1
            return red
        rtyper = annotate(f, [0])
        FakeCPU.rtyper = rtyper
        translator = rtyper.annotator.translator
        translator.config.translation.gc = 'hybrid'
        cls.desc = WarmRunnerDesc(translator, CPUClass=FakeCPU)
        cls.FakeDeadFrame = FakeDeadFrame
    def test_call_helper(self):
        from rpython.rtyper.llinterp import LLException
        [jd] = self.desc.jitdrivers_sd
        FakeDeadFrame = self.FakeDeadFrame
        # descr 0: DoneWithThisFrameInt(3) -> returns 3
        assert jd._assembler_call_helper(FakeDeadFrame(0), 0) == 3
        # descr 1: ContinueRunningNormally -> interpreter finishes, red == 10
        assert jd._assembler_call_helper(FakeDeadFrame(1), 0) == 10
        try:
            jd._assembler_call_helper(FakeDeadFrame(3), 0)
        except LLException as lle:
            # descr 3: the fake exception vtable must propagate out
            assert lle[0] == self.exc_vtable
        else:
            py.test.fail("DID NOT RAISE")
| |
# -*- coding: utf-8 -*-
import shutil
import tempfile
import unittest
import os
# from stalker import (db, User, Repository, Status, FilenameTemplate, Structure,
# StatusList, ImageFormat, Project, Type, Task, Version)
from anima.representation import Representation
class RepresentationTestCase(unittest.TestCase):
    """tests anima.repr.Representation class
    """
    # class-level defaults; both are re-created in setUpClass for each run
    temp_repo_path = tempfile.mkdtemp()
    remove_these_files_buffer = []
    @classmethod
    def create_version(cls, task, take_name):
        """A helper method for creating a new version
        :param task: the task
        :param take_name: the take_name name
        :return: the version
        """
        from stalker import Version
        from stalker.db.session import DBSession
        v = Version(task=task, take_name=take_name)
        DBSession.add(v)
        DBSession.commit()
        return v
    @classmethod
    def setUpClass(cls):
        """setup test
        """
        # -----------------------------------------------------------------
        # start of the setUp
        # create the environment variable and point it to a temp directory
        from stalker import db
        database_url = "sqlite:///:memory:"
        db.setup({'sqlalchemy.url': database_url})
        db.init()
        cls.temp_repo_path = tempfile.mkdtemp()
        from stalker import User
        cls.user1 = User(
            name='User 1',
            login='user1',
            email='user1@users.com',
            password='12345'
        )
        from stalker import Repository
        cls.repo1 = Repository(
            name='Test Project Repository',
            linux_path=cls.temp_repo_path,
            windows_path=cls.temp_repo_path,
            osx_path=cls.temp_repo_path
        )
        from stalker import Status
        cls.status_new = Status.query.filter_by(code='NEW').first()
        cls.status_wip = Status.query.filter_by(code='WIP').first()
        cls.status_comp = Status.query.filter_by(code='CMPL').first()
        from stalker import FilenameTemplate
        cls.task_template = FilenameTemplate(
            name='Task Template',
            target_entity_type='Task',
            path='{{project.code}}/'
                 '{%- for parent_task in parent_tasks -%}'
                 '{{parent_task.nice_name}}/'
                 '{%- endfor -%}',
            filename='{{version.nice_name}}'
                     '_v{{"%03d"|format(version.version_number)}}',
        )
        from stalker import Structure
        cls.structure = Structure(
            name='Project Struture',
            templates=[cls.task_template]
        )
        from stalker import StatusList
        cls.project_status_list = \
            StatusList.query.filter_by(target_entity_type='Project').first()
        from stalker import ImageFormat
        cls.image_format = ImageFormat(
            name='HD 1080',
            width=1920,
            height=1080,
            pixel_aspect=1.0
        )
        # create a test project
        from stalker import Project
        cls.project = Project(
            name='Test Project',
            code='TP',
            repository=cls.repo1,
            status_list=cls.project_status_list,
            structure=cls.structure,
            image_format=cls.image_format
        )
        cls.task_status_list =\
            StatusList.query.filter_by(target_entity_type='Task').first()
        from stalker import Type
        cls.character_type = Type(
            name='Character',
            code='CHAR',
            target_entity_type='Asset'
        )
        from stalker import Task
        # create a test series of root task
        cls.task1 = Task(
            name='Test Task 1',
            project=cls.project
        )
        cls.task2 = Task(
            name='Test Task 2',
            project=cls.project
        )
        # commit everything
        from stalker.db.session import DBSession
        DBSession.add_all([
            cls.repo1, cls.status_new, cls.status_wip, cls.status_comp,
            cls.project_status_list, cls.project, cls.task_status_list,
            cls.task1, cls.task2, cls.task_template
        ])
        DBSession.commit()
        # NOTE: a take name of the form "<base>@<repr>" marks the version
        # as the "<repr>" representation of the "<base>" take
        cls.version1 = cls.create_version(cls.task1, 'Main')
        cls.version2 = cls.create_version(cls.task1, 'Main')
        cls.version3 = cls.create_version(cls.task1, 'Main')
        # create other reprs
        # BBOX
        cls.version4 = cls.create_version(cls.task1, 'Main@BBox')
        cls.version5 = cls.create_version(cls.task1, 'Main@BBox')
        cls.version5.is_published = True
        DBSession.commit()
        # ASS
        cls.version6 = cls.create_version(cls.task1, 'Main@ASS')
        cls.version7 = cls.create_version(cls.task1, 'Main@ASS')
        cls.version7.is_published = True
        DBSession.commit()
        # GPU
        cls.version8 = cls.create_version(cls.task1, 'Main@GPU')
        cls.version9 = cls.create_version(cls.task1, 'Main@GPU')
        # Non default take name
        cls.version10 = cls.create_version(cls.task1, 'alt1')
        cls.version11 = cls.create_version(cls.task1, 'alt1')
        # Hires
        cls.version12 = cls.create_version(cls.task1, 'alt1@Hires')
        cls.version13 = cls.create_version(cls.task1, 'alt1@Hires')
        # Midres
        cls.version14 = cls.create_version(cls.task1, 'alt1@Midres')
        cls.version15 = cls.create_version(cls.task1, 'alt1@Midres')
        # Lores
        cls.version16 = cls.create_version(cls.task1, 'alt1@Lores')
        cls.version17 = cls.create_version(cls.task1, 'alt1@Lores')
        cls.version17.is_published = True
        # No Repr
        cls.version18 = cls.create_version(cls.task1, 'NoRepr')
        cls.version19 = cls.create_version(cls.task1, 'NoRepr')
        DBSession.commit()
        # create a buffer for extra created files, which are to be removed
        cls.remove_these_files_buffer = []
    @classmethod
    def tearDownClass(cls):
        """cleanup the test
        """
        # set the db.session to None
        from stalker.db.session import DBSession
        DBSession.remove()
        # delete the temp folder
        shutil.rmtree(cls.temp_repo_path, ignore_errors=True)
        # remove any extra files/folders registered by the tests
        for f in cls.remove_these_files_buffer:
            if os.path.isfile(f):
                os.remove(f)
            elif os.path.isdir(f):
                shutil.rmtree(f, True)
    def test_list_all_lists_all_representations(self):
        """testing if Representation.list_all() returns a list of strings
        showing the repr names.
        """
        expected_result = ['Base', 'BBox', 'ASS', 'GPU']
        rep = Representation(self.version1)
        result = rep.list_all()
        self.assertEqual(sorted(expected_result), sorted(result))
    def test_list_all_lists_all_representations_from_non_base_version(self):
        """testing if Representation.list_all() returns a list of strings
        showing the repr names by using non base version.
        """
        expected_result = ['Base', 'Hires', 'Midres', 'Lores']
        rep = Representation(self.version10)
        result = rep.list_all()
        self.assertEqual(sorted(expected_result), sorted(result))
    def test_find_method_finds_the_given_representation(self):
        """testing if Representation.find() finds the latest version with the
        given representation.
        """
        rep = Representation(self.version1)
        result = rep.find('BBox')
        self.assertEqual(self.version5, result)
    def test_find_method_finds_the_given_repr_from_different_repr(self):
        """testing if Representation.find() finds the latest version with the
        given representation from a different representation than the base one.
        """
        rep = Representation(self.version4)
        result = rep.find('ASS')
        self.assertEqual(self.version7, result)
    def test_find_method_returns_none_for_invalid_repr_name(self):
        """testing if Representation.find() returns None for invalid or
        nonexistent repr name
        """
        rep = Representation(self.version4)
        self.assertTrue(rep.find('NonExists') is None)
    def test_has_any_repr_method_is_working_properly(self):
        """testing if Representation.has_any_repr() method is working properly
        """
        rep = Representation(self.version1)
        self.assertTrue(rep.has_any_repr())
        rep.version = self.version17
        self.assertTrue(rep.has_any_repr())
        rep.version = self.version19
        self.assertFalse(rep.has_any_repr())
    def test_has_repr_method_is_working_properly(self):
        """testing if Representation.has_repr() method is working properly
        """
        rep = Representation(self.version1)
        self.assertTrue(rep.has_repr('BBox'))
        rep.version = self.version17
        self.assertTrue(rep.has_repr('Lores'))
        rep.version = self.version19
        self.assertFalse(rep.has_repr('BBox'))
    def test_get_base_take_name_is_working_properly(self):
        """testing if the Representation.get_base_take_name() method is working
        properly
        """
        rep = Representation()
        self.assertEqual('Main', rep.get_base_take_name(self.version1))
        self.assertEqual('alt1', rep.get_base_take_name(self.version10))
        self.assertEqual('alt1', rep.get_base_take_name(self.version12))
        self.assertEqual('NoRepr', rep.get_base_take_name(self.version18))
    def test_version_argument_is_skipped(self):
        """testing if it is possible to skip the version argument
        """
        rep = Representation()
        self.assertTrue(rep.version is None)
    def test_version_argument_is_none(self):
        """testing if the version argument can be None
        """
        rep = Representation(None)
        self.assertTrue(rep.version is None)
    def test_version_attribute_is_set_to_none(self):
        """testing if setting the version attribute to None is possible
        """
        rep = Representation(self.version1)
        self.assertFalse(rep.version is None)
        rep.version = None
        self.assertTrue(rep.version is None)
    def test_version_argument_is_not_a_version_instance(self):
        """testing if a TypeError will be raised when the version argument is
        not a Version instance
        """
        with self.assertRaises(TypeError) as cm:
            Representation('not a version')
        self.assertEqual(
            'Representation.version should be a '
            'stalker.models.version.Version instance, not str',
            str(cm.exception)
        )
    def test_version_attribute_is_not_a_version_instance(self):
        """testing if a TypeError will be raised when the version attribute is
        set to a value other then None and a Version instance
        """
        rep = Representation()
        with self.assertRaises(TypeError) as cm:
            rep.version = 'not a version'
        self.assertEqual(
            'Representation.version should be a '
            'stalker.models.version.Version instance, not str',
            str(cm.exception)
        )
    def test_version_argument_is_working_properly(self):
        """testing if the version argument value is correctly passed to the
        version attribute
        """
        rep = Representation(self.version1)
        self.assertEqual(rep.version, self.version1)
    def test_version_attribute_is_working_properly(self):
        """testing if the version attribute is working properly
        """
        rep = Representation(self.version1)
        self.assertNotEqual(rep.version, self.version2)
        rep.version = self.version2
        self.assertEqual(rep.version, self.version2)
    def test_is_base_method_is_working_properly(self):
        """testing if Representation.is_base() method is working properly
        """
        rep = Representation(self.version1)
        self.assertTrue(rep.is_base())
        rep = Representation(self.version4)
        self.assertFalse(rep.is_base())
    def test_is_repr_method_is_working_properly(self):
        """testing if Representation.is_repr() method is working properly
        """
        rep = Representation(self.version1)
        self.assertTrue(rep.is_repr('Base'))
        rep = Representation(self.version4)
        self.assertFalse(rep.is_repr('Base'))
        rep = Representation(self.version4)
        self.assertTrue(rep.is_repr('BBox'))
    def test_repr_property_is_working_properly(self):
        """testing if Representation.repr property is working properly
        """
        rep = Representation(self.version1)
        self.assertEqual(rep.repr, 'Base')
        rep = Representation(self.version4)
        self.assertTrue(rep.repr, 'BBox')
| |
import json
from django.test.client import Client
from mock import *
from django.contrib.auth.models import User, Group
from survey.models.users import UserProfile
from survey.tests.base_test import BaseTest
from survey.forms.users import UserForm, EditUserForm
class UsersViewTest(BaseTest):
    def setUp(self):
        # Anonymous test client plus two users: one without any permission
        # and one ('Rajni') granted 'can_view_users', who is logged in for
        # the rest of the test methods.
        self.client = Client()
        self.user_without_permission = User.objects.create_user(username='useless', email='rajni@kant.com', password='I_Suck')
        self.raj = self.assign_permission_to(User.objects.create_user('Rajni', 'rajni@kant.com', 'I_Rock'), 'can_view_users')
        self.client.login(username='Rajni', password='I_Rock')
def test_new(self):
response = self.client.get('/users/new/')
self.failUnlessEqual(response.status_code, 200)
templates = [template.name for template in response.templates]
self.assertIn('users/new.html', templates)
self.assertEquals(response.context['action'], '/users/new/')
self.assertEquals(response.context['id'], 'create-user-form')
self.assertEquals(response.context['class'], 'user-form')
self.assertEquals(response.context['button_label'], 'Create')
self.assertEquals(response.context['loading_text'], 'Creating...')
self.assertEquals(response.context['country_phone_code'], '256')
self.assertIsInstance(response.context['userform'], UserForm)
self.assertEqual(response.context['title'], 'New User')
    @patch('django.contrib.messages.success')
    def test_create_users(self, success_message):
        """POSTing a valid new-user form creates the User, its UserProfile
        and the group membership, redirects, and flashes a success message."""
        some_group = Group.objects.create()
        form_data = {
            'username':'knight',
            'password1':'mk',
            'password2':'mk',
            'first_name':'michael',
            'last_name':'knight',
            'mobile_number':'123456789',
            'email':'mm@mm.mm',
            'groups':some_group.id,
        }
        # precondition: no such user exists yet
        user = User.objects.filter(username=form_data['username'])
        self.failIf(user)
        response = self.client.post('/users/new/', data=form_data)
        # 302: successful creation redirects away from the form
        self.failUnlessEqual(response.status_code, 302)
        user = User.objects.get(username=form_data['username'])
        self.failUnless(user.id)
        for key in ['username', 'first_name', 'last_name', 'email']:
            value = getattr(user, key)
            self.assertEqual(form_data[key], str(value))
        user_groups = user.groups.all()
        self.assertEquals(len(user_groups), 1)
        self.assertIn(some_group, user_groups)
        user_profile = UserProfile.objects.filter(user=user)
        self.failUnless(user_profile)
        self.assertEquals(user_profile[0].mobile_number, form_data['mobile_number'])
        # the success flash message hook must have been invoked
        assert success_message.called
def test_create_users_unsuccessful(self):
some_group = Group.objects.create()
form_data = {
'username':'knight',
'password':'mk',
'confirm_password':'mk',
'first_name':'michael',
'last_name':'knight',
'mobile_number':'123456789',
'email':'mm@mm.mm',
'groups':some_group.id,
}
user = User.objects.filter(username=form_data['username'])
self.failIf(user)
form_data['confirm_password']='hahahaha'
response = self.client.post('/users/new/', data=form_data)
self.failUnlessEqual(response.status_code, 200)
user = User.objects.filter(username=form_data['username'])
self.failIf(user)
print response.context['messages']._loaded_messages[0].message
print dir(response.context['messages']._loaded_messages[0].message)
# assert False
self.assertEqual(1, len(response.context['messages']._loaded_messages))
self.assertIn("User not registered. See errors below.", response.context['messages']._loaded_messages[0].message)
form_data['confirm_password']= form_data['password']
unexisting_group_id = 123456677
form_data['groups'] = unexisting_group_id
response = self.client.post('/users/new/', data=form_data)
self.failUnlessEqual(response.status_code, 200)
user = User.objects.filter(username=form_data['username'])
self.failIf(user)
self.assertEqual(1, len(response.context['messages']._loaded_messages))
self.assertIn("User not registered. See errors below.", response.context['messages']._loaded_messages[0].message)
form_data['groups']= some_group.id
user = User.objects.create(username='some_other_name')
userprofile = UserProfile.objects.create(user=user, mobile_number=form_data['mobile_number'])
response = self.client.post('/users/new/', data=form_data)
self.failUnlessEqual(response.status_code, 200)
user = User.objects.filter(username=form_data['username'])
self.failIf(user)
self.assertEqual(1, len(response.context['messages']._loaded_messages))
self.assertIn("User not registered. See errors below.", response.context['messages']._loaded_messages[0].message)
def test_index(self):
response = self.client.get('/users/')
self.failUnlessEqual(response.status_code, 200)
def test_check_mobile_number(self):
user = User.objects.create(username='some_other_name')
userprofile = UserProfile.objects.create(user=user, mobile_number='123456789')
response = self.client.get('/users/?mobile_number=987654321')
self.failUnlessEqual(response.status_code, 200)
json_response = json.loads(response.content)
self.assertTrue(json_response)
response = self.client.get("/users/?mobile_number=" + userprofile.mobile_number)
self.failUnlessEqual(response.status_code, 200)
json_response = json.loads(response.content)
self.assertFalse(json_response)
def test_check_username(self):
user = User.objects.create(username='some_other_name')
response = self.client.get('/users/?username=rajni')
self.failUnlessEqual(response.status_code, 200)
json_response = json.loads(response.content)
self.assertTrue(json_response)
response = self.client.get("/users/?username=" + user.username)
self.failUnlessEqual(response.status_code, 200)
json_response = json.loads(response.content)
self.assertFalse(json_response)
def test_check_email(self):
user = User.objects.create(email='haha@ha.ha')
self.client.login(username='Rajni', password='I_suck')
response = self.client.get('/users/?email=bla@bla.bl')
self.failUnlessEqual(response.status_code, 200)
json_response = json.loads(response.content)
self.assertTrue(json_response)
response = self.client.get("/users/?email=" + user.email)
self.failUnlessEqual(response.status_code, 200)
json_response = json.loads(response.content)
self.assertFalse(json_response)
def test_list_users(self):
user = User.objects.create()
response = self.client.get('/users/')
self.failUnlessEqual(response.status_code, 200)
templates = [template.name for template in response.templates]
self.assertIn('users/index.html', templates)
self.assertEqual(len(response.context['users']), 3)
self.assertIn(user, response.context['users'])
self.assertNotEqual(None, response.context['request'])
def test_edit_user_view(self):
    """The edit-user view renders the form with the expected configuration."""
    user = User.objects.create_user('andrew', 'a@m.vom', 'pass')
    UserProfile.objects.create(user=user, mobile_number='200202020')
    url = "/users/" + str(user.pk) + "/edit/"

    response = self.client.get(url)

    self.assertEqual(response.status_code, 200)
    templates = [template.name for template in response.templates]
    self.assertIn('users/new.html', templates)
    # Check the form chrome handed to the template in one pass.
    # (assertEqual replaces the deprecated assertEquals alias.)
    expected_context = {
        'action': url,
        'id': 'edit-user-form',
        'class': 'user-form',
        'title': 'Edit User',
        'button_label': 'Save',
        'loading_text': 'Saving...',
        'country_phone_code': '256',
    }
    for key, value in expected_context.items():
        self.assertEqual(response.context[key], value)
    self.assertIsInstance(response.context['userform'], EditUserForm)
def test_edit_user_updates_user_information(self):
    """POSTing edited data updates the user and stores a hashed password."""
    form_data = {
        'username': 'knight',
        'password': 'mk',
        'confirm_password': 'mk',
        'first_name': 'michael',
        'last_name': 'knight',
        'mobile_number': '123456789',
        'email': 'mm@mm.mm',
    }
    # assertFalse replaces the deprecated failIf alias.
    self.assertFalse(User.objects.filter(username=form_data['username']))
    user = User.objects.create(
        username=form_data['username'], email=form_data['email'],
        password=form_data['password'])
    UserProfile.objects.create(user=user, mobile_number=form_data['mobile_number'])

    # Identical payload except for the changed last name.
    data = dict(form_data, last_name='knightngale')
    response = self.client.post('/users/' + str(user.pk) + '/edit/', data=data)

    self.assertEqual(response.status_code, 302)
    edited_user = User.objects.filter(last_name=data['last_name'])
    self.assertEqual(1, edited_user.count())
    # check_password only passes if the view hashed the submitted password.
    self.assertTrue(edited_user[0].check_password(data['password']))
def test_edit_username_not_allowed(self):
    """Changing the username through the edit form is rejected with a message."""
    form_data = {
        'username': 'knight',
        'password': 'mk',
        'confirm_password': 'mk',
        'first_name': 'michael',
        'last_name': 'knight',
        'mobile_number': '123456789',
        'email': 'mm@mm.mm',
    }
    # assertFalse/assertTrue replace the deprecated failIf/failUnless aliases.
    self.assertFalse(User.objects.filter(username=form_data['username']))
    user = User.objects.create(
        username=form_data['username'], email=form_data['email'],
        password=form_data['password'])
    UserProfile.objects.create(user=user, mobile_number=form_data['mobile_number'])

    data = form_data.copy()
    data['username'] = 'changed'
    response = self.client.post('/users/' + str(user.pk) + '/edit/', data=data)

    # The form re-renders (200) instead of redirecting on success.
    self.assertEqual(response.status_code, 200)
    self.assertFalse(User.objects.filter(username=data['username']))
    self.assertTrue(
        User.objects.filter(username=form_data['username'], email=form_data['email']))
    self.assertEqual(1, len(response.context['messages']._loaded_messages))
    self.assertIn("User not edited. See errors below.",
                  response.context['messages']._loaded_messages[0].message)
def test_current_user_edits_his_own_profile(self):
    """A non-admin user may edit his own profile, but a submitted password
    change is ignored — the original password keeps working."""
    form_data = {
        'username': 'knight',
        'password': 'mk',
        'confirm_password': 'mk',
        'first_name': 'michael',
        'last_name': 'knight',
        'mobile_number': '123456789',
        'email': 'mm@mm.mm',
    }
    # assertFalse replaces the deprecated failIf alias.
    self.assertFalse(User.objects.filter(username=form_data['username']))
    user_without_permission = User.objects.create(
        username=form_data['username'], email=form_data['email'])
    user_without_permission.set_password(form_data['password'])
    user_without_permission.save()
    UserProfile.objects.create(
        user=user_without_permission, mobile_number=form_data['mobile_number'])

    # Act as the profile's owner rather than the privileged default user.
    self.client.logout()
    self.client.login(username=form_data['username'], password=form_data['password'])

    data = {
        'username': 'knight',
        'first_name': 'michael',
        'password': 'changed mk',
        'confirm_password': 'changed mk',
        'last_name': 'knightngale',
        'mobile_number': '123456789',
        'email': 'mm@mm.mm',
    }
    response = self.client.post(
        '/users/' + str(user_without_permission.pk) + '/edit/', data=data)

    self.assertEqual(response.status_code, 302)
    edited_user = User.objects.filter(last_name=data['last_name'])
    self.assertEqual(1, edited_user.count())
    # Original password still valid; the submitted new one was not applied.
    self.assertTrue(edited_user[0].check_password(form_data['password']))
    self.assertFalse(edited_user[0].check_password(data['password']))
def test_a_non_admin_user_cannot_POST_edit_other_users_profile(self):
    """POSTing an edit for another user's profile redirects to the login
    page, records a permission message, and leaves the target unchanged."""
    intruder = User.objects.create_user(
        username='notpermitted', email='rajni@kant.com', password='I_Suck')
    self.client.logout()
    self.client.login(username=intruder.username, password='I_Suck')

    payload = {
        'username': 'knight',
        'first_name': 'michael',
        'last_name': 'knightngale',
        'mobile_number': '123456789',
        'email': 'mm@mm.mm',
    }
    rajni_before = User.objects.filter(username=self.raj).values()[0]
    edit_rajni_url = '/users/%s/edit/' % self.raj.pk

    response = self.client.post(edit_rajni_url, data=payload)

    self.assertRedirects(
        response, expected_url="/accounts/login/?next=%s" % edit_rajni_url)
    expected_message = "Current user, %s, is not allowed to perform this action. " \
        "Please log in a user with enough privileges." % intruder.get_full_name()
    self.assertIn(expected_message, response.cookies['messages'].value)
    # Every original attribute still matches exactly one user record.
    self.assertEqual(1, User.objects.filter(**rajni_before).count())
def test_a_non_admin_user_cannot_GET_edit_other_users_profile(self):
    """GETting another user's edit form redirects to the login page,
    records a permission message, and leaves the target unchanged."""
    intruder = User.objects.create_user(
        username='notpermitted', email='rajni@kant.com', password='I_Suck')
    self.client.logout()
    self.client.login(username=intruder.username, password='I_Suck')

    rajni_before = User.objects.filter(username=self.raj).values()[0]
    edit_rajni_url = '/users/%s/edit/' % self.raj.pk

    response = self.client.get(edit_rajni_url)

    self.assertRedirects(
        response, expected_url="/accounts/login/?next=%s" % edit_rajni_url)
    expected_message = "Current user, %s, is not allowed to perform this action. " \
        "Please log in a user with enough privileges." % intruder.get_full_name()
    self.assertIn(expected_message, response.cookies['messages'].value)
    # Every original attribute still matches exactly one user record.
    self.assertEqual(1, User.objects.filter(**rajni_before).count())
def test_view_user_details(self):
    """The detail view renders the show template for an existing user."""
    user = User.objects.create_user(
        username='rrrajni', email='rrajni@kant.com', password='I_Rock_0',
        first_name='some name', last_name='last_name')
    UserProfile.objects.create(user=user, mobile_number='123456666')

    response = self.client.get('/users/%d/' % user.id)

    # assertEqual replaces the deprecated failUnlessEqual/assertEquals aliases.
    self.assertEqual(response.status_code, 200)
    templates = [template.name for template in response.templates]
    self.assertIn('users/show.html', templates)
    self.assertEqual(response.context['the_user'], user)
    self.assertEqual(response.context['cancel_url'], '/users/')
def test_view_user_details_when_no_such_user_exists(self):
    """Requesting details of a missing user redirects back with an error."""
    missing_user_id = 111
    response = self.client.get('/users/%d/' % missing_user_id)
    self.assertRedirects(response, expected_url="/users/")
    self.assertIn("User not found.", response.cookies['messages'].value)
def test_deactivate_user(self):
    """Deactivating an existing user redirects with a success message."""
    target = User.objects.create_user(
        username='rrrajni', email='rrajni@kant.com', password='I_Rock_0',
        first_name='some name', last_name='last_name')
    UserProfile.objects.create(user=target, mobile_number='123456666')

    response = self.client.get('/users/%d/deactivate/' % target.id)

    self.assertRedirects(response, expected_url="/users/")
    self.assertIn("User %s successfully deactivated." % target.username,
                  response.cookies['messages'].value)
def test_deactivate_user_when_no_such_user_exist(self):
    """Deactivating a missing user redirects back with an error message."""
    missing_user_id = 222
    response = self.client.get('/users/%d/deactivate/' % missing_user_id)
    self.assertRedirects(response, expected_url="/users/")
    self.assertIn("User not found.", response.cookies['messages'].value)
def test_reactivate_user(self):
    """Re-activating a deactivated user redirects with a success message."""
    target = User.objects.create_user(
        username='rrrajni', email='rrajni@kant.com', password='I_Rock_0',
        first_name='some name', last_name='last_name')
    UserProfile.objects.create(user=target, mobile_number='123456666')

    # Start from a deactivated account.
    target.is_active = False
    target.save()
    self.assertFalse(target.is_active)

    response = self.client.get('/users/%d/activate/' % target.id)

    self.assertRedirects(response, expected_url="/users/")
    self.assertIn("User %s successfully re-activated." % target.username,
                  response.cookies['messages'].value)
def test_activate_user_when_no_such_user_exist(self):
    """Activating a missing user redirects back with an error message.

    Renamed from test_deactivate_user_when_no_such_user_exist: the class
    defined that name twice, so this activate-path test silently replaced
    the deactivate-path test above and the latter never ran.
    """
    non_existing_user_id = 222
    response = self.client.get('/users/%d/activate/' % non_existing_user_id)
    self.assertRedirects(response, expected_url="/users/")
    self.assertIn("User not found.", response.cookies['messages'].value)
def test_restricted_permission(self):
    """Every users URL requires a sufficiently privileged, logged-in user."""
    restricted_urls = (
        '/users/new/',
        '/users/',
        '/users/1/',
        '/users/1/deactivate/',
        '/users/1/activate/',
    )
    for url in restricted_urls:
        self.assert_restricted_permission_for(url)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Script to update the dependencies in various configuration files."""
import os
import sys
# Change PYTHONPATH to include plaso.
sys.path.insert(0, u'.')
import plaso.dependencies
class DPKGControlWriter(object):
    """Writes the dpkg control file at config/dpkg/control."""

    _PATH = os.path.join(u'config', u'dpkg', u'control')

    _MAINTAINER = (
        u'Log2Timeline maintainers <log2timeline-maintainers@googlegroups.com>')

    _FILE_HEADER = [
        u'Source: plaso',
        u'Section: python',
        u'Priority: extra',
        u'Maintainer: {0:s}'.format(_MAINTAINER),
        u'Build-Depends: debhelper (>= 7.0.0), python, python-setuptools',
        u'Standards-Version: 3.9.5',
        u'X-Python-Version: >= 2.7',
        u'Homepage: https://github.com/log2timeline/plaso',
        u'',
        u'Package: python-plaso',
        u'Architecture: all']

    _FILE_FOOTER = [
        u'Description: Super timeline all the things',
        u' Log2Timeline is a framework to create super timelines. Its purpose',
        u' is to extract timestamps from various files found on typical computer',
        (u' systems and aggregate them. Plaso is the Python rewrite of '
         u'log2timeline.'),
        u'']

    def Write(self):
        """Writes the dpkg control file, UTF-8 encoded, to self._PATH."""
        # The Depends field sits between the fixed header and footer.
        depends = u', '.join(plaso.dependencies.GetDPKGDepends())
        lines = list(self._FILE_HEADER)
        lines.append(
            u'Depends: {0:s}, ${{python:Depends}}, ${{misc:Depends}}'.format(
                depends))
        lines.extend(self._FILE_FOOTER)
        with open(self._PATH, 'wb') as file_object:
            file_object.write(u'\n'.join(lines).encode(u'utf-8'))
class GIFTInstallScriptWriter(object):
    """Class to help write the install_gift_and_dependencies.sh file."""

    _PATH = os.path.join(
        u'config', u'linux', u'install_gift_and_dependencies.sh')

    _FILE_HEADER = [
        u'#!/usr/bin/env bash',
        u'set -e',
        u'',
        u'# Dependencies for running Plaso, alphabetized, one per line.',
        (u'# This should not include packages only required for testing or '
         u'development.')]

    _FILE_FOOTER = [
        u'',
        u'# Additional dependencies for running Plaso tests, alphabetized,',
        u'# one per line.',
        u'TEST_DEPENDENCIES="python-mock";',
        u'',
        u'# Additional dependencies for doing Plaso debugging, alphabetized,',
        u'# one per line.',
        u'DEBUG_DEPENDENCIES="python-guppy";',
        u'',
        u'# Additional dependencies for doing Plaso development, alphabetized,',
        u'# one per line.',
        u'DEVELOPMENT_DEPENDENCIES="python-sphinx',
        u' pylint";',
        u'',
        u'sudo add-apt-repository ppa:gift/dev -y',
        u'sudo apt-get update -q',
        u'sudo apt-get install -y ${PLASO_DEPENDENCIES}',
        u'',
        u'if [[ "$*" =~ "include-debug" ]]; then',
        u' sudo apt-get install -y ${DEBUG_DEPENDENCIES}',
        u'fi',
        u'',
        u'if [[ "$*" =~ "include-development" ]]; then',
        u' sudo apt-get install -y ${DEVELOPMENT_DEPENDENCIES}',
        u'fi',
        u'',
        u'if [[ "$*" =~ "include-test" ]]; then',
        u' sudo apt-get install -y ${TEST_DEPENDENCIES}',
        u'fi',
        u'']

    def Write(self):
        """Writes an install_gift_and_dependencies.sh file.

        Emits the PLASO_DEPENDENCIES shell variable as a multi-line,
        double-quoted list. Unlike the previous enumerate-based version,
        the assignment is also correctly terminated with '";' when there
        are zero or one dependencies (the old index == 0 branch won over
        the last-item branch, leaving the quote unclosed).
        """
        file_content = []
        file_content.extend(self._FILE_HEADER)

        dependencies = plaso.dependencies.GetDPKGDepends(exclude_version=True)
        # One dependency per line; continuation lines keep the single-space
        # indentation used elsewhere in this script.
        file_content.append(u'PLASO_DEPENDENCIES="{0:s}";'.format(
            u'\n '.join(dependencies)))

        file_content.extend(self._FILE_FOOTER)

        file_content = u'\n'.join(file_content)
        file_content = file_content.encode(u'utf-8')

        with open(self._PATH, 'wb') as file_object:
            file_object.write(file_content)
class RequirementsWriter(object):
    """Writes the requirements.txt file."""

    _PATH = u'requirements.txt'

    _FILE_HEADER = [
        u'pip >= 7.0.0',
        u'pytest',
        u'mock']

    def Write(self):
        """Writes a requirements.txt file, UTF-8 encoded, to self._PATH."""
        lines = list(self._FILE_HEADER)
        lines.extend(
            u'{0:s}'.format(dependency)
            for dependency in plaso.dependencies.GetInstallRequires())
        with open(self._PATH, 'wb') as file_object:
            file_object.write(u'\n'.join(lines).encode(u'utf-8'))
class SetupCfgWriter(object):
    """Writes the setup.cfg file."""

    _PATH = u'setup.cfg'

    _MAINTAINER = (
        u'Log2Timeline maintainers <log2timeline-maintainers@googlegroups.com>')

    _FILE_HEADER = [
        u'[sdist]',
        u'template = MANIFEST.in',
        u'manifest = MANIFEST',
        u'',
        u'[sdist_test_data]',
        u'template = MANIFEST.test_data.in',
        u'manifest = MANIFEST.test_data',
        u'',
        u'[bdist_rpm]',
        u'release = 1',
        u'packager = {0:s}'.format(_MAINTAINER),
        u'doc_files = ACKNOWLEDGEMENTS',
        u' AUTHORS',
        u' LICENSE',
        u' README',
        u'build_requires = python-setuptools']

    def Write(self):
        """Writes a setup.cfg file, UTF-8 encoded, to self._PATH."""
        lines = list(self._FILE_HEADER)
        # First RPM requirement carries the key; the rest are continuations.
        for index, dependency in enumerate(plaso.dependencies.GetRPMRequires()):
            prefix = u'requires = ' if index == 0 else u' '
            lines.append(u'{0:s}{1:s}'.format(prefix, dependency))
        with open(self._PATH, 'wb') as file_object:
            file_object.write(u'\n'.join(lines).encode(u'utf-8'))
class TravisBeforeInstallScript(object):
    """Class to help write the Travis-CI install.sh file."""

    _PATH = os.path.join(u'config', u'travis', u'install.sh')

    _FILE_HEADER = [
        u'#!/bin/bash',
        u'#',
        u'# Script to set up Travis-CI test VM.',
        u'',
        (u'COVERALL_DEPENDENCIES="python-coverage python-coveralls '
         u'python-docopt";'),
        u'']

    # TODO: add Python3 dependencies.
    _FILE_FOOTER = [
        u'',
        u'# Exit on error.',
        u'set -e;',
        u'',
        u'if test `uname -s` = "Darwin";',
        u'then',
        u'\tgit clone https://github.com/log2timeline/l2tdevtools.git;',
        u'',
        u'\tmv l2tdevtools ../;',
        u'\tmkdir dependencies;',
        u'',
        (u'\tPYTHONPATH=../l2tdevtools ../l2tdevtools/tools/update.py '
         u'--download-directory=dependencies --preset=plaso;'),
        u'',
        u'elif test `uname -s` = "Linux";',
        u'then',
        u'\tsudo rm -f /etc/apt/sources.list.d/travis_ci_zeromq3-source.list;',
        u'',
        u'\tsudo add-apt-repository ppa:gift/dev -y;',
        u'\tsudo apt-get update -q;',
        (u'\tsudo apt-get install -y ${COVERALL_DEPENDENCIES} '
         u'${PYTHON2_DEPENDENCIES} ${PYTHON2_TEST_DEPENDENCIES};'),
        u'fi',
        u'']

    def Write(self):
        """Writes a Travis-CI install.sh file.

        Fixes the copy-pasted docstring (this writer emits install.sh, not
        setup.cfg) and computes the DPKG dependency list once instead of
        calling GetDPKGDepends twice.
        """
        file_content = []
        file_content.extend(self._FILE_HEADER)

        dependencies = plaso.dependencies.GetDPKGDepends(exclude_version=True)
        dependencies = u' '.join(dependencies)
        file_content.append(u'PYTHON2_DEPENDENCIES="{0:s}";'.format(dependencies))
        file_content.append(u'')

        # TODO: determine test dependencies from plaso.dependencies.
        file_content.append(u'PYTHON2_TEST_DEPENDENCIES="python-mock";')
        file_content.append(u'')

        # Derive the Python 3 package names from the Python 2 list.
        dependencies = dependencies.replace(u'python', u'python3')
        file_content.append(u'PYTHON3_DEPENDENCIES="{0:s}";'.format(dependencies))
        file_content.append(u'')

        # TODO: determine test dependencies from plaso.dependencies.
        file_content.append(u'PYTHON3_TEST_DEPENDENCIES="python3-mock";')

        file_content.extend(self._FILE_FOOTER)

        file_content = u'\n'.join(file_content)
        file_content = file_content.encode(u'utf-8')

        with open(self._PATH, 'wb') as file_object:
            file_object.write(file_content)
if __name__ == u'__main__':
    # Regenerate every managed configuration file in order.
    for writer_class in (
            DPKGControlWriter, GIFTInstallScriptWriter, RequirementsWriter,
            SetupCfgWriter, TravisBeforeInstallScript):
        writer_class().Write()
| |
import string
from sympy import (bernoulli, Symbol, symbols, Dummy, S, Sum, Rational,
oo, zoo, pi, I, simplify, expand_func, harmonic,
bell, fibonacci, lucas, euler, catalan, binomial, gamma,
sqrt, hyper, log, digamma, trigamma, polygamma, diff,
EulerGamma, factorial, sin, cos, cot, cancel, zeta)
from sympy.utilities.pytest import XFAIL, raises
x = Symbol('x')
def test_bernoulli():
    """Bernoulli numbers, Bernoulli polynomials and large-index evaluation."""
    known_numbers = [
        (0, 1), (1, Rational(-1, 2)), (2, Rational(1, 6)), (3, 0),
        (4, Rational(-1, 30)), (5, 0), (6, Rational(1, 42)), (7, 0),
        (8, Rational(-1, 30)), (10, Rational(5, 66)), (1000001, 0)]
    for index, value in known_numbers:
        assert bernoulli(index) == value

    # Bernoulli polynomials B_n(x).
    assert bernoulli(0, x) == 1
    assert bernoulli(1, x) == x - Rational(1, 2)
    assert bernoulli(2, x) == x**2 - x + Rational(1, 6)
    assert bernoulli(3, x) == x**3 - (3*x**2)/2 + x/2

    # Should be fast; computed with mpmath.
    b = bernoulli(1000)
    assert b.p % 10**10 == 7950421099
    assert b.q == 342999030

    b = bernoulli(10**6, evaluate=False).evalf()
    assert str(b) == '-2.23799235765713e+4767529'
def test_fibonacci():
    """Fibonacci/Lucas numbers (including negative index) and polynomials."""
    assert [fibonacci(n) for n in range(-3, 5)] == [2, -1, 1, 0, 1, 1, 2, 3]
    assert fibonacci(100) == 354224848179261915075
    assert [lucas(n) for n in range(-3, 5)] == [-4, 3, -1, 2, 1, 3, 4, 7]
    assert lucas(100) == 792070839848372253127

    # Fibonacci polynomials F_n(x).
    fibonacci_polys = [(1, 1), (2, x), (3, x**2 + 1), (4, x**3 + 2*x)]
    for degree, poly in fibonacci_polys:
        assert fibonacci(degree, x) == poly
def test_bell():
    """Bell numbers, Bell polynomials and partial Bell polynomials."""
    assert [bell(i) for i in range(8)] == [1, 1, 2, 5, 15, 52, 203, 877]

    # Bell polynomials B_n(x).
    bell_polys = [
        (0, 1), (1, x), (2, x**2 + x),
        (5, x**5 + 10*x**4 + 25*x**3 + 15*x**2 + x)]
    for degree, poly in bell_polys:
        assert bell(degree, x) == poly

    X = symbols('x:6')
    # X = (x0, x1, .. x5)
    # at the same time: X[1] = x1, X[2] = x2 for standard readablity.
    # but we must supply zero-based indexed object X[1:] = (x1, .. x5)
    assert bell(6, 2, X[1:]) == 6*X[5]*X[1] + 15*X[4]*X[2] + 10*X[3]**2
    assert bell(
        6, 3, X[1:]) == 15*X[4]*X[1]**2 + 60*X[3]*X[2]*X[1] + 15*X[2]**3

    # Numeric arguments in place of the symbols.
    X = (1, 10, 100, 1000, 10000)
    assert bell(6, 2, X) == (6 + 15 + 10)*10000

    X = (1, 2, 3, 3, 5)
    assert bell(6, 2, X) == 6*5 + 15*3*2 + 10*3**2

    X = (1, 2, 3, 5)
    assert bell(6, 3, X) == 15*5 + 60*3*2 + 15*2**3
def test_harmonic():
    """Generalized harmonic numbers H(n, m): small values and n -> oo."""
    n = Symbol("n")
    assert harmonic(n, 0) == n
    assert harmonic(n, 1) == harmonic(n)

    # H(i, m) for i = 0..4 at m = 1, 2, 3.
    small_values = {
        1: [0, 1, Rational(3, 2), Rational(11, 6), Rational(25, 12)],
        2: [0, 1, Rational(5, 4), Rational(49, 36), Rational(205, 144)],
        3: [0, 1, Rational(9, 8), Rational(251, 216), Rational(2035, 1728)],
    }
    for m, values in small_values.items():
        for i, value in enumerate(values):
            assert harmonic(i, m) == value

    # Limits as n -> oo.
    assert harmonic(oo, -1) == S.NaN
    assert harmonic(oo, 0) == oo
    assert harmonic(oo, S.Half) == oo
    assert harmonic(oo, 1) == oo
    assert harmonic(oo, 2) == (pi**2)/6
    assert harmonic(oo, 3) == zeta(3)
def test_harmonic_rational():
ne = S(6)
no = S(5)
pe = S(8)
po = S(9)
qe = S(10)
qo = S(13)
Heee = harmonic(ne + pe/qe)
Aeee = (-log(10) + 2*(-1/S(4) + sqrt(5)/4)*log(sqrt(-sqrt(5)/8 + 5/S(8)))
+ 2*(-sqrt(5)/4 - 1/S(4))*log(sqrt(sqrt(5)/8 + 5/S(8)))
+ pi*(1/S(4) + sqrt(5)/4)/(2*sqrt(-sqrt(5)/8 + 5/S(8)))
+ 13944145/S(4720968))
Heeo = harmonic(ne + pe/qo)
Aeeo = (-log(26) + 2*log(sin(3*pi/13))*cos(4*pi/13) + 2*log(sin(2*pi/13))*cos(32*pi/13)
+ 2*log(sin(5*pi/13))*cos(80*pi/13) - 2*log(sin(6*pi/13))*cos(5*pi/13)
- 2*log(sin(4*pi/13))*cos(pi/13) + pi*cot(5*pi/13)/2 - 2*log(sin(pi/13))*cos(3*pi/13)
+ 2422020029/S(702257080))
Heoe = harmonic(ne + po/qe)
Aeoe = (-log(20) + 2*(1/S(4) + sqrt(5)/4)*log(-1/S(4) + sqrt(5)/4)
+ 2*(-1/S(4) + sqrt(5)/4)*log(sqrt(-sqrt(5)/8 + 5/S(8)))
+ 2*(-sqrt(5)/4 - 1/S(4))*log(sqrt(sqrt(5)/8 + 5/S(8)))
+ 2*(-sqrt(5)/4 + 1/S(4))*log(1/S(4) + sqrt(5)/4)
+ 11818877030/S(4286604231) - pi*sqrt(sqrt(5)/8 + 5/S(8))/(-sqrt(5)/2 + 1/S(2)) )
Heoo = harmonic(ne + po/qo)
Aeoo = (-log(26) + 2*log(sin(3*pi/13))*cos(54*pi/13) + 2*log(sin(4*pi/13))*cos(6*pi/13)
+ 2*log(sin(6*pi/13))*cos(108*pi/13) - 2*log(sin(5*pi/13))*cos(pi/13)
- 2*log(sin(pi/13))*cos(5*pi/13) + pi*cot(4*pi/13)/2
- 2*log(sin(2*pi/13))*cos(3*pi/13) + 11669332571/S(3628714320))
Hoee = harmonic(no + pe/qe)
Aoee = (-log(10) + 2*(-1/S(4) + sqrt(5)/4)*log(sqrt(-sqrt(5)/8 + 5/S(8)))
+ 2*(-sqrt(5)/4 - 1/S(4))*log(sqrt(sqrt(5)/8 + 5/S(8)))
+ pi*(1/S(4) + sqrt(5)/4)/(2*sqrt(-sqrt(5)/8 + 5/S(8)))
+ 779405/S(277704))
Hoeo = harmonic(no + pe/qo)
Aoeo = (-log(26) + 2*log(sin(3*pi/13))*cos(4*pi/13) + 2*log(sin(2*pi/13))*cos(32*pi/13)
+ 2*log(sin(5*pi/13))*cos(80*pi/13) - 2*log(sin(6*pi/13))*cos(5*pi/13)
- 2*log(sin(4*pi/13))*cos(pi/13) + pi*cot(5*pi/13)/2
- 2*log(sin(pi/13))*cos(3*pi/13) + 53857323/S(16331560))
Hooe = harmonic(no + po/qe)
Aooe = (-log(20) + 2*(1/S(4) + sqrt(5)/4)*log(-1/S(4) + sqrt(5)/4)
+ 2*(-1/S(4) + sqrt(5)/4)*log(sqrt(-sqrt(5)/8 + 5/S(8)))
+ 2*(-sqrt(5)/4 - 1/S(4))*log(sqrt(sqrt(5)/8 + 5/S(8)))
+ 2*(-sqrt(5)/4 + 1/S(4))*log(1/S(4) + sqrt(5)/4)
+ 486853480/S(186374097) - pi*sqrt(sqrt(5)/8 + 5/S(8))/(2*(-sqrt(5)/4 + 1/S(4))))
Hooo = harmonic(no + po/qo)
Aooo = (-log(26) + 2*log(sin(3*pi/13))*cos(54*pi/13) + 2*log(sin(4*pi/13))*cos(6*pi/13)
+ 2*log(sin(6*pi/13))*cos(108*pi/13) - 2*log(sin(5*pi/13))*cos(pi/13)
- 2*log(sin(pi/13))*cos(5*pi/13) + pi*cot(4*pi/13)/2
- 2*log(sin(2*pi/13))*cos(3*pi/13) + 383693479/S(125128080))
H = [Heee, Heeo, Heoe, Heoo, Hoee, Hoeo, Hooe, Hooo]
A = [Aeee, Aeeo, Aeoe, Aeoo, Aoee, Aoeo, Aooe, Aooo]
for h, a in zip(H, A):
e = expand_func(h).doit()
assert cancel(e/a) == 1
assert h.n() == a.n()
def test_harmonic_rewrite_polygamma():
    """harmonic rewritten via (poly/di/tri)gamma and expand_func shifts."""
    n, m = Symbol("n"), Symbol("m")

    # All three gamma-family targets give the same rewrite for H(n).
    for target in (digamma, trigamma, polygamma):
        assert harmonic(n).rewrite(target) == polygamma(0, n + 1) + EulerGamma

    assert harmonic(n, 3).rewrite(polygamma) == \
        polygamma(2, n + 1)/2 - polygamma(2, 1)/2
    assert harmonic(n, m).rewrite(polygamma) == \
        (-1)**m*(polygamma(m - 1, 1) - polygamma(m - 1, n + 1))/factorial(m - 1)

    # expand_func shifts the argument by +/- 4.
    assert expand_func(harmonic(n + 4)) == \
        harmonic(n) + 1/(n + 4) + 1/(n + 3) + 1/(n + 2) + 1/(n + 1)
    assert expand_func(harmonic(n - 4)) == \
        harmonic(n) - 1/(n - 1) - 1/(n - 2) - 1/(n - 3) - 1/n

    assert harmonic(n, m).rewrite("tractable") == harmonic(n, m).rewrite(polygamma)
@XFAIL
def test_harmonic_limit_fail():
    """limit(harmonic(n, m), n, oo) should evaluate to zeta(m) for m > 1.

    ``limit`` is not in this module's imports, so the previous version
    xfailed with a NameError instead of exercising the intended assertion;
    import it locally so the XFAIL reflects the real limitation.
    """
    from sympy import limit
    n = Symbol("n")
    m = Symbol("m")
    # For m > 1:
    assert limit(harmonic(n, m), n, oo) == zeta(m)
@XFAIL
def test_harmonic_rewrite_sum_fail():
    """Sum rewrites of harmonic; xfails because the bound Dummy differs."""
    n, m = Symbol("n"), Symbol("m")
    _k = Dummy("k")
    assert harmonic(n).rewrite(Sum) == Sum(1/_k, (_k, 1, n))
    assert harmonic(n, m).rewrite(Sum) == Sum(_k**(-m), (_k, 1, n))
def replace_dummy(expr, sym):
    """Replace the single Dummy symbol in *expr* (if any) with *sym*."""
    dummies = expr.atoms(Dummy)
    if not dummies:
        return expr
    # The caller guarantees at most one Dummy appears.
    assert len(dummies) == 1
    return expr.xreplace({dummies.pop(): sym})
def test_harmonic_rewrite_sum():
    """Sum rewrites of harmonic, compared modulo the bound Dummy symbol."""
    n, m = Symbol("n"), Symbol("m")
    k = Dummy("k")
    assert replace_dummy(harmonic(n).rewrite(Sum), k) == Sum(1/k, (k, 1, n))
    assert replace_dummy(harmonic(n, m).rewrite(Sum), k) == Sum(k**(-m), (k, 1, n))
def test_euler():
    """Euler numbers: known values, lazy evaluation, evalf, Sum rewrite."""
    known_values = [(0, 1), (1, 0), (2, -1), (3, 0), (4, 5), (6, -61), (8, 1385)]
    for index, value in known_values:
        assert euler(index) == value

    # With evaluate=False the expression stays unevaluated.
    assert euler(20, evaluate=False) != 370371188237525

    n = Symbol('n', integer=True)
    assert euler(n) != -1
    assert euler(n).subs(n, 2) == -1

    assert euler(20).evalf() == 370371188237525.0
    assert euler(20, evaluate=False).evalf() == 370371188237525.0

    assert euler(n).rewrite(Sum) == euler(n)
    # Odd-index Euler numbers are zero, so the Sum rewrite collapses to 0.
    assert euler(2*n + 1).rewrite(Sum) == 0
@XFAIL
def test_euler_failing():
    # depends on dummy variables being implemented https://github.com/sympy/sympy/issues/5665
    # NOTE(review): `n`, `_j` and `_k` are undefined at this scope, so this
    # currently xfails with a NameError rather than the intended assertion
    # failure; it is a placeholder for the Sum rewrite of even Euler numbers.
    assert euler(2*n).rewrite(Sum) == I*Sum(Sum((-1)**_j*2**(-_k)*I**(-_k)*(-2*_j + _k)**(2*n + 1)*binomial(_k, _j)/_k, (_j, 0, _k)), (_k, 1, 2*n + 1))
def test_catalan():
    """Catalan numbers, their rewrites, derivative and numeric evaluation."""
    for index, value in [(1, 1), (2, 2), (3, 5), (4, 14)]:
        assert catalan(index) == value
    assert catalan(x) == catalan(x)

    # Rewrites in terms of binomial, gamma and hyper.
    assert catalan(2*x).rewrite(binomial) == binomial(4*x, 2*x)/(2*x + 1)
    assert catalan(Rational(1, 2)).rewrite(gamma) == 8/(3*pi)
    assert catalan(3*x).rewrite(gamma) == 4**(
        3*x)*gamma(3*x + Rational(1, 2))/(sqrt(pi)*gamma(3*x + 2))
    assert catalan(x).rewrite(hyper) == hyper((-x + 1, -x), (2,), 1)

    assert diff(catalan(x), x) == (polygamma(
        0, x + Rational(1, 2)) - polygamma(0, x + 2) + log(4))*catalan(x)

    assert str(catalan(0.5).evalf()) == '0.848826363156775'
def test_nC_nP_nT():
    """Cross-checks nP/nC/nT counting functions against brute-force
    enumeration over random multisets, plus Stirling-number tables."""
    from sympy.utilities.iterables import (
        multiset_permutations, multiset_combinations, multiset_partitions,
        partitions, subsets, permutations)
    from sympy.functions.combinatorial.numbers import (
        nP, nC, nT, stirling, _multiset_histogram, _AOP_product)
    from sympy.combinatorics.permutations import Permutation
    from sympy.core.numbers import oo
    from random import choice

    c = string.ascii_lowercase
    # Permutation counts: nP against explicit multiset_permutations.
    for i in range(100):
        s = ''.join(choice(c) for i in range(7))
        u = len(s) == len(set(s))  # True when all 7 letters are distinct
        try:
            tot = 0
            for i in range(8):
                check = nP(s, i)
                tot += check
                assert len(list(multiset_permutations(s, i))) == check
                if u:
                    # Distinct letters: the integer form must agree.
                    assert nP(len(s), i) == check
            assert nP(s) == tot
        except AssertionError:
            # Print the failing sample so the random case can be reproduced.
            print(s, i, 'failed perm test')
            raise ValueError()
    # Combination counts: nC against explicit multiset_combinations.
    for i in range(100):
        s = ''.join(choice(c) for i in range(7))
        u = len(s) == len(set(s))
        try:
            tot = 0
            for i in range(8):
                check = nC(s, i)
                tot += check
                assert len(list(multiset_combinations(s, i))) == check
                if u:
                    assert nC(len(s), i) == check
            assert nC(s) == tot
            if u:
                assert nC(len(s)) == tot
        except AssertionError:
            print(s, i, 'failed combo test')
            raise ValueError()
    # Partition counts of an integer: nT(i, j) vs partitions into j parts.
    for i in range(1, 10):
        tot = 0
        for j in range(1, i + 2):
            check = nT(i, j)
            tot += check
            assert sum(1 for p in partitions(i, j, size=True) if p[0] == j) == check
        assert nT(i) == tot
    # Partition counts of a set: nT(range(i), j) vs multiset_partitions.
    for i in range(1, 10):
        tot = 0
        for j in range(1, i + 2):
            check = nT(range(i), j)
            tot += check
            assert len(list(multiset_partitions(range(i), j))) == check
        assert nT(range(i)) == tot
    # Partition counts of random multisets.
    for i in range(100):
        s = ''.join(choice(c) for i in range(7))
        u = len(s) == len(set(s))
        try:
            tot = 0
            for i in range(1, 8):
                check = nT(s, i)
                tot += check
                assert len(list(multiset_partitions(s, i))) == check
                if u:
                    assert nT(range(len(s)), i) == check
            if u:
                assert nT(range(len(s))) == tot
            assert nT(s) == tot
        except AssertionError:
            print(s, i, 'failed partition test')
            raise ValueError()
    # tests for Stirling numbers of the first kind that are not tested in the
    # above
    assert [stirling(9, i, kind=1) for i in range(11)] == [
        0, 40320, 109584, 118124, 67284, 22449, 4536, 546, 36, 1, 0]
    # Unsigned Stirling(4, i) equals the number of 4-permutations with i cycles.
    perms = list(permutations(range(4)))
    assert [sum(1 for p in perms if Permutation(p).cycles == i)
            for i in range(5)] == [0, 6, 11, 6, 1] == [
        stirling(4, i, kind=1) for i in range(5)]
    # http://oeis.org/A008275
    assert [stirling(n, k, signed=1)
            for n in range(10) for k in range(1, n + 1)] == [
        1, -1,
        1, 2, -3,
        1, -6, 11, -6,
        1, 24, -50, 35, -10,
        1, -120, 274, -225, 85, -15,
        1, 720, -1764, 1624, -735, 175, -21,
        1, -5040, 13068, -13132, 6769, -1960, 322, -28,
        1, 40320, -109584, 118124, -67284, 22449, -4536, 546, -36, 1]
    # http://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind
    assert [stirling(n, k, kind=1)
            for n in range(10) for k in range(n+1)] == [
        1,
        0, 1,
        0, 1, 1,
        0, 2, 3, 1,
        0, 6, 11, 6, 1,
        0, 24, 50, 35, 10, 1,
        0, 120, 274, 225, 85, 15, 1,
        0, 720, 1764, 1624, 735, 175, 21, 1,
        0, 5040, 13068, 13132, 6769, 1960, 322, 28, 1,
        0, 40320, 109584, 118124, 67284, 22449, 4536, 546, 36, 1]
    # http://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
    assert [stirling(n, k, kind=2)
            for n in range(10) for k in range(n+1)] == [
        1,
        0, 1,
        0, 1, 1,
        0, 1, 3, 1,
        0, 1, 7, 6, 1,
        0, 1, 15, 25, 10, 1,
        0, 1, 31, 90, 65, 15, 1,
        0, 1, 63, 301, 350, 140, 21, 1,
        0, 1, 127, 966, 1701, 1050, 266, 28, 1,
        0, 1, 255, 3025, 7770, 6951, 2646, 462, 36, 1]
    # NOTE(review): both operands use kind=1; presumably one side was meant
    # to be kind=2 — confirm against upstream intent.
    assert stirling(3, 4, kind=1) == stirling(3, 4, kind=1) == 0
    raises(ValueError, lambda: stirling(-2, 2))

    # d-restricted partitions: parts of a block must differ by at least d.
    def delta(p):
        if len(p) == 1:
            return oo
        return min(abs(i[0] - i[1]) for i in subsets(p, 2))
    parts = multiset_partitions(range(5), 3)
    d = 2
    assert (sum(1 for p in parts if all(delta(i) >= d for i in p)) ==
            stirling(5, 3, d=d) == 7)
    # other coverage tests
    assert nC('abb', 2) == nC('aab', 2) == 2
    assert nP(3, 3, replacement=True) == nP('aabc', 3, replacement=True) == 27
    assert nP(3, 4) == 0
    assert nP('aabc', 5) == 0
    assert nC(4, 2, replacement=True) == nC('abcdd', 2, replacement=True) == \
        len(list(multiset_combinations('aabbccdd', 2))) == 10
    assert nC('abcdd') == sum(nC('abcdd', i) for i in range(6)) == 24
    assert nC(list('abcdd'), 4) == 4
    assert nT('aaaa') == nT(4) == len(list(partitions(4))) == 5
    assert nT('aaab') == len(list(multiset_partitions('aaab'))) == 7
    assert nC('aabb'*3, 3) == 4  # aaa, bbb, abb, baa
    assert dict(_AOP_product((4,1,1,1))) == {
        0: 1, 1: 4, 2: 7, 3: 8, 4: 8, 5: 7, 6: 4, 7: 1}
    # the following was the first t that showed a problem in a previous form of
    # the function, so it's not as random as it may appear
    t = (3, 9, 4, 6, 6, 5, 5, 2, 10, 4)
    assert sum(_AOP_product(t)[i] for i in range(55)) == 58212000
    raises(ValueError, lambda: _multiset_histogram({1:'a'}))
| |
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration creating the initial tables for the 'projects'
    app: Project, TaskStatus, Milestone, Task (plus its 'assigned' M2M
    table), TaskTimeSlot and TaskRecord.

    NOTE: this file is South auto-generated boilerplate; the table/field
    definitions and the frozen ``models`` dict below must stay in sync and
    should not be hand-edited.
    """
    def forwards(self, orm):
        """Create all projects tables and emit post-create signals."""
        # Adding model 'Project'
        db.create_table('projects_project', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='child_set', null=True, to=orm['projects.Project'])),
            ('manager', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='manager', null=True, to=orm['identities.Contact'])),
            ('client', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='client', null=True, to=orm['identities.Contact'])),
            ('details', self.gf('django.db.models.fields.TextField')(max_length=255, null=True, blank=True)),
        ))
        db.send_create_signal('projects', ['Project'])
        # Adding model 'TaskStatus'
        db.create_table('projects_taskstatus', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('details', self.gf('django.db.models.fields.TextField')(max_length=255, null=True, blank=True)),
            ('active', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('hidden', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal('projects', ['TaskStatus'])
        # Adding model 'Milestone'
        db.create_table('projects_milestone', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['projects.Project'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('status', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['projects.TaskStatus'])),
            ('details', self.gf('django.db.models.fields.TextField')(max_length=255, null=True, blank=True)),
            ('start_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('end_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
        ))
        db.send_create_signal('projects', ['Milestone'])
        # Adding model 'Task'
        db.create_table('projects_task', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('parent', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='child_set', null=True, to=orm['projects.Task'])),
            ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['projects.Project'])),
            ('milestone', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['projects.Milestone'], null=True, blank=True)),
            ('status', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['projects.TaskStatus'])),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('details', self.gf('django.db.models.fields.TextField')(max_length=255, null=True, blank=True)),
            ('caller', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['identities.Contact'], null=True, blank=True)),
            ('start_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('end_date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('priority', self.gf('django.db.models.fields.IntegerField')(default=3)),
            ('estimated_time', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
        ))
        db.send_create_signal('projects', ['Task'])
        # Adding M2M table for field assigned on 'Task'
        db.create_table('projects_task_assigned', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('task', models.ForeignKey(orm['projects.task'], null=False)),
            ('user', models.ForeignKey(orm['core.user'], null=False))
        ))
        db.create_unique('projects_task_assigned', ['task_id', 'user_id'])
        # Adding model 'TaskTimeSlot'
        db.create_table('projects_tasktimeslot', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('task', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['projects.Task'])),
            ('time_from', self.gf('django.db.models.fields.DateTimeField')()),
            ('time_to', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('timezone', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('details', self.gf('django.db.models.fields.TextField')(max_length=255, null=True, blank=True)),
        ))
        db.send_create_signal('projects', ['TaskTimeSlot'])
        # Adding model 'TaskRecord'
        db.create_table('projects_taskrecord', (
            ('object_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['core.Object'], unique=True, primary_key=True)),
            ('task', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['projects.Task'])),
            ('record_type', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('details', self.gf('django.db.models.fields.TextField')()),
        ))
        db.send_create_signal('projects', ['TaskRecord'])
    def backwards(self, orm):
        """Drop every table created in forwards(), in reverse dependency order."""
        # Deleting model 'Project'
        db.delete_table('projects_project')
        # Deleting model 'TaskStatus'
        db.delete_table('projects_taskstatus')
        # Deleting model 'Milestone'
        db.delete_table('projects_milestone')
        # Deleting model 'Task'
        db.delete_table('projects_task')
        # Removing M2M table for field assigned on 'Task'
        db.delete_table('projects_task_assigned')
        # Deleting model 'TaskTimeSlot'
        db.delete_table('projects_tasktimeslot')
        # Deleting model 'TaskRecord'
        db.delete_table('projects_taskrecord')
    # Frozen ORM snapshot used by South to materialise `orm` at migration time.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.group': {
            'Meta': {'ordering': "['name']", 'object_name': 'Group'},
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
        },
        'core.object': {
            'Meta': {'object_name': 'Object'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'everybody_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'everybody_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'everybody_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']"}),
            'group_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'group_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
            'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
            'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"}),
            'user_execute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user_write': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'core.user': {
            'Meta': {'ordering': "['name']", 'object_name': 'User'},
            'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'identities.contact': {
            'Meta': {'ordering': "['name']", 'object_name': 'Contact', '_ormbases': ['core.Object']},
            'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.ContactType']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['identities.Contact']"}),
            'related_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
            'related_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'})
        },
        'identities.contactfield': {
            'Meta': {'ordering': "['name']", 'object_name': 'ContactField', '_ormbases': ['core.Object']},
            'allowed_values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'field_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'identities.contacttype': {
            'Meta': {'ordering': "['name']", 'object_name': 'ContactType', '_ormbases': ['core.Object']},
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['identities.ContactField']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'projects.milestone': {
            'Meta': {'ordering': "['name']", 'object_name': 'Milestone', '_ormbases': ['core.Object']},
            'details': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.TaskStatus']"})
        },
        'projects.project': {
            'Meta': {'ordering': "['name']", 'object_name': 'Project', '_ormbases': ['core.Object']},
            'client': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'client'", 'null': 'True', 'to': "orm['identities.Contact']"}),
            'details': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'manager': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'manager'", 'null': 'True', 'to': "orm['identities.Contact']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['projects.Project']"})
        },
        'projects.task': {
            'Meta': {'ordering': "('-priority', 'name')", 'object_name': 'Task', '_ormbases': ['core.Object']},
            'assigned': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}),
            'caller': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']", 'null': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'estimated_time': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'milestone': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Milestone']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['projects.Task']"}),
            'priority': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.TaskStatus']"})
        },
        'projects.taskrecord': {
            'Meta': {'object_name': 'TaskRecord', '_ormbases': ['core.Object']},
            'details': ('django.db.models.fields.TextField', [], {}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'record_type': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Task']"})
        },
        'projects.taskstatus': {
            'Meta': {'ordering': "('hidden', '-active', 'name')", 'object_name': 'TaskStatus', '_ormbases': ['core.Object']},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'details': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
        },
        'projects.tasktimeslot': {
            'Meta': {'ordering': "['-date_created']", 'object_name': 'TaskTimeSlot', '_ormbases': ['core.Object']},
            'details': ('django.db.models.fields.TextField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'task': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Task']"}),
            'time_from': ('django.db.models.fields.DateTimeField', [], {}),
            'time_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'timezone': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        }
    }
    complete_apps = ['projects']
| |
#!/usr/bin/env python
from __future__ import division
import roslib; roslib.load_manifest('fs_smach')
import rospy
import smach
import smach_ros
import actionlib
import time
import os
import yaml
import urllib2
from smach_ros import SimpleActionState
from std_msgs.msg import Empty
from fs_smach.msg import RunParameters
from fs_smach.msg import Vial
from fs_smach.msg import RunSummary
from fs_utilities import FileTools
from fs_actionlib.msg import EmptyAction, EmptyGoal
from fs_actionlib.msg import HomeAction, HomeGoal
from fs_actionlib.msg import GoToPosAction, GoToPosGoal
from fs_actuation.srv import AddPuff
from fs_actuation.srv import Cap
from fs_actuation.srv import SetLightsOff
from fs_actuation.srv import SetLightsOn
from fs_actuation.srv import SetRelayOff
from fs_actuation.srv import SetRelayOn
from fs_actuation.srv import StartPuffRepeated
from fs_actuation.srv import StopPuffRepeated
from fs_bias_interface.srv import StartCamera
from fs_bias_interface.srv import StopCamera
# Shared helper that creates/saves the run and vial data files.
file_tools = FileTools()
# True when the real sorter hardware is attached; several states shorten
# their sleeps/durations when it is False (simulation mode).
HARDWARE = rospy.get_param('/fs_smach/hardware')
# Full fly-sorter parameter tree from the ROS parameter server.
FLY_SORTER_PARAMETERS = rospy.get_param('/fs_parameters')
# Presumably a timeout (seconds) for action-state waits; not referenced in
# this portion of the file — confirm where the state machine is assembled.
ACTIONSTATE_WAIT_TIMEOUT = 200.0
class SetupRunDataPath(smach.State):
    """State that creates the on-disk data directory for the current run."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'])
    def execute(self, userdata):
        """Create the data path for the (lower-cased) run mode; always succeeds."""
        rospy.loginfo('Executing state SETUP_RUN_DATA_PATH')
        mode = rospy.get_param('/fs_smach/run_mode').lower()
        file_tools.create_run_data_path(mode)
        return 'succeeded'
class SetupVialDataPath(smach.State):
    """State that creates the on-disk data directory for one vial."""
    def __init__(self):
        outcome_list = ['succeeded','aborted','preempted']
        smach.State.__init__(self,
                             outcomes=outcome_list,
                             input_keys=['vial_name'])
    def execute(self, userdata):
        """Create the data path named after ``userdata.vial_name``."""
        rospy.loginfo('Executing state SETUP_VIAL_DATA_PATH')
        file_tools.create_vial_data_path(userdata.vial_name)
        return 'succeeded'
class SaveRunData(smach.State):
    """State that persists run-level metadata (currently only the run mode)."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'])
    def execute(self, userdata):
        """Write the lower-cased run mode to the run data file."""
        mode = rospy.get_param('/fs_smach/run_mode').lower()
        file_tools.save_run_data({'run_mode': mode})
        return 'succeeded'
class SaveVialData(smach.State):
    """State that persists per-vial data.

    In 'sorting' mode it fetches the saved fly data from the actuation
    webserver; in 'training' mode it records the video path and the vial's
    training gender.
    """
    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['succeeded','aborted','preempted'],
                             input_keys=['training_gender'])
        actuation_webserver_port = rospy.get_param('/fs_parameters/webserver_port/actuation')
        self.actuation_webserver_url = 'http://localhost:{0}'.format(actuation_webserver_port)
    def execute(self, userdata):
        """Assemble the vial data dict for the current run mode and save it."""
        rospy.loginfo('Executing state SAVE_VIAL_DATA')
        vial_data = {}
        run_mode = rospy.get_param('/fs_smach/run_mode')
        run_mode = run_mode.lower()
        vial_data['run_mode'] = run_mode
        if run_mode == 'sorting':
            url_str = self.actuation_webserver_url + '/getSavedData'
            rsp = urllib2.urlopen(url_str)
            # BUGFIX: close the HTTP response; it was previously leaked.
            try:
                fly_data_json = rsp.read()
            finally:
                rsp.close()
            fly_data = yaml.safe_load(fly_data_json)
            vial_data['fly_data'] = fly_data
        elif run_mode == 'training':
            video_path = rospy.get_param('/fs_data/video_path')
            vial_data['video_path'] = video_path
            vial_data['training_gender'] = userdata.training_gender
        file_tools.save_vial_data(vial_data)
        return 'succeeded'
class PublishInitializing(smach.State):
    """State that announces initialization start on /fs_status/initializing."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'])
        self.pub = rospy.Publisher('/fs_status/initializing', Empty)
    def execute(self, userdata):
        """Publish one Empty message and report success."""
        rospy.loginfo('Executing state PUBLISH_INITIALIZING')
        message = Empty()
        self.pub.publish(message)
        return 'succeeded'
class PublishInitialized(smach.State):
    """State that announces initialization completion on /fs_status/initialized."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'])
        self.pub = rospy.Publisher('/fs_status/initialized', Empty)
    def execute(self, userdata):
        """Publish one Empty message and report success."""
        rospy.loginfo('Executing state PUBLISH_INITIALIZED')
        message = Empty()
        self.pub.publish(message)
        return 'succeeded'
class PublishHibernated(smach.State):
    """State that announces hibernation on /fs_status/hibernated."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'])
        self.pub = rospy.Publisher('/fs_status/hibernated', Empty)
    def execute(self, userdata):
        """Publish one Empty message and report success."""
        rospy.loginfo('Executing state PUBLISH_HIBERNATED')
        message = Empty()
        self.pub.publish(message)
        return 'succeeded'
class PublishVialRunning(smach.State):
    """State that announces the vial now running on /fs_status/vial/running."""
    def __init__(self):
        outcome_list = ['succeeded','aborted','preempted']
        smach.State.__init__(self,
                             outcomes=outcome_list,
                             input_keys=['vial'])
        self.pub = rospy.Publisher('/fs_status/vial/running', Vial)
    def execute(self, userdata):
        """Wrap ``userdata.vial`` in a Vial message and publish it."""
        rospy.loginfo('Executing state PUBLISH_VIAL_RUNNING')
        message = Vial(vial=userdata.vial)
        self.pub.publish(message)
        return 'succeeded'
class PublishVialFinished(smach.State):
    """State that shuts off vial airflow and announces the finished vial on
    /fs_status/vial/finished."""
    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['succeeded','aborted','preempted'],
                             input_keys=['vial'])
        self.pub = rospy.Publisher('/fs_status/vial/finished', Vial)
    def execute(self, userdata):
        """Stop the 'unknown' relay and all funnel puffs, then publish the vial."""
        rospy.loginfo('Executing state PUBLISH_VIAL_FINISHED')
        rospy.wait_for_service('/fs_actuation/set_relay_off')
        rospy.wait_for_service('/fs_actuation/stop_puff_repeated')
        set_relay_off = rospy.ServiceProxy('/fs_actuation/set_relay_off', SetRelayOff)
        stop_puff_repeated = rospy.ServiceProxy('/fs_actuation/stop_puff_repeated',
                                                StopPuffRepeated)
        try:
            set_relay_off(relay='unknown')
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)
        # Best-effort: keep going even if one funnel fails to stop.
        for funnel in ('male_funnel', 'female_funnel', 'unknown_funnel'):
            try:
                stop_puff_repeated(relay=funnel)
            except rospy.ServiceException as e:
                print("Service call failed: %s" % e)
        self.pub.publish(Vial(vial=userdata.vial))
        return 'succeeded'
class TurnOnLights(smach.State):
    """State that switches the sorter lights on via the actuation service."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'])
    def execute(self, userdata):
        """Call /fs_actuation/set_lights_on; failures are logged, not fatal."""
        rospy.loginfo('Executing state TURN_ON_LIGHTS')
        rospy.wait_for_service('/fs_actuation/set_lights_on')
        try:
            rospy.ServiceProxy('/fs_actuation/set_lights_on', SetLightsOn)()
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)
        return 'succeeded'
class TurnOffLights(smach.State):
    """State that switches the sorter lights off via the actuation service."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'])
    def execute(self, userdata):
        """Call /fs_actuation/set_lights_off; failures are logged, not fatal."""
        rospy.loginfo('Executing state TURN_OFF_LIGHTS')
        rospy.wait_for_service('/fs_actuation/set_lights_off')
        try:
            rospy.ServiceProxy('/fs_actuation/set_lights_off', SetLightsOff)()
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)
        return 'succeeded'
class CheckRunParameters(smach.State):
    """Loop driver state: on each visit pop the next vial from the pending
    list and return 'run_vial', or return 'vials_finished' once empty.

    State is kept on the instance across execute() calls; ``vials_to_run is
    None`` marks "plan not yet loaded from run_parameters".
    """
    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['vials_finished','run_vial','aborted','preempted'],
                             input_keys=['run_parameters'],
                             output_keys=['vial_to_run','vial_training_gender'])
        # Sentinel: load the plan from userdata on the first execute().
        self.vials_to_run = None
    def execute(self, userdata):
        rospy.loginfo('Executing state CHECK_RUN_PARAMETERS')
        if self.vials_to_run is None:
            # First visit of this run: cache the plan on the instance.
            self.vials_to_run = userdata.run_parameters['vials_to_run']
            self.vial_training_genders = userdata.run_parameters['vial_training_genders']
            self.run_mode = rospy.get_param('/fs_smach/run_mode')
        # rospy.logwarn(self.vials_to_run)
        if len(self.vials_to_run) == 0:
            # All vials done: reset so the next run reloads its plan.
            self.vials_to_run = None
            self.vial_training_genders = None
            return 'vials_finished'
        else:
            vial_number = self.vials_to_run.pop(0)
            userdata.vial_to_run = "vial_" + str(vial_number)
            userdata.vial_training_gender = None
            if self.run_mode == 'training':
                try:
                    # Genders are keyed by vial name; a missing key means no
                    # gender was assigned for this vial.
                    userdata.vial_training_gender = self.vial_training_genders.pop("vial_" + str(vial_number))
                except KeyError:
                    pass
            return 'run_vial'
class CoolFlies(smach.State):
    """State that waits while the flies are chilled into immobility."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'])
    def execute(self, userdata):
        """Sleep for the configured cool duration (4 s when no hardware)."""
        rospy.loginfo('Executing state COOL_FLIES')
        seconds = FLY_SORTER_PARAMETERS['cool']['duration_seconds']
        # Log the configured value even when the simulation override applies.
        rospy.loginfo('cooling flies for {0} seconds'.format(seconds))
        if not HARDWARE:
            seconds = 4
        rospy.sleep(seconds)
        return 'succeeded'
class EjectPuff(smach.State):
    """State that fires a single 'eject' air puff via the actuation service."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'])
    def execute(self, userdata):
        """Queue one eject puff; service errors are logged and ignored."""
        rospy.loginfo('Executing state EJECT_PUFF')
        rospy.wait_for_service('/fs_actuation/add_puff')
        puff_kwargs = {'relay': 'eject',
                       'delay': 0,
                       'duration': FLY_SORTER_PARAMETERS['eject']['duration']}
        try:
            add_puff = rospy.ServiceProxy('/fs_actuation/add_puff', AddPuff)
            add_puff(**puff_kwargs)
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)
        return 'succeeded'
class CleanTurntable(smach.State):
    """State that blows off the turntable via the 'unknown' and 'disk' relays."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'])
    def execute(self, userdata):
        """Puff each cleaning relay for the configured duration, sleeping
        after each puff so they run back-to-back."""
        rospy.loginfo('Executing state CLEAN_TURNTABLE')
        rospy.wait_for_service('/fs_actuation/add_puff')
        seconds = FLY_SORTER_PARAMETERS['clean']['duration_seconds']
        if not HARDWARE:
            seconds = 4
        # The add_puff service takes its duration in milliseconds.
        milliseconds = seconds*1000
        try:
            add_puff = rospy.ServiceProxy('/fs_actuation/add_puff', AddPuff)
            for relay in ('unknown', 'disk'):
                add_puff(relay=relay, delay=0, duration=milliseconds)
                rospy.sleep(seconds)
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)
        return 'succeeded'
class RunDispenseSeq(smach.State):
    """Run the fly-dispense sequence.

    In training mode the camera is started first.  With hardware attached:
    open the 'unknown' relay, start indefinite repeated puffs on the three
    funnels, then step through the timed low/med/high dispense puff stages,
    sleeping for each stage's total time.  Without hardware, just sleep 2 s.
    The order of these service calls matters, so the body is left as written.
    """
    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['succeeded','aborted','preempted'],
                             input_keys=['training_gender'])
    def execute(self, userdata):
        rospy.loginfo('Executing state RUN_DISPENSE_SEQ')
        run_mode = rospy.get_param('/fs_smach/run_mode')
        if run_mode.lower() == 'training':
            rospy.wait_for_service('/fs_bias_interface/start_camera')
            try:
                start_camera = rospy.ServiceProxy('/fs_bias_interface/start_camera',StartCamera)
                start_camera(userdata.training_gender)
            except rospy.ServiceException, e:
                print "Service call failed: %s"%e
        if HARDWARE:
            rospy.wait_for_service('/fs_actuation/start_puff_repeated')
            start_puff_repeated = rospy.ServiceProxy('/fs_actuation/start_puff_repeated',
                                                     StartPuffRepeated)
            rospy.wait_for_service('/fs_actuation/set_relay_on')
            set_relay_on = rospy.ServiceProxy('/fs_actuation/set_relay_on',SetRelayOn)
            try:
                set_relay_on(relay='unknown')
            except rospy.ServiceException, e:
                print "Service call failed: %s"%e
            relays = ['male_funnel', 'female_funnel', 'unknown_funnel']
            seq = FLY_SORTER_PARAMETERS['funnel_puff_seq']
            for relay in relays:
                time_on = seq['on']
                time_off = seq['off']
                # count = -1 presumably means "repeat until stopped" — confirm
                # against the StartPuffRepeated service definition.
                count = -1
                try:
                    resp = start_puff_repeated(relay=relay,
                                               time_on=time_on,
                                               time_off=time_off,
                                               count=count)
                except rospy.ServiceException, e:
                    print "Service call failed: %s"%e
            relays = ['dispense_low', 'dispense_med', 'dispense_high']
            for relay in relays:
                seq = FLY_SORTER_PARAMETERS['dispense_seq'][relay]
                for stage in seq:
                    time_on = stage['on']
                    time_off = stage['off']
                    count = stage['count']
                    try:
                        resp = start_puff_repeated(relay=relay,
                                                   time_on=time_on,
                                                   time_off=time_off,
                                                   count=count)
                        # Sleep for the stage's total time (on/off are in ms).
                        duration = count*(time_on + time_off)/1000.0
                        rospy.sleep(duration)
                    except rospy.ServiceException, e:
                        print "Service call failed: %s"%e
        else:
            rospy.sleep(2)
        return 'succeeded'
class WaitAfterDispense(smach.State):
    """State that pauses after dispensing before the next step."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'])
    def execute(self, userdata):
        """Sleep for the configured wait (1 s when no hardware is attached)."""
        rospy.loginfo('Executing state WAIT_AFTER_DISPENSE')
        seconds = FLY_SORTER_PARAMETERS['wait_after_dispense']['duration_seconds']
        if not HARDWARE:
            seconds = 1
        rospy.sleep(seconds)
        return 'succeeded'
class CapVials(smach.State):
    """State that caps the vials and, in training mode, stops the camera."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'])
    def execute(self, userdata):
        """Call the cap service, then stop the camera when training."""
        rospy.loginfo('Executing state CAP_VIALS')
        rospy.wait_for_service('/fs_actuation/cap')
        try:
            rospy.ServiceProxy('/fs_actuation/cap', Cap)()
        except rospy.ServiceException as e:
            print("Service call failed: %s" % e)
        if rospy.get_param('/fs_smach/run_mode').lower() == 'training':
            rospy.wait_for_service('/fs_bias_interface/stop_camera')
            try:
                rospy.ServiceProxy('/fs_bias_interface/stop_camera', StopCamera)()
            except rospy.ServiceException as e:
                print("Service call failed: %s" % e)
        return 'succeeded'
class WaitToGetRunParameters(smach.State):
    """Block until the operator either sends run parameters or asks to
    hibernate.

    Subscribes to /fs_controls/set_run_parameters and /fs_controls/hibernate
    and polls until one of the callbacks records an outcome.
    """
    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['succeeded','hibernate','aborted','preempted'],
                             output_keys=['run_parameters'])
    def execute(self, userdata):
        rospy.loginfo('Executing state WAIT_TO_GET_RUN_PARAMETERS')
        # BUGFIX: clear the outcome BEFORE subscribing.  Previously a callback
        # firing between Subscriber() and the assignment could set the outcome
        # and then be clobbered back to None, losing the operator's message.
        self.return_outcome = None
        rospy.Subscriber("/fs_controls/set_run_parameters", RunParameters, self.run_callback)
        rospy.Subscriber("/fs_controls/hibernate", Empty, self.hibernate_callback)
        while self.return_outcome is None:
            rospy.sleep(0.1)
        # BUGFIX: compare strings by value; 'is' only worked via interning.
        if self.return_outcome == 'succeeded':
            userdata.run_parameters = self.run_parameters
            rospy.loginfo("run_parameters = " + str(self.run_parameters))
        return self.return_outcome
    def run_callback(self, data):
        """Parse the operator's YAML payload into a sorted vial plan."""
        vials_to_run_dict = yaml.safe_load(data.vials_to_run)
        vial_training_genders = yaml.safe_load(data.vial_training_genders)
        vials_to_run_list = []
        for vial in vials_to_run_dict:
            if vials_to_run_dict[vial]:
                vials_to_run_list.append(int(vial.replace('vial_','')))
            else:
                # Drop genders for vials that were deselected.
                try:
                    vial_training_genders.pop(vial)
                except KeyError:
                    pass
        vials_to_run_list.sort()
        self.run_parameters = {}
        self.run_parameters['vials_to_run'] = vials_to_run_list
        self.run_parameters['vial_training_genders'] = vial_training_genders
        # Set the outcome last so execute() never sees a half-built plan.
        self.return_outcome = 'succeeded'
    def hibernate_callback(self, data):
        self.return_outcome = 'hibernate'
def empty_monitor_cb(ud, msg):
    # MonitorState callback that always reports the message as "invalid",
    # so the monitor state exits on the first message received.
    return False
class PublishRunFinished(smach.State):
    """Publishes a RunSummary on /fs_status/run_finished when a run ends."""
    def __init__(self):
        smach.State.__init__(self,
                             outcomes=['succeeded','aborted','preempted'])
        self.pub = rospy.Publisher('/fs_status/run_finished', RunSummary)
    def execute(self, userdata):
        rospy.loginfo('Executing state PUBLISH_RUN_FINISHED')
        # Assemble the summary from the module-level file_tools data.
        summary = RunSummary(run_data=file_tools.run_data,
                             set_data=file_tools.set_data,
                             fly_sorter_data=file_tools.fly_sorter_data)
        self.pub.publish(summary)
        return 'succeeded'
class WaitToRunAgainOrHibernate(smach.State):
    """Waits for the operator to request either another run or hibernation."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['run_again','hibernate','aborted','preempted'])
    def execute(self, userdata):
        rospy.loginfo('Executing state WAIT_TO_RUN_AGAIN_OR_HIBERNATE')
        rospy.Subscriber("/fs_controls/run_again", Empty, self.run_again_callback)
        rospy.Subscriber("/fs_controls/hibernate", Empty, self.hibernate_callback)
        self.return_outcome = None
        # Poll until one of the subscriber callbacks records an outcome.
        while True:
            if self.return_outcome is not None:
                return self.return_outcome
            rospy.sleep(0.1)
    def run_again_callback(self,data):
        self.return_outcome = 'run_again'
    def hibernate_callback(self,data):
        self.return_outcome = 'hibernate'
class EmptyVial(smach.State):
    """Empties the current vial: runs the oscillate_input_pos action with the
    'dispense_high' relay energized, then pulses the 'ramp' relay using the
    durations configured in FLY_SORTER_PARAMETERS['ramp']."""
    def __init__(self):
        smach.State.__init__(self, outcomes=['succeeded','aborted','preempted'])
    def execute(self, userdata):
        rospy.loginfo('Executing state EMPTY_VIAL')
        client = actionlib.SimpleActionClient('/fs_actionlib/oscillate_input_pos',EmptyAction)
        rospy.wait_for_service('/fs_actuation/set_relay_on')
        set_relay_on = rospy.ServiceProxy('/fs_actuation/set_relay_on',SetRelayOn)
        rospy.wait_for_service('/fs_actuation/set_relay_off')
        set_relay_off = rospy.ServiceProxy('/fs_actuation/set_relay_off',SetRelayOff)
        # Energize the dispense-high relay while the vial is oscillated.
        try:
            set_relay_on(relay='dispense_high')
        except rospy.ServiceException as e:
            # 'except X as e' is valid on Python 2.6+ and Python 3,
            # unlike the old 'except X, e' form.
            print("Service call failed: %s"%e)
        client.wait_for_server()
        goal = EmptyGoal()
        client.send_goal(goal)
        client.wait_for_result()
        client.get_result()
        try:
            set_relay_off(relay='dispense_high')
        except rospy.ServiceException as e:
            print("Service call failed: %s"%e)
        # Pulse the ramp relay after the configured delay.
        duration_before = FLY_SORTER_PARAMETERS['ramp']['duration_before_seconds']
        rospy.sleep(duration_before)
        duration_on = FLY_SORTER_PARAMETERS['ramp']['duration_on_seconds']
        try:
            set_relay_on(relay='ramp')
        except rospy.ServiceException as e:
            print("Service call failed: %s"%e)
        rospy.sleep(duration_on)
        try:
            set_relay_off(relay='ramp')
        except rospy.ServiceException as e:
            print("Service call failed: %s"%e)
        return 'succeeded'
class AutomaticModeSmach(object):
    """Top-level SMACH state machine for the fly sorter's automatic mode.

    Lifecycle: wait for an initialize command; home and position the output
    axis, input axes and auxiliary devices in parallel; wait for run
    parameters; run every selected vial; save run data; park the hardware;
    then either run again or hibernate.
    """
    def __init__(self):
        rospy.init_node('fs_smach_automatic_mode',log_level=rospy.INFO)
        # Create the top-level SMACH state machine.
        self.sm = smach.StateMachine(outcomes=['succeeded','aborted','preempted'])
        # Open the container and add all states/sub-machines to it.
        with self.sm:
            # --- Initialization phase -------------------------------------
            smach.StateMachine.add('WAIT_TO_INITIALIZE',
                                   smach_ros.MonitorState("/fs_controls/initialize",
                                                          Empty,
                                                          empty_monitor_cb),
                                   transitions={'invalid':'PUBLISH_INITIALIZING',
                                                'valid':'aborted',
                                                'preempted':'preempted'})
            smach.StateMachine.add('PUBLISH_INITIALIZING', PublishInitializing(),
                                   transitions={'succeeded':'INITIALIZE',
                                                'aborted':'aborted',
                                                'preempted':'preempted'})
            # Sub-machine: home and park the output axis.
            self.sm_initialize_output = smach.StateMachine(outcomes=['succeeded','aborted','preempted'])
            with self.sm_initialize_output:
                smach.StateMachine.add('HOME_OUTPUT',
                                       SimpleActionState('/fs_actionlib/home_output',
                                                         HomeAction,
                                                         goal=HomeGoal(motor_name="output"),
                                                         server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                       transitions={'succeeded':'GO_TO_POS_START_OUTPUT',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
                smach.StateMachine.add('GO_TO_POS_START_OUTPUT',
                                       SimpleActionState('/fs_actionlib/go_to_output_pos',
                                                         GoToPosAction,
                                                         goal=GoToPosGoal(motor_name="output",
                                                                          position="origin"),
                                                         server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                       transitions={'succeeded':'succeeded',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
            # Sub-machine: home both input axes and move them to the start pose.
            self.sm_initialize_input = smach.StateMachine(outcomes=['succeeded','aborted','preempted'])
            with self.sm_initialize_input:
                smach.StateMachine.add('HOME_INPUT_Y',
                                       SimpleActionState('/fs_actionlib/home_input',
                                                         HomeAction,
                                                         goal=HomeGoal(motor_name="input_y"),
                                                         server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                       transitions={'succeeded':'GO_TO_POS_START_INPUT_Y',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
                smach.StateMachine.add('GO_TO_POS_START_INPUT_Y',
                                       SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                         GoToPosAction,
                                                         goal=GoToPosGoal(motor_name="input_y",
                                                                          position="ready"),
                                                         server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                       transitions={'succeeded':'HOME_INPUT_X',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
                smach.StateMachine.add('HOME_INPUT_X',
                                       SimpleActionState('/fs_actionlib/home_input',
                                                         HomeAction,
                                                         goal=HomeGoal(motor_name="input_x"),
                                                         server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                       transitions={'succeeded':'GO_TO_POS_START_INPUT_X',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
                smach.StateMachine.add('GO_TO_POS_START_INPUT_X',
                                       SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                         GoToPosAction,
                                                         goal=GoToPosGoal(motor_name="input_x",
                                                                          position="vial_10"),
                                                         server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                       transitions={'succeeded':'succeeded',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
            # Sub-machine: lights, turntable, camera and turntable cleaning.
            self.sm_initialize_other = smach.StateMachine(outcomes=['succeeded','aborted','preempted'])
            with self.sm_initialize_other:
                smach.StateMachine.add('TURN_ON_LIGHTS', TurnOnLights(),
                                       transitions={'succeeded':'TURN_ON_TURNTABLE',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
                smach.StateMachine.add('TURN_ON_TURNTABLE',
                                       SimpleActionState('/fs_actionlib/turn_on_turntable',
                                                         EmptyAction),
                                       transitions={'succeeded':'TURN_ON_CAM',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
                smach.StateMachine.add('TURN_ON_CAM',
                                       SimpleActionState('/fs_actionlib/turn_on_cam',
                                                         EmptyAction),
                                       transitions={'succeeded':'CLEAN_TURNTABLE_SETUP',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
                smach.StateMachine.add('CLEAN_TURNTABLE_SETUP', CleanTurntable(),
                                       transitions={'succeeded':'succeeded',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
            # Run the three initialization sub-machines concurrently.
            self.sm_initialize = smach.Concurrence(outcomes=['succeeded','aborted','preempted'],
                                                   default_outcome='succeeded',
                                                   outcome_map={'succeeded':
                                                                {'INITIALIZE_OUTPUT':'succeeded',
                                                                 'INITIALIZE_INPUT':'succeeded',
                                                                 'INITIALIZE_OTHER':'succeeded',
                                                                 }})
            with self.sm_initialize:
                smach.Concurrence.add('INITIALIZE_OUTPUT',self.sm_initialize_output)
                smach.Concurrence.add('INITIALIZE_INPUT',self.sm_initialize_input)
                smach.Concurrence.add('INITIALIZE_OTHER',self.sm_initialize_other)
            smach.StateMachine.add('INITIALIZE', self.sm_initialize,
                                   transitions={'succeeded':'PUBLISH_INITIALIZED',
                                                'aborted':'aborted',
                                                'preempted':'preempted'})
            smach.StateMachine.add('PUBLISH_INITIALIZED', PublishInitialized(),
                                   transitions={'succeeded':'WAIT_TO_GET_RUN_PARAMETERS',
                                                'aborted':'aborted',
                                                'preempted':'preempted'})
            smach.StateMachine.add('WAIT_TO_GET_RUN_PARAMETERS', WaitToGetRunParameters(),
                                   transitions={'succeeded':'RUN',
                                                'hibernate':'HIBERNATE',
                                                'aborted':'aborted',
                                                'preempted':'preempted'},
                                   remapping={'run_parameters':'run_parameters'})
            # --- Run phase: process every selected vial in turn -----------
            self.sm_run = smach.StateMachine(outcomes=['succeeded','aborted','preempted'],
                                             input_keys=['run_parameters'])
            with self.sm_run:
                smach.StateMachine.add('SETUP_RUN_DATA_PATH', SetupRunDataPath(),
                                       transitions={'succeeded':'CHECK_RUN_PARAMETERS',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
                smach.StateMachine.add('CHECK_RUN_PARAMETERS', CheckRunParameters(),
                                       transitions={'vials_finished':'succeeded',
                                                    'run_vial':'RUN_VIAL',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'},
                                       remapping={'run_parameters':'run_parameters',
                                                  'vial_to_run':'run_vial_data',
                                                  'vial_training_gender':'vial_training_gender'})
                # Sub-machine: the full processing sequence for one vial.
                self.sm_run_vial = smach.StateMachine(outcomes=['succeeded','aborted','preempted'],
                                                      input_keys=['run_vial_input','vial_training_gender'])
                with self.sm_run_vial:
                    def go_to_output_pos_goal_cb(userdata, goal):
                        # Derive the output position name from the vial name.
                        go_to_pos_goal = GoToPosGoal()
                        go_to_pos_goal.motor_name = 'output'
                        output_pos = userdata.vial_to_run.replace("vial","output")
                        go_to_pos_goal.position = output_pos
                        return go_to_pos_goal
                    def go_to_input_pos_goal_cb(userdata, goal):
                        # Move the input x axis to the vial's own position.
                        go_to_pos_goal = GoToPosGoal()
                        go_to_pos_goal.motor_name = 'input_x'
                        go_to_pos_goal.position = userdata.vial_to_run
                        return go_to_pos_goal
                    smach.StateMachine.add('PUBLISH_VIAL_RUNNING', PublishVialRunning(),
                                           transitions={'succeeded':'SETUP_VIAL_DATA_PATH',
                                                        'aborted':'aborted',
                                                        'preempted':'preempted'},
                                           remapping={'vial':'run_vial_input'})
                    smach.StateMachine.add('SETUP_VIAL_DATA_PATH', SetupVialDataPath(),
                                           transitions={'succeeded':'SETUP_INPUT_AND_OUTPUT',
                                                        'aborted':'aborted',
                                                        'preempted':'preempted'},
                                           remapping={'vial_name':'run_vial_input'})
                    self.sm_setup_input = smach.StateMachine(outcomes=['succeeded','aborted','preempted'],
                                                             input_keys=['vial_to_run'])
                    with self.sm_setup_input:
                        smach.StateMachine.add('INITIALIZE_INPUT_Y',
                                               SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                                 GoToPosAction,
                                                                 goal=GoToPosGoal(motor_name="input_y",
                                                                                  position="ready"),
                                                                 server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                               transitions={'succeeded':'GO_TO_INPUT_VIAL_POSITION',
                                                            'aborted':'aborted',
                                                            'preempted':'preempted'})
                        smach.StateMachine.add('GO_TO_INPUT_VIAL_POSITION',
                                               SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                                 GoToPosAction,
                                                                 goal_cb=go_to_input_pos_goal_cb,
                                                                 server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT),
                                                                 input_keys=['vial_to_run']),
                                               transitions={'succeeded':'succeeded',
                                                            'aborted':'aborted',
                                                            'preempted':'preempted'},
                                               remapping={'vial_to_run':'vial_to_run'})
                    self.sm_setup_output = smach.StateMachine(outcomes=['succeeded','aborted','preempted'],
                                                              input_keys=['vial_to_run'])
                    with self.sm_setup_output:
                        smach.StateMachine.add('GO_TO_OUTPUT_VIAL_POSITION',
                                               SimpleActionState('/fs_actionlib/go_to_output_pos',
                                                                 GoToPosAction,
                                                                 goal_cb=go_to_output_pos_goal_cb,
                                                                 server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT),
                                                                 input_keys=['vial_to_run']),
                                               transitions={'succeeded':'succeeded',
                                                            'aborted':'aborted',
                                                            'preempted':'preempted'},
                                               remapping={'vial_to_run':'vial_to_run'})
                    # Position the input and output axes concurrently.
                    self.sm_setup_input_and_output = smach.Concurrence(outcomes=['succeeded','aborted','preempted'],
                                                                       default_outcome='succeeded',
                                                                       input_keys=['vial_to_run'],
                                                                       outcome_map={'succeeded':
                                                                                    {'SETUP_INPUT':'succeeded',
                                                                                     'SETUP_OUTPUT':'succeeded',
                                                                                     }})
                    with self.sm_setup_input_and_output:
                        smach.Concurrence.add('SETUP_INPUT',self.sm_setup_input)
                        smach.Concurrence.add('SETUP_OUTPUT',self.sm_setup_output)
                    smach.StateMachine.add('SETUP_INPUT_AND_OUTPUT', self.sm_setup_input_and_output,
                                           transitions={'succeeded':'INSERT_VIAL',
                                                        'aborted':'aborted',
                                                        'preempted':'preempted'},
                                           remapping={'vial_to_run':'run_vial_input'})
                    smach.StateMachine.add('INSERT_VIAL',
                                           SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                             GoToPosAction,
                                                             goal=GoToPosGoal(motor_name="input_y",
                                                                              position="vial_inserted"),
                                                             server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                           transitions={'succeeded':'COOL_FLIES',
                                                        'aborted':'aborted',
                                                        'preempted':'preempted'})
                    smach.StateMachine.add('COOL_FLIES', CoolFlies(),
                                           transitions={'succeeded':'REMOVE_VIAL',
                                                        'aborted':'aborted',
                                                        'preempted':'preempted'})
                    smach.StateMachine.add('REMOVE_VIAL',
                                           SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                             GoToPosAction,
                                                             goal=GoToPosGoal(motor_name="input_y",
                                                                              position="ready"),
                                                             server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                           transitions={'succeeded':'GO_TO_DISPENSE',
                                                        'aborted':'aborted',
                                                        'preempted':'preempted'})
                    smach.StateMachine.add('GO_TO_DISPENSE',
                                           SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                             GoToPosAction,
                                                             goal=GoToPosGoal(motor_name="input_x",
                                                                              position="dispense"),
                                                             server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                           transitions={'succeeded':'PLACE_VIAL_IN_DISPENSE',
                                                        'aborted':'aborted',
                                                        'preempted':'preempted'})
                    smach.StateMachine.add('PLACE_VIAL_IN_DISPENSE',
                                           SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                             GoToPosAction,
                                                             goal=GoToPosGoal(motor_name="input_y",
                                                                              position="dispense"),
                                                             server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                           transitions={'succeeded':'RUN_DISPENSE_SEQ',
                                                        'aborted':'aborted',
                                                        'preempted':'preempted'})
                    smach.StateMachine.add('RUN_DISPENSE_SEQ', RunDispenseSeq(),
                                           transitions={'succeeded':'EMPTY_VIAL',
                                                        'aborted':'aborted',
                                                        'preempted':'preempted'},
                                           remapping={'training_gender':'vial_training_gender'})
                    smach.StateMachine.add('EMPTY_VIAL', EmptyVial(),
                                           transitions={'succeeded':'WAIT_CAP_RETURN_VIAL',
                                                        'aborted':'aborted',
                                                        'preempted':'preempted'})
                    self.sm_wait_and_cap = smach.StateMachine(outcomes=['succeeded','aborted','preempted'])
                    with self.sm_wait_and_cap:
                        smach.StateMachine.add('WAIT_AFTER_DISPENSE', WaitAfterDispense(),
                                               transitions={'succeeded':'CAP_VIALS',
                                                            'aborted':'aborted',
                                                            'preempted':'preempted'})
                        smach.StateMachine.add('CAP_VIALS', CapVials(),
                                               transitions={'succeeded':'succeeded',
                                                            'aborted':'aborted',
                                                            'preempted':'preempted'})
                    self.sm_return_vial = smach.StateMachine(outcomes=['succeeded','aborted','preempted'],
                                                             input_keys=['run_vial_input'])
                    with self.sm_return_vial:
                        smach.StateMachine.add('REMOVE_VIAL_FROM_DISPENSE',
                                               SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                                 GoToPosAction,
                                                                 goal=GoToPosGoal(motor_name="input_y",
                                                                                  position="ready"),
                                                                 server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                               transitions={'succeeded':'RETURN_TO_VIAL_POSITION',
                                                            'aborted':'aborted',
                                                            'preempted':'preempted'})
                        smach.StateMachine.add('RETURN_TO_VIAL_POSITION',
                                               SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                                 GoToPosAction,
                                                                 goal_cb=go_to_input_pos_goal_cb,
                                                                 server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT),
                                                                 input_keys=['vial_to_run']),
                                               transitions={'succeeded':'REPLACE_VIAL',
                                                            'aborted':'aborted',
                                                            'preempted':'preempted'},
                                               remapping={'vial_to_run':'run_vial_input'})
                        smach.StateMachine.add('REPLACE_VIAL',
                                               SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                                 GoToPosAction,
                                                                 goal=GoToPosGoal(motor_name="input_y",
                                                                                  position="vial_inserted"),
                                                                 server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                               transitions={'succeeded':'EJECT_PUFF',
                                                            'aborted':'aborted',
                                                            'preempted':'preempted'})
                        smach.StateMachine.add('EJECT_PUFF', EjectPuff(),
                                               transitions={'succeeded':'LEAVE_VIAL',
                                                            'aborted':'aborted',
                                                            'preempted':'preempted'})
                        smach.StateMachine.add('LEAVE_VIAL',
                                               SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                                 GoToPosAction,
                                                                 goal=GoToPosGoal(motor_name="input_y",
                                                                                  position="ready"),
                                                                 server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                               transitions={'succeeded':'succeeded',
                                                            'aborted':'aborted',
                                                            'preempted':'preempted'})
                    # Cap the vials while the input vial is being returned.
                    self.sm_wait_cap_return_vial = smach.Concurrence(outcomes=['succeeded','aborted','preempted'],
                                                                     default_outcome='succeeded',
                                                                     input_keys=['run_vial_input'],
                                                                     outcome_map={'succeeded':
                                                                                  {'WAIT_AND_CAP':'succeeded',
                                                                                   'RETURN_VIAL':'succeeded',
                                                                                   }})
                    with self.sm_wait_cap_return_vial:
                        smach.Concurrence.add('WAIT_AND_CAP',self.sm_wait_and_cap)
                        smach.Concurrence.add('RETURN_VIAL',self.sm_return_vial,
                                              remapping={'run_vial_input':'run_vial_input'})
                    smach.StateMachine.add('WAIT_CAP_RETURN_VIAL', self.sm_wait_cap_return_vial,
                                           transitions={'succeeded':'SAVE_VIAL_DATA',
                                                        'aborted':'aborted',
                                                        'preempted':'preempted'},
                                           remapping={'run_vial_input':'run_vial_input'})
                    smach.StateMachine.add('SAVE_VIAL_DATA', SaveVialData(),
                                           transitions={'succeeded':'PUBLISH_VIAL_FINISHED',
                                                        'aborted':'aborted',
                                                        'preempted':'preempted'},
                                           remapping={'training_gender':'vial_training_gender'})
                    smach.StateMachine.add('PUBLISH_VIAL_FINISHED', PublishVialFinished(),
                                           transitions={'succeeded':'succeeded',
                                                        'aborted':'aborted',
                                                        'preempted':'preempted'},
                                           remapping={'vial':'run_vial_input'})
                smach.StateMachine.add('RUN_VIAL', self.sm_run_vial,
                                       transitions={'succeeded':'CHECK_RUN_PARAMETERS',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'},
                                       remapping={'run_vial_input':'run_vial_data',
                                                  'vial_training_gender':'vial_training_gender'})
            smach.StateMachine.add('RUN', self.sm_run,
                                   transitions={'succeeded':'SAVE_RUN_DATA',
                                                'aborted':'aborted',
                                                'preempted':'preempted'},
                                   remapping={'run_parameters':'run_parameters'})
            smach.StateMachine.add('SAVE_RUN_DATA', SaveRunData(),
                                   transitions={'succeeded':'FINISH_RUN',
                                                'aborted':'aborted',
                                                'preempted':'preempted'})
            # --- Finish-run phase: park the axes after a completed run ----
            self.sm_finish_run_output = smach.StateMachine(outcomes=['succeeded','aborted','preempted'])
            with self.sm_finish_run_output:
                finish_run_output_position = 'origin'
                smach.StateMachine.add('GO_TO_POS_FINISH_RUN_OUTPUT',
                                       SimpleActionState('/fs_actionlib/go_to_output_pos',
                                                         GoToPosAction,
                                                         goal=GoToPosGoal(motor_name="output",
                                                                          position=finish_run_output_position),
                                                         server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                       transitions={'succeeded':'succeeded',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
            self.sm_finish_run_input = smach.StateMachine(outcomes=['succeeded','aborted','preempted'])
            with self.sm_finish_run_input:
                smach.StateMachine.add('GO_TO_POS_FINISH_RUN_INPUT_Y',
                                       SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                         GoToPosAction,
                                                         goal=GoToPosGoal(motor_name="input_y",
                                                                          position="ready"),
                                                         server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                       transitions={'succeeded':'GO_TO_POS_FINISH_RUN_INPUT_X',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
                smach.StateMachine.add('GO_TO_POS_FINISH_RUN_INPUT_X',
                                       SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                         GoToPosAction,
                                                         goal=GoToPosGoal(motor_name="input_x",
                                                                          position="vial_10"),
                                                         server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                       transitions={'succeeded':'succeeded',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
            self.sm_finish_run = smach.Concurrence(outcomes=['succeeded','aborted','preempted'],
                                                   default_outcome='succeeded',
                                                   outcome_map={'succeeded':
                                                                {'FINISH_RUN_OUTPUT':'succeeded',
                                                                 'FINISH_RUN_INPUT':'succeeded',
                                                                 }})
            with self.sm_finish_run:
                smach.Concurrence.add('FINISH_RUN_OUTPUT',self.sm_finish_run_output)
                smach.Concurrence.add('FINISH_RUN_INPUT',self.sm_finish_run_input)
            smach.StateMachine.add('FINISH_RUN', self.sm_finish_run,
                                   transitions={'succeeded':'PUBLISH_RUN_FINISHED',
                                                'aborted':'aborted',
                                                'preempted':'preempted'})
            smach.StateMachine.add('PUBLISH_RUN_FINISHED', PublishRunFinished(),
                                   transitions={'succeeded':'WAIT_TO_RUN_AGAIN_OR_HIBERNATE',
                                                'aborted':'aborted',
                                                'preempted':'preempted'})
            smach.StateMachine.add('WAIT_TO_RUN_AGAIN_OR_HIBERNATE', WaitToRunAgainOrHibernate(),
                                   transitions={'run_again':'PUBLISH_INITIALIZED',
                                                'hibernate':'HIBERNATE',
                                                'aborted':'aborted',
                                                'preempted':'preempted'})
            # --- Hibernate phase: shut down and park the hardware ---------
            self.sm_hibernate_output = smach.StateMachine(outcomes=['succeeded','aborted','preempted'])
            with self.sm_hibernate_output:
                hibernate_output_position = 'origin'
                smach.StateMachine.add('GO_TO_POS_HIBERNATE_OUTPUT',
                                       SimpleActionState('/fs_actionlib/go_to_output_pos',
                                                         GoToPosAction,
                                                         goal=GoToPosGoal(motor_name="output",
                                                                          position=hibernate_output_position),
                                                         server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                       transitions={'succeeded':'succeeded',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
            self.sm_hibernate_input = smach.StateMachine(outcomes=['succeeded','aborted','preempted'])
            with self.sm_hibernate_input:
                smach.StateMachine.add('GO_TO_POS_HIBERNATE_INPUT_Y',
                                       SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                         GoToPosAction,
                                                         goal=GoToPosGoal(motor_name="input_y",
                                                                          position="ready"),
                                                         server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                       transitions={'succeeded':'GO_TO_POS_HIBERNATE_INPUT_X',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
                smach.StateMachine.add('GO_TO_POS_HIBERNATE_INPUT_X',
                                       SimpleActionState('/fs_actionlib/go_to_input_pos',
                                                         GoToPosAction,
                                                         goal=GoToPosGoal(motor_name="input_x",
                                                                          position="vial_10"),
                                                         server_wait_timeout=rospy.Duration(ACTIONSTATE_WAIT_TIMEOUT)),
                                       transitions={'succeeded':'succeeded',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
            self.sm_hibernate_other = smach.StateMachine(outcomes=['succeeded','aborted','preempted'])
            with self.sm_hibernate_other:
                smach.StateMachine.add('CLEAN_TURNTABLE_HIBERNATE', CleanTurntable(),
                                       transitions={'succeeded':'TURN_OFF_LIGHTS',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
                smach.StateMachine.add('TURN_OFF_LIGHTS', TurnOffLights(),
                                       transitions={'succeeded':'STOP_ALL_MOTORS',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
                smach.StateMachine.add('STOP_ALL_MOTORS',
                                       SimpleActionState('/fs_actionlib/stop_all_motors',
                                                         EmptyAction),
                                       transitions={'succeeded':'succeeded',
                                                    'aborted':'aborted',
                                                    'preempted':'preempted'})
            # NOTE: HIBERNATE_OUTPUT is commented out of the concurrence and
            # instead runs as a separate state AFTER it (see below).
            self.sm_hibernate = smach.Concurrence(outcomes=['succeeded','aborted','preempted'],
                                                  default_outcome='succeeded',
                                                  outcome_map={'succeeded':
                                                               {'HIBERNATE_INPUT':'succeeded',
                                                                # 'HIBERNATE_OUTPUT':'succeeded',
                                                                'HIBERNATE_OTHER':'succeeded',
                                                                }})
            with self.sm_hibernate:
                # smach.Concurrence.add('HIBERNATE_OUTPUT',self.sm_hibernate_output)
                smach.Concurrence.add('HIBERNATE_INPUT',self.sm_hibernate_input)
                smach.Concurrence.add('HIBERNATE_OTHER',self.sm_hibernate_other)
            smach.StateMachine.add('HIBERNATE', self.sm_hibernate,
                                   transitions={'succeeded':'HIBERNATE_OUTPUT',
                                                'aborted':'aborted',
                                                'preempted':'preempted'})
            smach.StateMachine.add('HIBERNATE_OUTPUT', self.sm_hibernate_output,
                                   transitions={'succeeded':'PUBLISH_HIBERNATED',
                                                'aborted':'aborted',
                                                'preempted':'preempted'})
            smach.StateMachine.add('PUBLISH_HIBERNATED', PublishHibernated(),
                                   transitions={'succeeded':'WAIT_TO_INITIALIZE',
                                                'aborted':'aborted',
                                                'preempted':'preempted'})
    def execute(self):
        """Run the state machine to completion and return its final outcome."""
        # Previously the outcome was assigned to an unused local and dropped;
        # return it so callers can inspect how the machine terminated.
        return self.sm.execute()
if __name__ == '__main__':
    ams = AutomaticModeSmach()
    # sis = smach_ros.IntrospectionServer('fs_smach_automatic_mode', ams.sm, '/SM_ROOT')
    # sis.start()
    ams.execute()
    # Keep the node alive so subscriber/action callbacks continue to run.
    rospy.spin()
    # sis.stop()
| |
# pvec.py - probabilistic vector clocks for Mercurial
#
# Copyright 2012 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''
A "pvec" is a changeset property based on the theory of vector clocks
that can be compared to discover relatedness without consulting a
graph. This can be useful for tasks like determining how a
disconnected patch relates to a repository.
Currently a pvec consists of 448 bits, of which 24 are 'depth' and the
remainder are a bit vector. It is represented as a 70-character base85
string.
Construction:
- a root changeset has a depth of 0 and a bit vector based on its hash
- a normal commit has a changeset where depth is increased by one and
one bit vector bit is flipped based on its hash
- a merge changeset pvec is constructed by copying changes from one pvec into
the other to balance its depth
Properties:
- for linear changes, difference in depth is always <= hamming distance
- otherwise, changes are probably divergent
- when hamming distance is < 200, we can reliably detect when pvecs are near
Issues:
- hamming distance ceases to work over distances of ~ 200
- detecting divergence is less accurate when the common ancestor is very close
to either revision or total distance is high
- this could probably be improved by modeling the relation between
delta and hdist
Uses:
- a patch pvec can be used to locate the nearest available common ancestor for
resolving conflicts
- ordering of patches can be established without a DAG
- two head pvecs can be compared to determine whether push/pull/merge is needed
and approximately how many changesets are involved
- can be used to find a heuristic divergence measure between changesets on
different branches
'''
from __future__ import absolute_import
from .node import nullrev
from . import (
pycompat,
util,
)
# Layout of a pvec: a fixed-size bytestring whose first _depthbytes bytes
# hold the depth counter and whose remaining _vecbytes bytes hold the
# bit vector.
_size = 448 # 70 chars b85-encoded
_bytes = _size // 8
_depthbits = 24
_depthbytes = _depthbits // 8
_vecbytes = _bytes - _depthbytes
_vecbits = _vecbytes * 8
_radius = (_vecbits - 30) // 2 # high probability vectors are related
def _bin(bs):
'''convert a bytestring to a long'''
v = 0
for b in bs:
v = v * 256 + ord(b)
return v
def _str(v, l):
    # type: (int, int) -> bytes
    '''convert an int to a big-endian bytestring of length l'''
    out = b""
    remaining = v
    for _ in pycompat.xrange(l):
        # prepend the lowest byte, so the result ends up big-endian
        out = pycompat.bytechr(remaining & 255) + out
        remaining >>= 8
    return out
def _split(b):
    '''split a raw pvec bytestring into (depth, bitvec) ints'''
    depth_part = b[:_depthbytes]
    vec_part = b[_depthbytes:]
    return _bin(depth_part), _bin(vec_part)
def _join(depth, bitvec):
    '''encode (depth, bitvec) ints into a single raw pvec bytestring'''
    encoded_depth = _str(depth, _depthbytes)
    encoded_vec = _str(bitvec, _vecbytes)
    return encoded_depth + encoded_vec
def _hweight(x):
c = 0
while x:
if x & 1:
c += 1
x >>= 1
return c
# precomputed population count for every possible byte value (0-255)
_htab = [_hweight(x) for x in pycompat.xrange(256)]
def _hamming(a, b):
    '''find the hamming distance between two longs'''
    diff = a ^ b
    dist = 0
    # consume the XOR one byte at a time using the precomputed table
    while diff:
        dist += _htab[diff & 0xFF]
        diff >>= 8
    return dist
def _mergevec(x, y, c):
    '''combine two (depth, bitvec) parent pairs into the merge's pair.

    x, y: (depth, bitvec) tuples of the two parents
    c: the merge changeset's own node (used only when the parent
       vectors are identical)
    '''
    # Ideally, this function would be x ^ y ^ ancestor, but finding
    # ancestors is a nuisance. So instead we find the minimal number
    # of changes to balance the depth and hamming distance
    d1, v1 = x
    d2, v2 = y
    if d1 < d2:
        # ensure (d1, v1) is the deeper parent
        d1, d2, v1, v2 = d2, d1, v2, v1
    hdist = _hamming(v1, v2)
    ddist = d1 - d2
    v = v1
    m = v1 ^ v2 # mask of different bits
    i = 1
    if hdist > ddist:
        # if delta = 10 and hdist = 100, then we need to go up 55 steps
        # to the ancestor and down 45
        changes = (hdist - ddist + 1) // 2
    else:
        # must make at least one change
        changes = 1
    depth = d1 + changes
    # copy changes from v2
    if m:
        # walk the mask from the lowest bit, flipping differing bits in v
        # until the required number of changes has been applied
        while changes:
            if m & i:
                v ^= i
                changes -= 1
            i <<= 1
    else:
        # parents have identical vectors: flip a bit based on the merge node
        v = _flipbit(v, c)
    return depth, v
def _flipbit(v, node):
    '''flip one bit of v, chosen pseudo-randomly from the node's hash'''
    # converting bit strings to longs is slow
    position = (hash(node) & 0xFFFFFFFF) % _vecbits
    return v ^ (1 << position)
def ctxpvec(ctx):
    '''construct a pvec for ctx while filling in the cache'''
    repo = ctx.repo()
    if not util.safehasattr(repo, "_pveccache"):
        repo._pveccache = {}
    cache = repo._pveccache
    if ctx.rev() not in cache:
        cl = repo.changelog
        # fill the cache for every revision up to and including ctx
        for rev in pycompat.xrange(ctx.rev() + 1):
            if rev in cache:
                continue
            node = cl.node(rev)
            p1, p2 = cl.parentrevs(rev)
            if p1 == nullrev:
                # root changeset: seed the vector from the node hash
                cache[rev] = (0, _bin((node * 3)[:_vecbytes]))
            elif p2 == nullrev:
                # linear commit: deeper by one, one bit flipped
                depth, vec = cache[p1]
                cache[rev] = (depth + 1, _flipbit(vec, node))
            else:
                # merge: balance the two parent pvecs
                cache[rev] = _mergevec(cache[p1], cache[p2], node)
    bs = _join(*cache[ctx.rev()])
    return pvec(util.b85encode(bs))
class pvec(object):
def __init__(self, hashorctx):
if isinstance(hashorctx, bytes):
self._bs = hashorctx
self._depth, self._vec = _split(util.b85decode(hashorctx))
else:
self._vec = ctxpvec(hashorctx)
    def __str__(self):
        # NOTE(review): self._bs is bytes; under Python 3 __str__ must
        # return str, so this raises TypeError there — confirm intended use.
        return self._bs
def __eq__(self, b):
return self._vec == b._vec and self._depth == b._depth
def __lt__(self, b):
delta = b._depth - self._depth
if delta < 0:
return False # always correct
if _hamming(self._vec, b._vec) > delta:
return False
return True
    def __gt__(self, b):
        # Delegates to b.__lt__ (self > b iff b < self).
        return b < self
def __or__(self, b):
delta = abs(b._depth - self._depth)
if _hamming(self._vec, b._vec) <= delta:
return False
return True
    def __sub__(self, b):
        '''depth difference between two non-divergent pvecs'''
        if self | b:
            # divergent pvecs have no meaningful linear distance
            raise ValueError(b"concurrent pvecs")
        return self._depth - b._depth
def distance(self, b):
d = abs(b._depth - self._depth)
h = _hamming(self._vec, b._vec)
return max(d, h)
def near(self, b):
dist = abs(b.depth - self._depth)
if dist > _radius or _hamming(self._vec, b._vec) > _radius:
return False
| |
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects relating to stories."""
import copy
import re
from constants import constants
from core.domain import change_domain
from core.domain import html_cleaner
import feconf
import utils
# Do not modify the values of these constants. This is to preserve backwards
# compatibility with previous change dicts.
STORY_PROPERTY_TITLE = 'title'
STORY_PROPERTY_DESCRIPTION = 'description'
STORY_PROPERTY_NOTES = 'notes'
STORY_PROPERTY_LANGUAGE_CODE = 'language_code'
STORY_NODE_PROPERTY_DESTINATION_NODE_IDS = 'destination_node_ids'
STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS = 'acquired_skill_ids'
STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS = 'prerequisite_skill_ids'
STORY_NODE_PROPERTY_OUTLINE = 'outline'
STORY_NODE_PROPERTY_TITLE = 'title'
STORY_NODE_PROPERTY_EXPLORATION_ID = 'exploration_id'
INITIAL_NODE_ID = 'initial_node_id'
CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION = 'migrate_schema_to_latest_version'
# These take additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_UPDATE_STORY_PROPERTY = 'update_story_property'
CMD_UPDATE_STORY_NODE_PROPERTY = 'update_story_node_property'
CMD_UPDATE_STORY_CONTENTS_PROPERTY = 'update_story_contents_property'
# These take node_id as parameter.
CMD_ADD_STORY_NODE = 'add_story_node'
CMD_DELETE_STORY_NODE = 'delete_story_node'
CMD_UPDATE_STORY_NODE_OUTLINE_STATUS = 'update_story_node_outline_status'
# This takes additional 'title' parameters.
CMD_CREATE_NEW = 'create_new'
# Role-change and publication-status commands, with the allowed role values.
CMD_CHANGE_ROLE = 'change_role'
CMD_PUBLISH_STORY = 'publish_story'
CMD_UNPUBLISH_STORY = 'unpublish_story'
ROLE_MANAGER = 'manager'
ROLE_NONE = 'none'
# The prefix for all node ids of a story.
NODE_ID_PREFIX = 'node_'
class StoryChange(change_domain.BaseChange):
    """Domain object for changes made to story object.

    The allowed commands, together with the attributes:
        - 'add_story_node' (with node_id, title)
        - 'delete_story_node' (with node_id)
        - 'update_story_node_outline_status' (with node_id, old_value
        and new_value)
        - 'update_story_property' (with property_name, new_value
        and old_value)
        - 'update_story_node_property' (with property_name, new_value
        and old_value)
        - 'update_story_contents_property' (with property_name,
        new_value and old_value)
        - 'migrate_schema_to_latest_version' (with from_version and
        to_version)
        - 'create_new' (with title)
    """

    # The allowed list of story properties which can be used in
    # update_story_property command.
    STORY_PROPERTIES = (
        STORY_PROPERTY_TITLE, STORY_PROPERTY_DESCRIPTION,
        STORY_PROPERTY_NOTES, STORY_PROPERTY_LANGUAGE_CODE)

    # The allowed list of story node properties which can be used in
    # update_story_node_property command.
    STORY_NODE_PROPERTIES = (
        STORY_NODE_PROPERTY_DESTINATION_NODE_IDS,
        STORY_NODE_PROPERTY_ACQUIRED_SKILL_IDS,
        STORY_NODE_PROPERTY_PREREQUISITE_SKILL_IDS, STORY_NODE_PROPERTY_OUTLINE,
        STORY_NODE_PROPERTY_EXPLORATION_ID, STORY_NODE_PROPERTY_TITLE)

    # The allowed list of story contents properties which can be used in
    # update_story_contents_property command.
    STORY_CONTENTS_PROPERTIES = (INITIAL_NODE_ID,)

    # Schema for each allowed command: its name, the required and optional
    # attributes, and any restrictions on attribute values.
    ALLOWED_COMMANDS = [{
        'name': CMD_UPDATE_STORY_PROPERTY,
        'required_attribute_names': ['property_name', 'new_value', 'old_value'],
        'optional_attribute_names': [],
        'allowed_values': {'property_name': STORY_PROPERTIES}
    }, {
        'name': CMD_UPDATE_STORY_NODE_PROPERTY,
        'required_attribute_names': [
            'node_id', 'property_name', 'new_value', 'old_value'],
        'optional_attribute_names': [],
        'allowed_values': {'property_name': STORY_NODE_PROPERTIES}
    }, {
        'name': CMD_UPDATE_STORY_CONTENTS_PROPERTY,
        'required_attribute_names': ['property_name', 'new_value', 'old_value'],
        'optional_attribute_names': [],
        'allowed_values': {'property_name': STORY_CONTENTS_PROPERTIES}
    }, {
        'name': CMD_ADD_STORY_NODE,
        'required_attribute_names': ['node_id', 'title'],
        'optional_attribute_names': []
    }, {
        'name': CMD_DELETE_STORY_NODE,
        'required_attribute_names': ['node_id'],
        'optional_attribute_names': []
    }, {
        'name': CMD_UPDATE_STORY_NODE_OUTLINE_STATUS,
        'required_attribute_names': ['node_id', 'old_value', 'new_value'],
        'optional_attribute_names': []
    }, {
        'name': CMD_CREATE_NEW,
        'required_attribute_names': ['title'],
        'optional_attribute_names': []
    }, {
        'name': CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION,
        'required_attribute_names': ['from_version', 'to_version'],
        'optional_attribute_names': []
    }]
class StoryNode(object):
    """Domain object describing a node in the exploration graph of a
    story.
    """
    def __init__(
            self, node_id, title, destination_node_ids,
            acquired_skill_ids, prerequisite_skill_ids,
            outline, outline_is_finalized, exploration_id):
        """Initializes a StoryNode domain object.
        Args:
            node_id: str. The unique id for each node.
            title: str. The title of the story node.
            destination_node_ids: list(str). The list of destination node ids
                that this node points to in the story graph.
            acquired_skill_ids: list(str). The list of skill ids acquired by
                the user on completion of the node.
            prerequisite_skill_ids: list(str). The list of skill ids required
                before starting a node.
            outline: str. Free-form annotations that a lesson implementer
                can use to construct the exploration. It describes the basic
                theme or template of the story and is to be provided in html
                form.
            outline_is_finalized: bool. Whether the outline for the story
                node is finalized or not.
            exploration_id: str or None. The valid exploration id that fits the
                story node. It can be None initially, when the story creator
                has just created a story with the basic storyline (by providing
                outlines) without linking an exploration to any node.
        """
        self.id = node_id
        self.title = title
        self.destination_node_ids = destination_node_ids
        self.acquired_skill_ids = acquired_skill_ids
        self.prerequisite_skill_ids = prerequisite_skill_ids
        # The outline is user-provided HTML, so it is sanitized on the way in.
        self.outline = html_cleaner.clean(outline)
        self.outline_is_finalized = outline_is_finalized
        self.exploration_id = exploration_id
    @classmethod
    def get_number_from_node_id(cls, node_id):
        """Decodes the node_id to get the number at the end of the id.
        Args:
            node_id: str. The id of the node.
        Returns:
            int. The number at the end of the id.
        """
        return int(node_id.replace(NODE_ID_PREFIX, ''))
    @classmethod
    def get_incremented_node_id(cls, node_id):
        """Increments the next node id of the story.
        Returns:
            str. The new next node id.
        """
        current_number = StoryNode.get_number_from_node_id(node_id)
        incremented_node_id = NODE_ID_PREFIX + str(current_number + 1)
        return incremented_node_id
    @classmethod
    def require_valid_node_id(cls, node_id):
        """Validates the node id for a StoryNode object.
        Args:
            node_id: str. The node id to be validated.
        Raises:
            ValidationError: The node id is not a string or does not match
                the expected '<prefix><number>' pattern.
        """
        if not isinstance(node_id, basestring):
            raise utils.ValidationError(
                'Expected node ID to be a string, received %s' %
                node_id)
        # NOTE(review): pattern.match only anchors at the start of the
        # string, so an id like '<prefix>1abc' passes this check even
        # though get_number_from_node_id() would fail on it with a
        # ValueError. Confirm whether trailing characters should be
        # rejected (e.g. by appending '$' to the pattern).
        pattern = re.compile('%s[0-9]+' % NODE_ID_PREFIX)
        if not pattern.match(node_id):
            raise utils.ValidationError(
                'Invalid node_id: %s' % node_id)
    def to_dict(self):
        """Returns a dict representing this StoryNode domain object.
        Returns:
            A dict, mapping all fields of StoryNode instance.
        """
        return {
            'id': self.id,
            'title': self.title,
            'destination_node_ids': self.destination_node_ids,
            'acquired_skill_ids': self.acquired_skill_ids,
            'prerequisite_skill_ids': self.prerequisite_skill_ids,
            'outline': self.outline,
            'outline_is_finalized': self.outline_is_finalized,
            'exploration_id': self.exploration_id
        }
    @classmethod
    def from_dict(cls, node_dict):
        """Return a StoryNode domain object from a dict.
        Args:
            node_dict: dict. The dict representation of StoryNode object.
        Returns:
            StoryNode. The corresponding StoryNode domain object.
        """
        node = cls(
            node_dict['id'], node_dict['title'],
            node_dict['destination_node_ids'],
            node_dict['acquired_skill_ids'],
            node_dict['prerequisite_skill_ids'], node_dict['outline'],
            node_dict['outline_is_finalized'], node_dict['exploration_id'])
        return node
    @classmethod
    def create_default_story_node(cls, node_id, title):
        """Returns a StoryNode domain object with default values.
        Args:
            node_id: str. The id of the node.
            title: str. The title of the node.
        Returns:
            StoryNode. The StoryNode domain object with default
            value.
        """
        # Defaults: no links, no skills, empty non-finalized outline, and no
        # linked exploration yet.
        return cls(node_id, title, [], [], [], '', False, None)
    def validate(self):
        """Validates various properties of the story node.
        Raises:
            ValidationError: One or more attributes of the story node are
                invalid.
        """
        if self.exploration_id:
            if not isinstance(self.exploration_id, basestring):
                raise utils.ValidationError(
                    'Expected exploration ID to be a string, received %s' %
                    self.exploration_id)
        if not isinstance(self.outline, basestring):
            raise utils.ValidationError(
                'Expected outline to be a string, received %s' %
                self.outline)
        if not isinstance(self.title, basestring):
            raise utils.ValidationError(
                'Expected title to be a string, received %s' %
                self.title)
        if not isinstance(self.outline_is_finalized, bool):
            raise utils.ValidationError(
                'Expected outline_is_finalized to be a boolean, received %s' %
                self.outline_is_finalized)
        self.require_valid_node_id(self.id)
        if not isinstance(self.prerequisite_skill_ids, list):
            raise utils.ValidationError(
                'Expected prerequisite skill ids to be a list, received %s' %
                self.prerequisite_skill_ids)
        for skill_id in self.prerequisite_skill_ids:
            if not isinstance(skill_id, basestring):
                raise utils.ValidationError(
                    'Expected each prerequisite skill id to be a string, '
                    'received %s' % skill_id)
        # Duplicate detection via set() length comparison.
        if (
                len(self.prerequisite_skill_ids) >
                len(set(self.prerequisite_skill_ids))):
            raise utils.ValidationError(
                'Expected all prerequisite skills to be distinct.')
        if not isinstance(self.acquired_skill_ids, list):
            raise utils.ValidationError(
                'Expected acquired skill ids to be a list, received %s' %
                self.acquired_skill_ids)
        for skill_id in self.acquired_skill_ids:
            if not isinstance(skill_id, basestring):
                raise utils.ValidationError(
                    'Expected each acquired skill id to be a string, '
                    'received %s' % skill_id)
        if (
                len(self.acquired_skill_ids) >
                len(set(self.acquired_skill_ids))):
            raise utils.ValidationError(
                'Expected all acquired skills to be distinct.')
        # A skill cannot be both required by and granted by the same node.
        for skill_id in self.prerequisite_skill_ids:
            if skill_id in self.acquired_skill_ids:
                raise utils.ValidationError(
                    'Expected prerequisite skill ids and acquired skill ids '
                    'to be mutually exclusive. The skill_id %s intersects '
                    % skill_id)
        if not isinstance(self.destination_node_ids, list):
            raise utils.ValidationError(
                'Expected destination node ids to be a list, received %s' %
                self.destination_node_ids)
        for node_id in self.destination_node_ids:
            self.require_valid_node_id(node_id)
            # Self-loops are disallowed.
            if node_id == self.id:
                raise utils.ValidationError(
                    'The story node with ID %s points to itself.' % node_id)
class StoryContents(object):
    """Domain object representing the story_contents dict."""
    def __init__(self, story_nodes, initial_node_id, next_node_id):
        """Constructs a StoryContents domain object.
        Args:
            story_nodes: list(StoryNode). The list of story nodes that are part
                of this story.
            initial_node_id: str. The id of the starting node of the story.
            next_node_id: str. The id for the next node to be added to the
                story.
        """
        self.initial_node_id = initial_node_id
        self.nodes = story_nodes
        self.next_node_id = next_node_id
    def validate(self):
        """Validates various properties of the story contents object.
        Raises:
            ValidationError: One or more attributes of the story contents are
                invalid.
        """
        if not isinstance(self.nodes, list):
            raise utils.ValidationError(
                'Expected nodes field to be a list, received %s' % self.nodes)
        if len(self.nodes) > 0:
            StoryNode.require_valid_node_id(self.initial_node_id)
        StoryNode.require_valid_node_id(self.next_node_id)
        initial_node_is_present = False
        node_id_list = []
        for node in self.nodes:
            if not isinstance(node, StoryNode):
                raise utils.ValidationError(
                    'Expected each node to be a StoryNode object, received %s' %
                    node)
            node.validate()
            for destination_node_id in node.destination_node_ids:
                # NOTE: the generator variable 'node' below shadows the outer
                # loop variable; generator expressions have their own scope,
                # so the outer 'node' is unaffected, but the shadowing is
                # easy to misread.
                if next(
                        (node for node in self.nodes
                         if node.id == destination_node_id), None) is None:
                    raise utils.ValidationError(
                        'Expected all destination nodes to exist')
            if node.id == self.initial_node_id:
                initial_node_is_present = True
            # Checks whether the number in the id of any node is greater than
            # or equal to the value of next_node_id, which would mean the id
            # was never legitimately issued by get_incremented_node_id.
            if (StoryNode.get_number_from_node_id(node.id) >=
                    StoryNode.get_number_from_node_id(self.next_node_id)):
                raise utils.ValidationError(
                    'The node with id %s is out of bounds.' % node.id)
            node_id_list.append(node.id)
        if len(self.nodes) > 0:
            if not initial_node_is_present:
                raise utils.ValidationError('Expected starting node to exist.')
            if len(node_id_list) > len(set(node_id_list)):
                raise utils.ValidationError(
                    'Expected all node ids to be distinct.')
            # nodes_queue stores the pending nodes to visit in the story that
            # are unlocked.
            # NOTE(review): list.pop() (used below) removes the *last*
            # element, so this container is effectively a LIFO stack and the
            # traversal is depth-first, not the FIFO/BFS originally described
            # here. The reachability/loop/prerequisite checks below do not
            # appear to depend on visit order, but the naming is misleading.
            nodes_queue = []
            is_node_visited = [False] * len(self.nodes)
            starting_node_index = self.get_node_index(self.initial_node_id)
            nodes_queue.append(self.nodes[starting_node_index].id)
            # The user is assumed to have all the prerequisite skills of the
            # starting node before starting the story. Also, this list models
            # the skill IDs acquired by a learner as they progress through the
            # story.
            simulated_skill_ids = copy.deepcopy(
                self.nodes[starting_node_index].prerequisite_skill_ids)
            # The following loop traverses the graph from the given starting
            # node and makes sure that the user has acquired all the
            # prerequisite skills required by the destination nodes 'unlocked'
            # by visiting a particular node by the time that node is finished.
            while len(nodes_queue) > 0:
                current_node_id = nodes_queue.pop()
                current_node_index = self.get_node_index(current_node_id)
                is_node_visited[current_node_index] = True
                current_node = self.nodes[current_node_index]
                for skill_id in current_node.acquired_skill_ids:
                    simulated_skill_ids.append(skill_id)
                for node_id in current_node.destination_node_ids:
                    node_index = self.get_node_index(node_id)
                    # The following condition checks whether the destination
                    # node for a particular node, has already been visited, in
                    # which case the story would have loops, which are not
                    # allowed.
                    # NOTE(review): this also rejects acyclic graphs in which
                    # two different nodes share a destination (a "diamond"),
                    # not only true cycles -- confirm this is intended.
                    if is_node_visited[node_index]:
                        raise utils.ValidationError(
                            'Loops are not allowed in stories.')
                    destination_node = self.nodes[node_index]
                    if not (
                            set(
                                destination_node.prerequisite_skill_ids
                            ).issubset(simulated_skill_ids)):
                        raise utils.ValidationError(
                            'The prerequisite skills ' +
                            ' '.join(
                                set(destination_node.prerequisite_skill_ids) -
                                set(simulated_skill_ids)) +
                            ' were not completed before the node with id %s'
                            ' was unlocked.' % node_id)
                    nodes_queue.append(node_id)
            # Every node must have been reached from the starting node.
            for index, node_visited in enumerate(is_node_visited):
                if not node_visited:
                    raise utils.ValidationError(
                        'The node with id %s is disconnected from the '
                        'story graph.' % self.nodes[index].id)
    def get_node_index(self, node_id):
        """Returns the index of the story node with the given node
        id, or None if the node id is not in the story contents dict.
        Args:
            node_id: str. The id of the node.
        Returns:
            int or None. The index of the corresponding node, or None if there
            is no such node.
        """
        for ind, node in enumerate(self.nodes):
            if node.id == node_id:
                return ind
        return None
    def get_ordered_nodes(self):
        """Returns a list of nodes ordered by how they would appear sequentially
        to a learner.
        NOTE: Currently, this function assumes only a linear arrangement of
        nodes (it always follows destination_node_ids[0]).
        Returns:
            list(StoryNode). The ordered list of nodes.
        """
        initial_index = self.get_node_index(self.initial_node_id)
        current_node = self.nodes[initial_index]
        ordered_nodes_list = [current_node]
        while current_node.destination_node_ids:
            next_node_id = current_node.destination_node_ids[0]
            current_node = self.nodes[self.get_node_index(next_node_id)]
            ordered_nodes_list.append(current_node)
        return ordered_nodes_list
    def to_dict(self):
        """Returns a dict representing this StoryContents domain object.
        Returns:
            A dict, mapping all fields of StoryContents instance.
        """
        return {
            'nodes': [
                node.to_dict() for node in self.nodes
            ],
            'initial_node_id': self.initial_node_id,
            'next_node_id': self.next_node_id
        }
    @classmethod
    def from_dict(cls, story_contents_dict):
        """Return a StoryContents domain object from a dict.
        Args:
            story_contents_dict: dict. The dict representation of
                StoryContents object.
        Returns:
            StoryContents. The corresponding StoryContents domain object.
        """
        story_contents = cls(
            [
                StoryNode.from_dict(story_node_dict)
                for story_node_dict in story_contents_dict['nodes']
            ], story_contents_dict['initial_node_id'],
            story_contents_dict['next_node_id']
        )
        return story_contents
class Story(object):
    """Domain object for an Oppia Story."""
    def __init__(
            self, story_id, title, description, notes,
            story_contents, story_contents_schema_version, language_code,
            corresponding_topic_id, version, created_on=None,
            last_updated=None):
        """Constructs a Story domain object.
        Args:
            story_id: str. The unique ID of the story.
            title: str. The title of the story.
            description: str. The high level description of the story.
            notes: str. A set of notes, that describe the characters,
                main storyline, and setting. To be provided in html form.
            story_contents: StoryContents. The StoryContents instance
                representing the contents (like nodes) that are part of the
                story.
            story_contents_schema_version: int. The schema version for the
                story contents object.
            language_code: str. The ISO 639-1 code for the language this
                story is written in.
            corresponding_topic_id: str. The id of the topic to which the story
                belongs.
            version: int. The version of the story.
            created_on: datetime.datetime. Date and time when the story is
                created.
            last_updated: datetime.datetime. Date and time when the
                story was last updated.
        """
        self.id = story_id
        self.title = title
        self.description = description
        # Notes are user-provided HTML, so they are sanitized on the way in.
        self.notes = html_cleaner.clean(notes)
        self.story_contents = story_contents
        self.story_contents_schema_version = story_contents_schema_version
        self.language_code = language_code
        self.corresponding_topic_id = corresponding_topic_id
        self.created_on = created_on
        self.last_updated = last_updated
        self.version = version
    def validate(self):
        """Validates various properties of the story object.
        Raises:
            ValidationError: One or more attributes of story are invalid.
        """
        self.require_valid_title(self.title)
        if not isinstance(self.description, basestring):
            raise utils.ValidationError(
                'Expected description to be a string, received %s'
                % self.description)
        if not isinstance(self.notes, basestring):
            raise utils.ValidationError(
                'Expected notes to be a string, received %s' % self.notes)
        if not isinstance(self.story_contents_schema_version, int):
            raise utils.ValidationError(
                'Expected story contents schema version to be an integer, '
                'received %s' % self.story_contents_schema_version)
        # Only the current schema version is considered valid here; older
        # versions must be migrated before validation.
        if (self.story_contents_schema_version !=
                feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION):
            raise utils.ValidationError(
                'Expected story contents schema version to be %s, '
                'received %s' % (
                    feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION,
                    self.story_contents_schema_version))
        if not isinstance(self.language_code, basestring):
            raise utils.ValidationError(
                'Expected language code to be a string, received %s' %
                self.language_code)
        if not utils.is_valid_language_code(self.language_code):
            raise utils.ValidationError(
                'Invalid language code: %s' % self.language_code)
        if not isinstance(self.corresponding_topic_id, basestring):
            raise utils.ValidationError(
                'Expected corresponding_topic_id should be a string, received: '
                '%s' % self.corresponding_topic_id)
        # Node-level validation is delegated to the StoryContents object.
        self.story_contents.validate()
    @classmethod
    def require_valid_story_id(cls, story_id):
        """Checks whether the story id is a valid one.
        Args:
            story_id: str. The story id to validate.
        Raises:
            ValidationError: The story id is not a string or is not exactly
                12 characters long.
        """
        if not isinstance(story_id, basestring):
            raise utils.ValidationError(
                'Story id should be a string, received: %s' % story_id)
        if len(story_id) != 12:
            raise utils.ValidationError('Invalid story id.')
    @classmethod
    def require_valid_title(cls, title):
        """Checks whether the story title is a valid one.
        Args:
            title: str. The title to validate.
        Raises:
            ValidationError: The title is not a string or is empty.
        """
        if not isinstance(title, basestring):
            raise utils.ValidationError('Title should be a string.')
        if title == '':
            raise utils.ValidationError('Title field should not be empty')
    def get_acquired_skill_ids_for_node_ids(self, node_ids):
        """Returns the acquired skill ids of the nodes having the given
        node ids.
        Args:
            node_ids: list(str). The list of IDs of the nodes inside
                the story.
        Returns:
            list(str). The union of the acquired skill IDs corresponding to
            each of the node IDs, de-duplicated, in node order.
        """
        acquired_skill_ids = []
        for node in self.story_contents.nodes:
            if node.id in node_ids:
                for skill_id in node.acquired_skill_ids:
                    if skill_id not in acquired_skill_ids:
                        acquired_skill_ids.append(skill_id)
        return acquired_skill_ids
    def get_prerequisite_skill_ids_for_exp_id(self, exp_id):
        """Returns the prerequisite skill ids of the node having the given
        exploration id.
        Args:
            exp_id: str. The ID of the exploration linked to the story,
        Returns:
            list(str)|None. The list of prerequisite skill ids for the
            exploration or None, if no node is linked to it.
        """
        for node in self.story_contents.nodes:
            if node.exploration_id == exp_id:
                return node.prerequisite_skill_ids
        return None
    def has_exploration(self, exp_id):
        """Checks whether an exploration is present in the story.
        NOTE(review): this duplicates the logic of
        _check_exploration_id_already_present below; consider having one
        call the other.
        Args:
            exp_id: str. The ID of the exploration linked to the story,
        Returns:
            bool. Whether the exploration is linked to the story.
        """
        for node in self.story_contents.nodes:
            if node.exploration_id == exp_id:
                return True
        return False
    def to_dict(self):
        """Returns a dict representing this Story domain object.
        Returns:
            A dict, mapping all fields of Story instance.
        """
        return {
            'id': self.id,
            'title': self.title,
            'description': self.description,
            'notes': self.notes,
            'language_code': self.language_code,
            'story_contents_schema_version': self.story_contents_schema_version,
            'corresponding_topic_id': self.corresponding_topic_id,
            'version': self.version,
            'story_contents': self.story_contents.to_dict()
        }
    @classmethod
    def create_default_story(cls, story_id, title, corresponding_topic_id):
        """Returns a story domain object with default values. This is for
        the frontend where a default blank story would be shown to the user
        when the story is created for the first time.
        Args:
            story_id: str. The unique id of the story.
            title: str. The title for the newly created story.
            corresponding_topic_id: str. The id of the topic to which the story
                belongs.
        Returns:
            Story. The Story domain object with the default values (no nodes,
            version 0).
        """
        # Initial node id for a new story.
        initial_node_id = '%s1' % NODE_ID_PREFIX
        story_contents = StoryContents([], None, initial_node_id)
        return cls(
            story_id, title,
            feconf.DEFAULT_STORY_DESCRIPTION, feconf.DEFAULT_STORY_NOTES,
            story_contents, feconf.CURRENT_STORY_CONTENTS_SCHEMA_VERSION,
            constants.DEFAULT_LANGUAGE_CODE, corresponding_topic_id, 0)
    @classmethod
    def update_story_contents_from_model(
            cls, versioned_story_contents, current_version):
        """Converts the story_contents blob contained in the given
        versioned_story_contents dict from current_version to
        current_version + 1. Note that the versioned_story_contents being
        passed in is modified in-place.
        Args:
            versioned_story_contents: dict. A dict with two keys:
                - schema_version: str. The schema version for the
                    story_contents dict.
                - story_contents: dict. The dict comprising the story
                    contents.
            current_version: int. The current schema version of story_contents.
        """
        versioned_story_contents['schema_version'] = current_version + 1
        # Migration functions are looked up by name; one
        # _convert_story_contents_vN_dict_to_vN+1_dict classmethod is
        # expected to exist per schema step.
        conversion_fn = getattr(
            cls, '_convert_story_contents_v%s_dict_to_v%s_dict' % (
                current_version, current_version + 1))
        versioned_story_contents['story_contents'] = conversion_fn(
            versioned_story_contents['story_contents'])
    def update_title(self, title):
        """Updates the title of the story.
        Args:
            title: str. The new title of the story.
        """
        self.title = title
    def update_description(self, description):
        """Updates the description of the story.
        Args:
            description: str. The new description of the story.
        """
        self.description = description
    def update_notes(self, notes):
        """Updates the notes of the story.
        NOTE(review): unlike __init__, this setter does not pass the new
        notes through html_cleaner.clean -- confirm whether callers are
        expected to sanitize first.
        Args:
            notes: str. The new notes of the story.
        """
        self.notes = notes
    def update_language_code(self, language_code):
        """Updates the language code of the story.
        Args:
            language_code: str. The new language code of the story.
        """
        self.language_code = language_code
    def add_node(self, desired_node_id, node_title):
        """Adds a new default node with the id as story_contents.next_node_id.
        Args:
            desired_node_id: str. The node id to be given to the new node in the
                story.
            node_title: str. The title for the new story node.
        Raises:
            Exception: The desired_node_id differs from
                story_contents.next_node_id.
        """
        if self.story_contents.next_node_id != desired_node_id:
            raise Exception(
                'The node id %s does not match the expected '
                'next node id for the story.' % desired_node_id)
        self.story_contents.nodes.append(
            StoryNode.create_default_story_node(desired_node_id, node_title))
        self.story_contents.next_node_id = (
            StoryNode.get_incremented_node_id(self.story_contents.next_node_id))
        # The first node ever added automatically becomes the starting node.
        if self.story_contents.initial_node_id is None:
            self.story_contents.initial_node_id = desired_node_id
    def _check_exploration_id_already_present(self, exploration_id):
        """Returns whether a node with the given exploration id is already
        present in story_contents.
        Args:
            exploration_id: str. The id of the exploration.
        Returns:
            bool. Whether a node with the given exploration ID is already
            present.
        """
        for node in self.story_contents.nodes:
            if node.exploration_id == exploration_id:
                return True
        return False
    def delete_node(self, node_id):
        """Deletes a node with the given node_id.
        Args:
            node_id: str. The id of the node.
        Raises:
            ValueError: The node is not part of the story, or it is the
                starting node of a story with more than one node.
        """
        node_index = self.story_contents.get_node_index(node_id)
        if node_index is None:
            raise ValueError(
                'The node with id %s is not part of this story' % node_id)
        if node_id == self.story_contents.initial_node_id:
            # Deleting the last remaining node simply clears the starting
            # node; otherwise the caller must reassign it first.
            if len(self.story_contents.nodes) == 1:
                self.story_contents.initial_node_id = None
            else:
                raise ValueError(
                    'The node with id %s is the starting node for the story, '
                    'change the starting node before deleting it.' % node_id)
        # Remove all inbound links to the node before removing the node
        # itself.
        for node in self.story_contents.nodes:
            if node_id in node.destination_node_ids:
                node.destination_node_ids.remove(node_id)
        del self.story_contents.nodes[node_index]
    def update_node_outline(self, node_id, new_outline):
        """Updates the outline field of a given node.
        NOTE(review): the update_node_* methods below all repeat the same
        get_node_index/None check; a shared private helper would remove the
        duplication, but the error messages differ slightly between methods
        (some end with a period), so consolidation would change messages.
        Args:
            node_id: str. The id of the node.
            new_outline: str. The new outline of the given node.
        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self.story_contents.get_node_index(node_id)
        if node_index is None:
            raise ValueError(
                'The node with id %s is not part of this story' % node_id)
        self.story_contents.nodes[node_index].outline = new_outline
    def update_node_title(self, node_id, new_title):
        """Updates the title field of a given node.
        Args:
            node_id: str. The id of the node.
            new_title: str. The new title of the given node.
        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self.story_contents.get_node_index(node_id)
        if node_index is None:
            raise ValueError(
                'The node with id %s is not part of this story' % node_id)
        self.story_contents.nodes[node_index].title = new_title
    def mark_node_outline_as_finalized(self, node_id):
        """Updates the outline_is_finalized field of the node with the given
        node_id as True.
        Args:
            node_id: str. The id of the node.
        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self.story_contents.get_node_index(node_id)
        if node_index is None:
            raise ValueError(
                'The node with id %s is not part of this story' % node_id)
        self.story_contents.nodes[node_index].outline_is_finalized = True
    def mark_node_outline_as_unfinalized(self, node_id):
        """Updates the outline_is_finalized field of the node with the given
        node_id as False.
        Args:
            node_id: str. The id of the node.
        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self.story_contents.get_node_index(node_id)
        if node_index is None:
            raise ValueError(
                'The node with id %s is not part of this story' % node_id)
        self.story_contents.nodes[node_index].outline_is_finalized = False
    def update_node_acquired_skill_ids(self, node_id, new_acquired_skill_ids):
        """Updates the acquired skill ids field of a given node.
        Args:
            node_id: str. The id of the node.
            new_acquired_skill_ids: list(str). The updated acquired skill id
                list.
        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self.story_contents.get_node_index(node_id)
        if node_index is None:
            raise ValueError(
                'The node with id %s is not part of this story' % node_id)
        self.story_contents.nodes[node_index].acquired_skill_ids = (
            new_acquired_skill_ids)
    def update_node_prerequisite_skill_ids(
            self, node_id, new_prerequisite_skill_ids):
        """Updates the prerequisite skill ids field of a given node.
        Args:
            node_id: str. The id of the node.
            new_prerequisite_skill_ids: list(str). The updated prerequisite
                skill id list.
        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self.story_contents.get_node_index(node_id)
        if node_index is None:
            raise ValueError(
                'The node with id %s is not part of this story' % node_id)
        self.story_contents.nodes[node_index].prerequisite_skill_ids = (
            new_prerequisite_skill_ids)
    def update_node_destination_node_ids(
            self, node_id, new_destination_node_ids):
        """Updates the destination_node_ids field of a given node.
        Args:
            node_id: str. The id of the node.
            new_destination_node_ids: list(str). The updated destination
                node id list.
        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self.story_contents.get_node_index(node_id)
        if node_index is None:
            raise ValueError(
                'The node with id %s is not part of this story' % node_id)
        self.story_contents.nodes[node_index].destination_node_ids = (
            new_destination_node_ids)
    def update_node_exploration_id(
            self, node_id, new_exploration_id):
        """Updates the exploration id field of a given node.
        Args:
            node_id: str. The id of the node.
            new_exploration_id: str. The updated exploration id for a node.
        Raises:
            ValueError: The node is not part of the story, or another node
                already links the given exploration.
        """
        node_index = self.story_contents.get_node_index(node_id)
        if node_index is None:
            raise ValueError(
                'The node with id %s is not part of this story.' % node_id)
        # Each exploration may be linked to at most one node.
        if self._check_exploration_id_already_present(new_exploration_id):
            raise ValueError(
                'A node with exploration id %s already exists.' %
                new_exploration_id)
        self.story_contents.nodes[node_index].exploration_id = (
            new_exploration_id)
    def update_initial_node(self, new_initial_node_id):
        """Updates the starting node of the story.
        Args:
            new_initial_node_id: str. The new starting node id.
        Raises:
            ValueError: The node is not part of the story.
        """
        node_index = self.story_contents.get_node_index(new_initial_node_id)
        if node_index is None:
            raise ValueError(
                'The node with id %s is not part of this story.'
                % new_initial_node_id)
        self.story_contents.initial_node_id = new_initial_node_id
class StorySummary(object):
    """Domain object holding lightweight summary data about a story.

    A summary carries identifying fields, the node count and model
    timestamps, but not the full story contents.
    """
    def __init__(
            self, story_id, title, description, language_code, version,
            node_count, story_model_created_on,
            story_model_last_updated):
        """Constructs a StorySummary domain object.
        Args:
            story_id: str. The unique id of the story.
            title: str. The title of the story.
            description: str. The description of the story.
            language_code: str. The language code of the story.
            version: int. The version of the story.
            node_count: int. The number of nodes present in the story.
            story_model_created_on: datetime.datetime. Date and time when
                the story model is created.
            story_model_last_updated: datetime.datetime. Date and time
                when the story model was last updated.
        """
        # Identifying fields.
        self.id = story_id
        self.title = title
        self.description = description
        self.language_code = language_code
        # Versioning and size metadata.
        self.version = version
        self.node_count = node_count
        # Timestamps from the backing storage model.
        self.story_model_created_on = story_model_created_on
        self.story_model_last_updated = story_model_last_updated
    def validate(self):
        """Validates various properties of the story summary object.
        Raises:
            ValidationError: One or more attributes of story summary are
                invalid.
        """
        # Title: must be a non-empty string.
        if not isinstance(self.title, basestring):
            raise utils.ValidationError(
                'Expected title to be a string, received %s' % self.title)
        if self.title == '':
            raise utils.ValidationError('Title field should not be empty')
        # Description: must be a string (may be empty).
        if not isinstance(self.description, basestring):
            raise utils.ValidationError(
                'Expected description to be a string, received %s'
                % self.description)
        # Node count: must be a non-negative integer.
        if not isinstance(self.node_count, int):
            raise utils.ValidationError(
                'Expected node_count to be an int, received \'%s\'' % (
                    self.node_count))
        if self.node_count < 0:
            raise utils.ValidationError(
                'Expected node_count to be non-negative, received \'%s\'' % (
                    self.node_count))
        # Language code: must be a string and a recognized code.
        if not isinstance(self.language_code, basestring):
            raise utils.ValidationError(
                'Expected language code to be a string, received %s' %
                self.language_code)
        if not utils.is_valid_language_code(self.language_code):
            raise utils.ValidationError(
                'Invalid language code: %s' % self.language_code)
    def to_dict(self):
        """Returns a dictionary representation of this domain object.
        Returns:
            dict. A dict representing this StorySummary object, with the
            model timestamps converted to milliseconds since the epoch.
        """
        created_on_msec = utils.get_time_in_millisecs(
            self.story_model_created_on)
        last_updated_msec = utils.get_time_in_millisecs(
            self.story_model_last_updated)
        return {
            'id': self.id,
            'title': self.title,
            'description': self.description,
            'language_code': self.language_code,
            'version': self.version,
            'node_count': self.node_count,
            'story_model_created_on': created_on_msec,
            'story_model_last_updated': last_updated_msec
        }
    def to_human_readable_dict(self):
        """Returns a dictionary representation of this domain object.
        Returns:
            dict. A dict with only the id, title and description of this
            StorySummary object.
        """
        return {
            key: getattr(self, key)
            for key in ('id', 'title', 'description')
        }
class StoryRights(object):
    """Domain object describing who manages a story and whether it is
    published.
    """
    def __init__(self, story_id, manager_ids, story_is_published):
        """Constructs a StoryRights domain object.
        Args:
            story_id: str. The id of the story.
            manager_ids: list(str). The id of the users who have been assigned
                as managers for the story.
            story_is_published: bool. Whether the story is viewable by a
                learner.
        """
        self.id = story_id
        self.manager_ids = manager_ids
        self.story_is_published = story_is_published
    def to_dict(self):
        """Returns a dict suitable for use by the frontend.
        Returns:
            dict. A dict version of StoryRights suitable for use by the
            frontend.
        """
        # NOTE: the frontend key is 'manager_names' even though the values
        # are user ids.
        return dict(
            story_id=self.id,
            manager_names=self.manager_ids,
            story_is_published=self.story_is_published)
    def is_manager(self, user_id):
        """Checks whether given user is a manager of the story.
        Args:
            user_id: str or None. Id of the user.
        Returns:
            bool. Whether user is a manager of this story.
        """
        # Membership test already yields a bool.
        return user_id in self.manager_ids
class StoryRightsChange(change_domain.BaseChange):
    """Domain object for changes made to a story rights object.
    The allowed commands, together with the attributes:
        - 'change_role' (with assignee_id, new_role and old_role)
        - 'create_new'
        - 'publish_story'
        - 'unpublish_story'.
    """
    # The allowed list of roles which can be used in change_role command.
    ALLOWED_ROLES = [ROLE_NONE, ROLE_MANAGER]
    # Command schema consumed by the BaseChange machinery: each entry names
    # a command, its required/optional attributes, and (optionally) the
    # allowed values for specific attributes.
    ALLOWED_COMMANDS = [{
        'name': CMD_CREATE_NEW,
        'required_attribute_names': [],
        'optional_attribute_names': []
    }, {
        'name': CMD_CHANGE_ROLE,
        'required_attribute_names': ['assignee_id', 'new_role', 'old_role'],
        'optional_attribute_names': [],
        'allowed_values': {'new_role': ALLOWED_ROLES, 'old_role': ALLOWED_ROLES}
    }, {
        'name': CMD_PUBLISH_STORY,
        'required_attribute_names': [],
        'optional_attribute_names': []
    }, {
        'name': CMD_UNPUBLISH_STORY,
        'required_attribute_names': [],
        'optional_attribute_names': []
    }]
| |
# Copyright IBM Corp. 2013 All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
GPFS Volume Driver.
"""
import math
import os
import re
import shutil
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import units
import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.image import image_utils
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume import driver
# Minimum GPFS release level (encoded as e.g. 12.00 -> 1200) required
# for the mmclone file-clone feature that this driver relies on.
GPFS_CLONE_MIN_RELEASE = 1200

LOG = logging.getLogger(__name__)

# Driver-specific configuration options; registered on import below so
# they appear in cinder.conf.
gpfs_opts = [
    cfg.StrOpt('gpfs_mount_point_base',
               default=None,
               help='Specifies the path of the GPFS directory where Block '
                    'Storage volume and snapshot files are stored.'),
    cfg.StrOpt('gpfs_images_dir',
               default=None,
               help='Specifies the path of the Image service repository in '
                    'GPFS. Leave undefined if not storing images in GPFS.'),
    cfg.StrOpt('gpfs_images_share_mode',
               default='copy_on_write',
               choices=['copy', 'copy_on_write'],
               help='Specifies the type of image copy to be used. Set this '
                    'when the Image service repository also uses GPFS so '
                    'that image files can be transferred efficiently from '
                    'the Image service to the Block Storage service. There '
                    'are two valid values: "copy" specifies that a full copy '
                    'of the image is made; "copy_on_write" specifies that '
                    'copy-on-write optimization strategy is used and '
                    'unmodified blocks of the image file are shared '
                    'efficiently.'),
    cfg.IntOpt('gpfs_max_clone_depth',
               default=0,
               help='Specifies an upper limit on the number of indirections '
                    'required to reach a specific block due to snapshots or '
                    'clones. A lengthy chain of copy-on-write snapshots or '
                    'clones can have a negative impact on performance, but '
                    'improves space utilization. 0 indicates unlimited '
                    'clone depth.'),
    cfg.BoolOpt('gpfs_sparse_volumes',
                default=True,
                help=('Specifies that volumes are created as sparse files '
                      'which initially consume no space. If set to False, the '
                      'volume is created as a fully allocated file, in which '
                      'case, creation may take a significantly longer time.')),
    cfg.StrOpt('gpfs_storage_pool',
               default='system',
               help=('Specifies the storage pool that volumes are assigned '
                     'to. By default, the system storage pool is used.')),
]

CONF = cfg.CONF
CONF.register_opts(gpfs_opts)
def _different(difference_tuple):
"""Return true if two elements of a tuple are different."""
if difference_tuple:
member1, member2 = difference_tuple
return member1 != member2
else:
return False
def _same_filesystem(path1, path2):
"""Return true if the two paths are in the same GPFS file system."""
return os.lstat(path1).st_dev == os.lstat(path2).st_dev
def _sizestr(size_in_g):
"""Convert the specified size into a string value."""
return '%sG' % size_in_g
class GPFSDriver(driver.VolumeDriver):
"""Implements volume functions using GPFS primitives.
Version history:
1.0.0 - Initial driver
1.1.0 - Add volume retype, refactor volume migration
1.2.0 - Add consistency group support
"""
VERSION = "1.2.0"
def __init__(self, *args, **kwargs):
    """Initialize the driver and register GPFS-specific config options."""
    super(GPFSDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(gpfs_opts)
def _get_gpfs_state(self):
    """Return the raw 'mmgetstate -Y' output describing daemon state.

    :raises: VolumeBackendAPIException if the command fails.
    """
    try:
        out, _err = self._execute('mmgetstate', '-Y', run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        LOG.error(_LE('Failed to issue mmgetstate command, error: %s.'),
                  exc.stderr)
        raise exception.VolumeBackendAPIException(data=exc.stderr)
    return out
def _check_gpfs_state(self):
    """Raise VolumeBackendAPIException unless GPFS is in 'active' state."""
    out = self._get_gpfs_state()
    lines = out.splitlines()
    # mmgetstate -Y emits a colon-separated header row followed by a
    # data row; locate the 'state' column by name, not position.
    state_index = lines[0].split(':').index('state')
    gpfs_state = lines[1].split(':')[state_index]
    if gpfs_state == 'active':
        return
    LOG.error(_LE('GPFS is not active. Detailed output: %s.'), out)
    exception_message = (_('GPFS is not running, state: %s.') %
                         gpfs_state)
    raise exception.VolumeBackendAPIException(data=exception_message)
def _get_filesystem_from_path(self, path):
    """Return the file system (device) that *path* resides on, via df."""
    try:
        out, _err = self._execute('df', path, run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        LOG.error(_LE('Failed to issue df command for path %(path)s, '
                      'error: %(error)s.'),
                  {'path': path,
                   'error': exc.stderr})
        raise exception.VolumeBackendAPIException(data=exc.stderr)
    # df's first line is the header; the device is the first field of
    # the data line.
    return out.splitlines()[1].split()[0]
def _get_gpfs_cluster_id(self):
    """Return the id of the GPFS cluster in use (mmlsconfig clusterId)."""
    try:
        out, _err = self._execute('mmlsconfig', 'clusterId', '-Y',
                                  run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        LOG.error(_LE('Failed to issue mmlsconfig command, error: %s.'),
                  exc.stderr)
        raise exception.VolumeBackendAPIException(data=exc.stderr)
    lines = out.splitlines()
    # Header row names the columns; read the 'value' column of the
    # data row.
    value_index = lines[0].split(':').index('value')
    return lines[1].split(':')[value_index]
def _get_fileset_from_path(self, path):
    """Return the name of the GPFS fileset containing *path*.

    :raises: VolumeBackendAPIException if mmlsattr fails or its output
             does not contain a fileset name.
    """
    fs_regex = re.compile(r'.*fileset.name:\s+(?P<fileset>\w+)', re.S)
    try:
        out, _err = self._execute('mmlsattr', '-L', path,
                                  run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        LOG.error(_LE('Failed to issue mmlsattr command on path %(path)s, '
                      'error: %(error)s'),
                  {'path': path,
                   'error': exc.stderr})
        raise exception.VolumeBackendAPIException(data=exc.stderr)
    match = fs_regex.match(out)
    if match is None:
        msg = (_('Failed to find fileset for path %(path)s, command '
                 'output: %(cmdout)s.') %
               {'path': path,
                'cmdout': out})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    return match.group('fileset')
def _verify_gpfs_pool(self, storage_pool):
    """Return True if *storage_pool* exists in the GPFS file system.

    mmlspool exits non-zero for unknown pools, which we treat as
    "not valid" rather than an error.
    """
    try:
        self._execute('mmlspool', self._gpfs_device, storage_pool,
                      run_as_root=True)
    except processutils.ProcessExecutionError:
        return False
    return True
def _update_volume_storage_pool(self, local_path, new_pool):
    """Move the volume file at *local_path* into GPFS pool *new_pool*.

    A None pool means the default 'system' pool.  Returns True on
    success, False if mmchattr failed; raises if the pool is invalid.
    """
    pool = 'system' if new_pool is None else new_pool
    if not self._verify_gpfs_pool(pool):
        msg = (_('Invalid storage pool %s requested. Retype failed.') %
               pool)
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    try:
        self._execute('mmchattr', '-P', pool, local_path,
                      run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        # A failed pool change is reported to the caller, not raised,
        # so retype can fall back gracefully.
        LOG.info(_LI('Could not update storage pool with mmchattr to '
                     '%(pool)s, error: %(error)s'),
                 {'pool': pool,
                  'error': exc.stderr})
        return False
    LOG.debug('Updated storage pool with mmchattr to %s.', pool)
    return True
def _get_gpfs_fs_release_level(self, path):
    """Return the GPFS version of the specified file system.

    The file system is specified by any valid path it contains.

    Returns a (filesystem, release_level) tuple where release_level is
    a whole number, e.g. "13.23 (3.5.0.7)" -> 1323.
    """
    filesystem = self._get_filesystem_from_path(path)
    try:
        (out, err) = self._execute('mmlsfs', filesystem, '-V', '-Y',
                                   run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        LOG.error(_LE('Failed to issue mmlsfs command for path %(path)s, '
                      'error: %(error)s.'),
                  {'path': path,
                   'error': exc.stderr})
        raise exception.VolumeBackendAPIException(data=exc.stderr)
    lines = out.splitlines()
    # NOTE(review): the version is read from the 'data' column of the
    # machine-readable mmlsfs output -- confirm against mmlsfs -Y docs.
    value_token = lines[0].split(':').index('data')
    fs_release_level_str = lines[1].split(':')[value_token]
    # at this point, release string looks like "13.23 (3.5.0.7)"
    # extract first token and convert to whole number value
    fs_release_level = int(float(fs_release_level_str.split()[0]) * 100)
    return filesystem, fs_release_level
def _get_gpfs_cluster_release_level(self):
    """Return the minimum daemon release level of the current cluster."""
    try:
        out, _err = self._execute('mmlsconfig', 'minreleaseLeveldaemon',
                                  '-Y', run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        LOG.error(_LE('Failed to issue mmlsconfig command, error: %s.'),
                  exc.stderr)
        raise exception.VolumeBackendAPIException(data=exc.stderr)
    lines = out.splitlines()
    # Read the 'value' column of the data row by header name.
    value_index = lines[0].split(':').index('value')
    return int(lines[1].split(':')[value_index])
def _is_gpfs_path(self, directory):
    """Ensure the specified path is in a gpfs file system.

    mmlsattr only succeeds for paths inside GPFS.  Note that, despite
    the historical wording of this docstring, a failure is surfaced as
    VolumeBackendAPIException: the ProcessExecutionError raised by
    mmlsattr is caught and converted below.
    """
    try:
        self._execute('mmlsattr', directory, run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        LOG.error(_LE('Failed to issue mmlsattr command '
                      'for path %(path)s, '
                      'error: %(error)s.'),
                  {'path': directory,
                   'error': exc.stderr})
        raise exception.VolumeBackendAPIException(data=exc.stderr)
def _is_same_fileset(self, path1, path2):
    """Return True if both paths live in the same GPFS fileset."""
    fileset1 = self._get_fileset_from_path(path1)
    fileset2 = self._get_fileset_from_path(path2)
    return fileset1 == fileset2
def _same_cluster(self, host):
    """Return True if *host* reports the same GPFS location as we do."""
    dest_location = host['capabilities'].get('location_info')
    return self._stats['location_info'] == dest_location
def _set_rw_permission(self, path, modebits='660'):
    """Set permission bits for the path.

    Default gives owner and group read/write access.
    """
    self._execute('chmod', modebits, path, run_as_root=True)
def _can_migrate_locally(self, host):
    """Return the destination path for a local move, or None.

    Driver-assisted migration is only possible when the destination
    advertises location_info of the form
    'GPFSDriver:<cluster_id>:<path>' carrying our own cluster id.
    """
    capabilities = host['capabilities']
    if 'location_info' not in capabilities:
        LOG.debug('Evaluate migration: no location info, '
                  'cannot migrate locally.')
        return None
    info = capabilities['location_info']
    parts = info.split(':')
    if len(parts) != 3:
        LOG.debug('Evaluate migration: unexpected location info, '
                  'cannot migrate locally: %s.', info)
        return None
    dest_type, dest_id, dest_path = parts
    if dest_type != 'GPFSDriver' or dest_id != self._cluster_id:
        LOG.debug('Evaluate migration: different destination driver or '
                  'cluster id in location info: %s.', info)
        return None
    LOG.debug('Evaluate migration: use local migration.')
    return dest_path
def do_setup(self, ctxt):
    """Determine storage back end capabilities.

    Caches the GPFS cluster id, the file system device backing the
    configured mount point, and the configured storage pool, and
    verifies that the pool exists.

    :raises: VolumeBackendAPIException if the cluster id, file system
             device or storage pool cannot be determined/validated.
    """
    try:
        self._cluster_id = self._get_gpfs_cluster_id()
    except Exception as setup_exception:
        msg = (_('Could not find GPFS cluster id: %s.') %
               setup_exception)
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    try:
        gpfs_base = self.configuration.gpfs_mount_point_base
        self._gpfs_device = self._get_filesystem_from_path(gpfs_base)
    except Exception as setup_exception:
        msg = (_('Could not find GPFS file system device: %s.') %
               setup_exception)
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    pool = self.configuration.safe_get('gpfs_storage_pool')
    self._storage_pool = pool
    if not self._verify_gpfs_pool(self._storage_pool):
        # Bug fix: user-facing message previously misspelled
        # 'specified' as 'specificed'.
        msg = (_('Invalid storage pool %s specified.') %
               self._storage_pool)
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
def check_for_setup_error(self):
    """Returns an error if prerequisites aren't met.

    Validates, in order: GPFS daemon state, required config options,
    consistency of the image-share settings, cluster and file system
    release levels, and that the configured directories exist on a
    mounted GPFS file system.
    """
    self._check_gpfs_state()

    if self.configuration.gpfs_mount_point_base is None:
        msg = _('Option gpfs_mount_point_base is not set correctly.')
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    if (self.configuration.gpfs_images_share_mode and
            self.configuration.gpfs_images_share_mode not in
            ['copy_on_write', 'copy']):
        msg = _('Option gpfs_images_share_mode is not set correctly.')
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    if(self.configuration.gpfs_images_share_mode and
       self.configuration.gpfs_images_dir is None):
        msg = _('Option gpfs_images_dir is not set correctly.')
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    # copy_on_write sharing requires volumes and images to be not only
    # on the same file system but in the same fileset.
    if(self.configuration.gpfs_images_share_mode == 'copy_on_write' and
       not _same_filesystem(self.configuration.gpfs_mount_point_base,
                            self.configuration.gpfs_images_dir)):
        msg = (_('gpfs_images_share_mode is set to copy_on_write, but '
                 '%(vol)s and %(img)s belong to different file '
                 'systems.') %
               {'vol': self.configuration.gpfs_mount_point_base,
                'img': self.configuration.gpfs_images_dir})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    if(self.configuration.gpfs_images_share_mode == 'copy_on_write' and
       not self._is_same_fileset(self.configuration.gpfs_mount_point_base,
                                 self.configuration.gpfs_images_dir)):
        msg = (_('gpfs_images_share_mode is set to copy_on_write, but '
                 '%(vol)s and %(img)s belong to different filesets.') %
               {'vol': self.configuration.gpfs_mount_point_base,
                'img': self.configuration.gpfs_images_dir})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    # The mmclone feature needs a minimum daemon release level.
    _gpfs_cluster_release_level = self._get_gpfs_cluster_release_level()
    if not _gpfs_cluster_release_level >= GPFS_CLONE_MIN_RELEASE:
        msg = (_('Downlevel GPFS Cluster Detected. GPFS Clone feature '
                 'not enabled in cluster daemon level %(cur)s - must '
                 'be at least at level %(min)s.') %
               {'cur': _gpfs_cluster_release_level,
                'min': GPFS_CLONE_MIN_RELEASE})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    for directory in [self.configuration.gpfs_mount_point_base,
                      self.configuration.gpfs_images_dir]:
        if directory is None:
            continue
        if not directory.startswith('/'):
            msg = (_('%s must be an absolute path.') % directory)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        if not os.path.isdir(directory):
            msg = (_('%s is not a directory.') % directory)
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
        # Check if GPFS is mounted
        self._verify_gpfs_path_state(directory)
        # The file system itself must also be at the clone-capable level.
        filesystem, fslevel = \
            self._get_gpfs_fs_release_level(directory)
        if not fslevel >= GPFS_CLONE_MIN_RELEASE:
            msg = (_('The GPFS filesystem %(fs)s is not at the required '
                     'release level. Current level is %(cur)s, must be '
                     'at least %(min)s.') %
                   {'fs': filesystem,
                    'cur': fslevel,
                    'min': GPFS_CLONE_MIN_RELEASE})
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)
def _create_sparse_file(self, path, size):
    """Creates file with 0 disk usage.

    Uses truncate(1) to create a sparse file of *size* GB at *path*.
    """
    sizestr = _sizestr(size)
    self._execute('truncate', '-s', sizestr, path, run_as_root=True)
def _allocate_file_blocks(self, path, size):
    """Fully allocate the file by writing *size* GB of zeros into it."""
    block_size_mb = 1
    block_count = size * units.Gi / (block_size_mb * units.Mi)
    cmd = ['dd', 'if=/dev/zero', 'of=%s' % path,
           'bs=%dM' % block_size_mb,
           'count=%d' % block_count]
    self._execute(*cmd, run_as_root=True)
def _gpfs_change_attributes(self, options, path):
    """Apply the given mmchattr *options* to the file at *path*."""
    cmd = ['mmchattr'] + list(options) + [path]
    LOG.debug('Update volume attributes with mmchattr to %s.', options)
    self._execute(*cmd, run_as_root=True)
def _set_volume_attributes(self, volume, path, metadata):
    """Set various GPFS attributes for this volume.

    Translates recognized volume metadata keys into mmchattr flags,
    applies them, and optionally formats the file (fstype/fslabel).
    """
    # Map each recognized metadata key to the mmchattr flags it yields.
    flag_builders = {
        'data_pool_name': lambda v: ['-P', v],
        'replicas': lambda v: ['-r', v, '-m', v],
        'dio': lambda v: ['-D', v],
        'write_affinity_depth': lambda v: ['--write-affinity-depth', v],
        'block_group_factor': lambda v: ['--block-group-factor', v],
        'write_affinity_failure_group':
            lambda v: ['--write-affinity-failure-group', v],
    }
    options = []
    pool_set = False
    for item in metadata:
        builder = flag_builders.get(item['key'])
        if builder is not None:
            options.extend(builder(item['value']))
            if item['key'] == 'data_pool_name':
                pool_set = True

    # metadata value has precedence over value set in volume type
    if self.configuration.gpfs_storage_pool and not pool_set:
        options.extend(['-P', self.configuration.gpfs_storage_pool])

    if options:
        self._gpfs_change_attributes(options, path)

    fstype = None
    fslabel = None
    for item in metadata:
        if item['key'] == 'fstype':
            fstype = item['value']
        elif item['key'] == 'fslabel':
            fslabel = item['value']
    if fstype:
        self._mkfs(volume, fstype, fslabel)
def create_volume(self, volume):
    """Creates a GPFS volume."""
    # Fail early if the GPFS mount point is not usable.
    self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base)
    path = self.local_path(volume)
    size = volume['size']
    # Always start from a sparse file; GPFS attributes must be applied
    # before any blocks are allocated so allocation follows policy.
    self._create_sparse_file(path, size)
    self._set_rw_permission(path)
    self._set_volume_attributes(volume, path,
                                volume.get('volume_metadata'))
    if not self.configuration.gpfs_sparse_volumes:
        self._allocate_file_blocks(path, size)
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a GPFS volume from a snapshot.

    Clones (copy-on-write) only when the snapshot's parent volume is in
    the same consistency group as the new volume; otherwise performs a
    full copy.  Returns the resulting size in GB.
    """
    snapshot_path = self._get_snapshot_path(snapshot)
    volume_path = self.local_path(volume)
    clone = False
    if volume['consistencygroup_id'] is not None:
        ctxt = context.get_admin_context()
        parent_vol = self.db.volume_get(ctxt, snapshot['volume_id'])
        clone = (volume['consistencygroup_id'] ==
                 parent_vol['consistencygroup_id'])
    if clone:
        self._create_gpfs_copy(src=snapshot_path, dest=volume_path)
        self._gpfs_redirect(volume_path)
    else:
        self._gpfs_full_copy(snapshot_path, volume_path)
    self._set_rw_permission(volume_path)
    self._set_volume_attributes(volume, volume_path,
                                volume.get('volume_metadata'))
    virt_size = self._resize_volume_file(volume, volume['size'])
    return {'size': math.ceil(virt_size / units.Gi)}
def create_cloned_volume(self, volume, src_vref):
    """Create a GPFS volume from another volume.

    Uses a copy-on-write clone within the same consistency group and a
    full copy otherwise.  Returns the resulting size in GB.
    """
    src_path = self.local_path(src_vref)
    dest_path = self.local_path(volume)
    same_cg = (volume['consistencygroup_id'] ==
               src_vref['consistencygroup_id'])
    if same_cg:
        self._create_gpfs_clone(src_path, dest_path)
    else:
        self._gpfs_full_copy(src_path, dest_path)
    self._set_rw_permission(dest_path)
    self._set_volume_attributes(volume, dest_path,
                                volume.get('volume_metadata'))
    virt_size = self._resize_volume_file(volume, volume['size'])
    return {'size': math.ceil(virt_size / units.Gi)}
def _delete_gpfs_file(self, fchild):
    """Delete a GPFS file and cleanup clone children.

    If *fchild* has a hidden clone parent (a '.snap' or '.ts' file left
    behind by earlier clone operations), recursively delete that parent
    once the child is gone and no other volume still references it.
    """
    if not os.path.exists(fchild):
        return
    (out, err) = self._execute('mmclone', 'show', fchild, run_as_root=True)
    fparent = None
    # Extract the parent inode number from 'mmclone show' output and
    # resolve it back to a path via find(1) in the same directory.
    inode_regex = re.compile(
        r'.*\s+(?:yes|no)\s+\d+\s+(?P<inode>\d+)', re.M | re.S)
    match = inode_regex.match(out)
    if match:
        inode = match.group('inode')
        path = os.path.dirname(fchild)
        (out, err) = self._execute('find', path, '-maxdepth', '1',
                                   '-inum', inode, run_as_root=True)
        if out:
            fparent = out.split('\n', 1)[0]
    self._execute(
        'rm', '-f', fchild, check_exit_code=False, run_as_root=True)

    # There is no need to check for volume references on this snapshot
    # because 'rm -f' itself serves as a simple and implicit check. If the
    # parent is referenced by another volume, GPFS doesn't allow deleting
    # it. 'rm -f' silently fails and the subsequent check on the path
    # indicates whether there are any volumes derived from that snapshot.
    # If there are such volumes, we quit recursion and let the other
    # volumes delete the snapshot later. If there are no references, rm
    # would succeed and the snapshot is deleted.
    if not os.path.exists(fchild) and fparent:
        fpbase = os.path.basename(fparent)
        if fpbase.endswith('.snap') or fpbase.endswith('.ts'):
            self._delete_gpfs_file(fparent)
def delete_volume(self, volume):
    """Deletes a logical volume."""
    # Ensure the GPFS mount point is usable before touching files.
    self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base)
    self._delete_gpfs_file(self.local_path(volume))
def _gpfs_redirect(self, src):
    """Removes the copy_on_write dependency between src and parent.

    When the clone-chain depth of *src* exceeds the configured
    gpfs_max_clone_depth, detach it from its immediate parent so the
    chain shrinks by one.  Returns True if a redirect was issued.
    """
    max_depth = self.configuration.gpfs_max_clone_depth
    # 0 means unlimited clone depth: never redirect.
    if max_depth == 0:
        return False
    out, _err = self._execute('mmclone', 'show', src, run_as_root=True)
    depth_regex = re.compile(r'.*\s+no\s+(?P<depth>\d+)', re.M | re.S)
    match = depth_regex.match(out)
    if match is None:
        return False
    if int(match.group('depth')) <= max_depth:
        return False
    self._execute('mmclone', 'redirect', src, run_as_root=True)
    return True
def _create_gpfs_clone(self, src, dest):
    """Create a GPFS file clone parent for the specified file.

    Snapshots *src* into a temporary '<dest>.snap' parent, clones
    *dest* from it, and removes the temporary parent once both files
    have been redirected away from it.
    """
    snap = dest + ".snap"
    self._create_gpfs_snap(src, snap)
    self._create_gpfs_copy(snap, dest)
    if self._gpfs_redirect(src) and self._gpfs_redirect(dest):
        self._execute('rm', '-f', snap, run_as_root=True)
def _create_gpfs_copy(self, src, dest):
    """Create a GPFS file clone copy for the specified file.

    *src* must already be a clone parent (see _create_gpfs_snap).
    """
    self._execute('mmclone', 'copy', src, dest, run_as_root=True)
def _gpfs_full_copy(self, src, dest):
    """Create a full (non-clone) copy from src to dest."""
    self._execute('cp', src, dest,
                  check_exit_code=True, run_as_root=True)
def _create_gpfs_snap(self, src, dest=None):
    """Create a GPFS file clone snapshot for the specified file.

    When *dest* is omitted, *src* itself is converted to a clone
    parent in place.
    """
    cmd = ['mmclone', 'snap', src]
    if dest is not None:
        cmd.append(dest)
    self._execute(*cmd, run_as_root=True)
def _is_gpfs_parent_file(self, gpfs_file):
    """Return True if the specified file is a gpfs clone parent."""
    out, _err = self._execute('mmclone', 'show', gpfs_file,
                              run_as_root=True)
    # The first token of the last output line is 'yes' for a parent.
    last_line = out.splitlines()[-1]
    return last_line.split()[0] == 'yes'
def create_snapshot(self, snapshot):
    """Creates a GPFS snapshot."""
    snapshot_path = self._get_snapshot_path(snapshot)
    base_dir = os.path.dirname(snapshot_path)
    volume_path = os.path.join(base_dir, snapshot['volume_name'])
    self._create_gpfs_snap(src=volume_path, dest=snapshot_path)
    # Snapshots are read-mostly: owner read/write, group read only.
    self._set_rw_permission(snapshot_path, modebits='640')
    # Keep the parent volume's clone chain within the configured depth.
    self._gpfs_redirect(volume_path)
def delete_snapshot(self, snapshot):
    """Deletes a GPFS snapshot."""
    # Rename the deleted snapshot to indicate it no longer exists in
    # cinder db. Attempt to delete the snapshot. If the snapshot has
    # clone children, the delete will fail silently. When volumes that
    # are clone children are deleted in the future, the remaining ts
    # snapshots will also be deleted.
    snapshot_path = self._get_snapshot_path(snapshot)
    snapshot_ts_path = '%s.ts' % snapshot_path
    self._execute('mv', snapshot_path, snapshot_ts_path, run_as_root=True)
    self._execute('rm', '-f', snapshot_ts_path,
                  check_exit_code=False, run_as_root=True)
def _get_snapshot_path(self, snapshot):
    """Return the file path for *snapshot*, beside its parent volume."""
    ctxt = context.get_admin_context()
    parent_vol = self.db.volume_get(ctxt, snapshot['volume_id'])
    parent_dir = os.path.dirname(self.local_path(parent_vol))
    return os.path.join(parent_dir, snapshot['name'])
def local_path(self, volume):
    """Return the local path for the specified volume.

    Volumes belonging to a consistency group live inside that group's
    'consisgroup-<id>' directory under the GPFS mount point.
    """
    base = self.configuration.gpfs_mount_point_base
    cg_id = volume['consistencygroup_id']
    if cg_id is not None:
        cgname = "consisgroup-%s" % cg_id
        return os.path.join(base, cgname, volume['name'])
    return os.path.join(base, volume['name'])
def ensure_export(self, context, volume):
    """Synchronously recreates an export for a logical volume.

    No-op: GPFS volumes are plain files and need no export step.
    """
    pass
def create_export(self, context, volume):
    """Exports the volume.

    No-op: GPFS volumes are plain files and need no export step.
    """
    pass
def remove_export(self, context, volume):
    """Removes an export for a logical volume.

    No-op: GPFS volumes are plain files and need no export step.
    """
    pass
def initialize_connection(self, volume, connector):
    """Return connection info pointing at the volume's backing file."""
    connection_data = {
        'name': volume['name'],
        'device_path': self.local_path(volume),
    }
    return {'driver_volume_type': 'gpfs', 'data': connection_data}
def terminate_connection(self, volume, connector, **kwargs):
    """No-op: nothing to tear down for a file-backed GPFS volume."""
    pass
def get_volume_stats(self, refresh=False):
    """Get volume stats.

    Refreshes the cached stats when *refresh* is True or when no
    stats have been gathered yet.
    """
    if refresh or not self._stats:
        self._update_volume_stats()
    return self._stats
def _update_volume_stats(self):
    """Retrieve stats info from volume group."""
    LOG.debug("Updating volume stats.")
    gpfs_base = self.configuration.gpfs_mount_point_base
    backend_name = self.configuration.safe_get('volume_backend_name')
    free, capacity = self._get_available_capacity(gpfs_base)
    self._stats = {
        'volume_backend_name': backend_name or 'GPFS',
        'vendor_name': 'IBM',
        'driver_version': self.VERSION,
        'storage_protocol': 'file',
        'total_capacity_gb': math.ceil(capacity / units.Gi),
        'free_capacity_gb': math.ceil(free / units.Gi),
        'reserved_percentage': 0,
        'QoS_support': False,
        'storage_pool': self._storage_pool,
        # Used by _can_migrate_locally on peer hosts.
        'location_info': ('GPFSDriver:%(cluster_id)s:%(root_path)s' %
                          {'cluster_id': self._cluster_id,
                           'root_path': gpfs_base}),
        'consistencygroup_support': 'True',
    }
def clone_image(self, context, volume,
                image_location, image_meta,
                image_service):
    """Create a volume from the specified image.

    Delegates to _clone_image; returns (model_update, cloned) where
    cloned is False when the image could not be cloned efficiently.
    """
    return self._clone_image(volume, image_location, image_meta['id'])
def _is_cloneable(self, image_id):
    """Return (cloneable, reason, image_path) for the given image.

    The image is cloneable only when the Glance repository is
    configured to live in GPFS and the image file actually resides on
    a GPFS file system.
    """
    if not((self.configuration.gpfs_images_dir and
            self.configuration.gpfs_images_share_mode)):
        reason = 'glance repository not configured to use GPFS'
        return False, reason, None
    image_path = os.path.join(self.configuration.gpfs_images_dir, image_id)
    try:
        self._is_gpfs_path(image_path)
    except (processutils.ProcessExecutionError,
            exception.VolumeBackendAPIException):
        # Bug fix: _is_gpfs_path converts ProcessExecutionError into
        # VolumeBackendAPIException, so the original handler never
        # fired and the error propagated instead of falling back to a
        # normal image fetch.  Catch both to restore the intended
        # "not cloneable" result.
        reason = 'image file not in GPFS'
        return False, reason, None
    return True, None, image_path
def _clone_image(self, volume, image_location, image_id):
    """Attempt to create a volume by efficiently copying image to volume.

    If both source and target are backed by gpfs storage and the source
    image is in raw format move the image to create a volume using either
    gpfs clone operation or with a file copy. If the image format is not
    raw, convert it to raw at the volume path.

    Returns (None, False) when the image cannot be cloned, otherwise
    ({'provider_location': None}, True).
    """
    # Check if GPFS is mounted
    self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base)

    cloneable_image, reason, image_path = self._is_cloneable(image_id)
    if not cloneable_image:
        LOG.debug('Image %(img)s not cloneable: %(reas)s.',
                  {'img': image_id, 'reas': reason})
        return (None, False)

    vol_path = self.local_path(volume)

    data = image_utils.qemu_img_info(image_path)

    # if image format is already raw either clone it or
    # copy it depending on config file settings
    if data.file_format == 'raw':
        if (self.configuration.gpfs_images_share_mode ==
                'copy_on_write'):
            LOG.debug('Clone image to vol %s using mmclone.',
                      volume['id'])
            # if the image is not already a GPFS snap file make it so
            if not self._is_gpfs_parent_file(image_path):
                self._create_gpfs_snap(image_path)

            self._create_gpfs_copy(image_path, vol_path)
        elif self.configuration.gpfs_images_share_mode == 'copy':
            LOG.debug('Clone image to vol %s using copyfile.',
                      volume['id'])
            shutil.copyfile(image_path, vol_path)

    # if image is not raw convert it to raw into vol_path destination
    else:
        LOG.debug('Clone image to vol %s using qemu convert.',
                  volume['id'])
        image_utils.convert_image(image_path, vol_path, 'raw')

    self._set_rw_permission(vol_path)
    self._resize_volume_file(volume, volume['size'])

    return {'provider_location': None}, True
def copy_image_to_volume(self, context, volume, image_service, image_id):
    """Fetch the image from image_service and write it to the volume.

    Note that cinder.volume.flows.create_volume will attempt to use
    clone_image to efficiently create volume from image when both
    source and target are backed by gpfs storage. If that is not the
    case, this function is invoked and uses fetch_to_raw to create the
    volume.
    """
    # Check if GPFS is mounted
    self._verify_gpfs_path_state(self.configuration.gpfs_mount_point_base)

    LOG.debug('Copy image to vol %s using image_utils fetch_to_raw.',
              volume['id'])
    image_utils.fetch_to_raw(context, image_service, image_id,
                             self.local_path(volume),
                             self.configuration.volume_dd_blocksize,
                             size=volume['size'])
    self._resize_volume_file(volume, volume['size'])
def _resize_volume_file(self, volume, new_size):
    """Resize volume file to new size.

    Returns the virtual size of the resized image file in bytes.
    """
    vol_path = self.local_path(volume)
    try:
        image_utils.resize_image(vol_path, new_size, run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        LOG.error(_LE("Failed to resize volume "
                      "%(volume_id)s, error: %(error)s."),
                  {'volume_id': volume['id'],
                   'error': exc.stderr})
        raise exception.VolumeBackendAPIException(data=exc.stderr)
    return image_utils.qemu_img_info(vol_path).virtual_size
def extend_volume(self, volume, new_size):
    """Extend an existing volume.

    Simply grows the backing image file to *new_size* GB.
    """
    self._resize_volume_file(volume, new_size)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
    """Copy the volume to the specified image."""
    image_utils.upload_volume(context,
                              image_service,
                              image_meta,
                              self.local_path(volume))
def backup_volume(self, context, backup, backup_service):
    """Create a new backup from an existing volume.

    A clone of the volume is taken as a stable backup source, handed
    to the backup service, then removed.
    """
    volume = self.db.volume_get(context, backup['volume_id'])
    volume_path = self.local_path(volume)
    LOG.debug('Begin backup of volume %s.', volume['name'])

    # create a snapshot that will be used as the backup source
    backup_path = '%s_%s' % (volume_path, backup['id'])
    self._create_gpfs_clone(volume_path, backup_path)
    self._gpfs_redirect(volume_path)
    try:
        # temporary_chown lets the cinder user read the root-owned file.
        with utils.temporary_chown(backup_path):
            with fileutils.file_open(backup_path) as backup_file:
                backup_service.backup(backup, backup_file)
    finally:
        # clean up snapshot file. If it is a clone parent, delete
        # will fail silently, but be cleaned up when volume is
        # eventually removed. This ensures we do not accumulate
        # more than gpfs_max_clone_depth snap files.
        self._delete_gpfs_file(backup_path)
def restore_backup(self, context, backup, volume, backup_service):
    """Restore an existing backup to a new or existing volume."""
    LOG.debug('Begin restore of backup %s.', backup['id'])

    volume_path = self.local_path(volume)
    # temporary_chown lets the cinder user write the root-owned file.
    with utils.temporary_chown(volume_path):
        with fileutils.file_open(volume_path, 'wb') as volume_file:
            backup_service.restore(backup, volume['id'], volume_file)
def _migrate_volume(self, volume, host):
    """Migrate vol if source and dest are managed by same GPFS cluster.

    Returns (migrated, model_update); migrated is False when generic
    (host-assisted) migration should be used instead.
    """
    LOG.debug('Migrate volume request %(vol)s to %(host)s.',
              {'vol': volume['name'],
               'host': host['host']})
    dest_path = self._can_migrate_locally(host)

    if dest_path is None:
        LOG.debug('Cannot migrate volume locally, use generic migration.')
        return (False, None)
    if dest_path == self.configuration.gpfs_mount_point_base:
        LOG.debug('Migration target is same cluster and path, '
                  'no work needed.')
        return (True, None)

    LOG.debug('Migration target is same cluster but different path, '
              'move the volume file.')
    local_path = self.local_path(volume)
    new_path = os.path.join(dest_path, volume['name'])
    try:
        self._execute('mv', local_path, new_path, run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        LOG.error(_LE('Driver-based migration of volume %(vol)s failed. '
                      'Move from %(src)s to %(dst)s failed with error: '
                      '%(error)s.'),
                  {'vol': volume['name'],
                   'src': local_path,
                   'dst': new_path,
                   'error': exc.stderr})
        return (False, None)
    return (True, None)
def migrate_volume(self, context, volume, host):
    """Attempt to migrate a volume to specified host.

    Only driver-assisted (same GPFS cluster) migration is supported;
    see _migrate_volume.
    """
    return self._migrate_volume(volume, host)
def retype(self, context, volume, new_type, diff, host):
    """Modify volume to be of new type.

    Handles storage-pool changes (mmchattr) and same-cluster host
    moves.  Returns True if either succeeded; returns False when the
    backend name differs, forcing a full migration instead.
    """
    LOG.debug('Retype volume request %(vol)s to be %(type)s '
              '(host: %(host)s), diff %(diff)s.',
              {'vol': volume['name'],
               'type': new_type,
               'host': host,
               'diff': diff})

    retyped = False
    migrated = False
    pools = diff['extra_specs'].get('capabilities:storage_pool')

    backends = diff['extra_specs'].get('volume_backend_name')
    hosts = (volume['host'], host['host'])

    # if different backends let migration create a new volume and copy
    # data because the volume is considered to be substantially different
    if _different(backends):
        backend1, backend2 = backends
        # Bug fix: the second placeholder previously logged backend1
        # twice ({'backend2': backend1}).
        LOG.debug('Retype request is for different backends, '
                  'use migration: %(backend1)s %(backend2)s.',
                  {'backend1': backend1, 'backend2': backend2})
        return False

    if _different(pools):
        old, new = pools
        LOG.debug('Retype pool attribute from %(old)s to %(new)s.',
                  {'old': old, 'new': new})
        retyped = self._update_volume_storage_pool(self.local_path(volume),
                                                   new)

    if _different(hosts):
        source, destination = hosts
        LOG.debug('Retype hosts migrate from: %(source)s to '
                  '%(destination)s.', {'source': source,
                                       'destination': destination})
        migrated, mdl_update = self._migrate_volume(volume, host)
        if migrated:
            updates = {'host': host['host']}
            self.db.volume_update(context, volume['id'], updates)

    return retyped or migrated
def _mkfs(self, volume, filesystem, label=None):
"""Initialize volume to be specified filesystem type."""
if filesystem == 'swap':
cmd = ['mkswap']
else:
cmd = ['mkfs', '-t', filesystem]
if filesystem in ('ext3', 'ext4'):
cmd.append('-F')
if label:
if filesystem in ('msdos', 'vfat'):
label_opt = '-n'
else:
label_opt = '-L'
cmd.extend([label_opt, label])
path = self.local_path(volume)
cmd.append(path)
try:
self._execute(*cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
exception_message = (_("mkfs failed on volume %(vol)s, "
"error message was: %(err)s.")
% {'vol': volume['name'], 'err': exc.stderr})
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
def _get_available_capacity(self, path):
"""Calculate available space on path."""
# Check if GPFS is mounted
try:
self._verify_gpfs_path_state(path)
mounted = True
except exception.VolumeBackendAPIException:
mounted = False
# If GPFS is not mounted, return zero capacity. So that the volume
# request can be scheduled to another volume service.
if not mounted:
return 0, 0
out, err = self._execute('df', '-P', '-B', '1', path,
run_as_root=True)
out = out.splitlines()[1]
size = int(out.split()[1])
available = int(out.split()[3])
return available, size
def _verify_gpfs_path_state(self, path):
    """Examine if GPFS is active and file system is mounted or not.

    :param path: path inside the GPFS mount to probe.
    :raises exception.VolumeBackendAPIException: when the probe command
        fails, i.e. the path is not reachable.
    """
    try:
        self._is_gpfs_path(path)
    except processutils.ProcessExecutionError:
        # Translate the low-level command failure into a backend error
        # that callers (e.g. _get_available_capacity) can catch.
        msg = (_('%s cannot be accessed. Verify that GPFS is active and '
                 'file system is mounted.') % path)
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
def create_consistencygroup(self, context, group):
    """Create consistency group of GPFS volumes.

    Each consistency group is backed by a dedicated GPFS fileset with
    its own inode space, linked under the GPFS mount point.
    """
    cgname = "consisgroup-%s" % group['id']
    fsdev = self._gpfs_device
    cgpath = os.path.join(self.configuration.gpfs_mount_point_base,
                          cgname)

    def _fail(msg):
        # Log and convert a command failure into a backend API error.
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)

    # Create the fileset that backs the consistency group.
    try:
        self._execute('mmcrfileset', fsdev, cgname,
                      '--inode-space', 'new', run_as_root=True)
    except processutils.ProcessExecutionError as e:
        _fail(_('Failed to create consistency group: %(cgid)s. '
                'Error: %(excmsg)s.') %
              {'cgid': group['id'], 'excmsg': six.text_type(e)})
    # Link the fileset into the file system namespace.
    try:
        self._execute('mmlinkfileset', fsdev, cgname,
                      '-J', cgpath, run_as_root=True)
    except processutils.ProcessExecutionError as e:
        _fail(_('Failed to link fileset for the share %(cgname)s. '
                'Error: %(excmsg)s.') %
              {'cgname': cgname, 'excmsg': six.text_type(e)})
    # Restrict access to owner and group.
    try:
        self._execute('chmod', '770', cgpath, run_as_root=True)
    except processutils.ProcessExecutionError as e:
        _fail(_('Failed to set permissions for the consistency group '
                '%(cgname)s. '
                'Error: %(excmsg)s.') %
              {'cgname': cgname, 'excmsg': six.text_type(e)})
    return {'status': 'available'}
def delete_consistencygroup(self, context, group):
    """Delete consistency group of GPFS volumes.

    Unlinking and deleting the backing fileset also removes every
    volume and snapshot stored inside it.
    """
    cgname = "consisgroup-%s" % group['id']
    fsdev = self._gpfs_device
    model_update = {'status': group['status']}
    volumes = self.db.volume_get_all_by_group(context, group['id'])
    # Unlink and delete the fileset associated with the consistency group.
    # All of the volumes and volume snapshot data will also be deleted.
    try:
        self._execute('mmunlinkfileset', fsdev, cgname, '-f',
                      run_as_root=True)
    except processutils.ProcessExecutionError as e:
        msg = (_('Failed to unlink fileset for consistency group '
                 '%(cgname)s. Error: %(excmsg)s.') %
               {'cgname': cgname, 'excmsg': six.text_type(e)})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    try:
        self._execute('mmdelfileset', fsdev, cgname, '-f',
                      run_as_root=True)
    except processutils.ProcessExecutionError as e:
        msg = (_('Failed to delete fileset for consistency group '
                 '%(cgname)s. Error: %(excmsg)s.') %
               {'cgname': cgname, 'excmsg': six.text_type(e)})
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    for volume_ref in volumes:
        volume_ref['status'] = 'deleted'
    return model_update, volumes
def create_cgsnapshot(self, context, cgsnapshot):
    """Create snapshot of a consistency group of GPFS volumes."""
    snapshots = self.db.snapshot_get_all_for_cgsnapshot(
        context, cgsnapshot['id'])
    # Snapshot every member volume, marking each available as it is taken.
    for snap in snapshots:
        self.create_snapshot(snap)
        snap['status'] = 'available'
    return {'status': 'available'}, snapshots
def delete_cgsnapshot(self, context, cgsnapshot):
    """Delete snapshot of a consistency group of GPFS volumes."""
    snapshots = self.db.snapshot_get_all_for_cgsnapshot(
        context, cgsnapshot['id'])
    # Remove every member snapshot and flag it deleted.
    for snap in snapshots:
        self.delete_snapshot(snap)
        snap['status'] = 'deleted'
    return {'status': cgsnapshot['status']}, snapshots
| |
#/u/GoldenSights
import bot3 as bot
import datetime
import praw
import random
import requests
import sqlite3
import string
import sys
import time
import traceback
USERAGENT = '''
/u/GoldenSights Usernames data collection:
Gathering the creation dates of user accounts for visualization.
More at https://github.com/voussoir/reddit/tree/master/Usernames
'''.replace('\n', ' ').strip()
sql = sqlite3.connect('D:\\git\\reddit\\usernames\\un.db')
cur = sql.cursor()
# FIX: `lowername` was missing from CREATE TABLE even though the
# nameindex below, the 11-column INSERT in smartinsert(), and every
# lowername-based lookup require it. Existing databases are unaffected
# (CREATE TABLE IF NOT EXISTS), but a fresh database previously failed
# as soon as the nameindex CREATE INDEX ran.
cur.execute('''
CREATE TABLE IF NOT EXISTS users(
idint INT,
idstr TEXT,
created INT,
human TEXT,
name TEXT,
link_karma INT,
comment_karma INT,
total_karma INT,
available INT,
lastscan INT,
lowername TEXT)
''')
cur.execute('CREATE INDEX IF NOT EXISTS userindex ON users(idint)')
cur.execute('CREATE INDEX IF NOT EXISTS index_users_available ON users(available)')
cur.execute('CREATE INDEX IF NOT EXISTS nameindex ON users(lowername)')
sql.commit()
# These numbers are used for interpreting the tuples that come from SELECT
SQL_USER_COLUMNS = [
    'idint',
    'idstr',
    'created',
    'human',
    'name',
    'link_karma',
    'comment_karma',
    'total_karma',
    'available',
    'lastscan',
    'lowername',
]
SQL_USER = {key: index for (index, key) in enumerate(SQL_USER_COLUMNS)}
# Maps both directions: bool -> label and label -> stored integer.
AVAILABILITY = {True: 'available', False: 'unavailable', 'available': 1, 'unavailable': 0}
HEADER_FULL = ' ID CREATED NAME LINK COMMENT TOTAL LAST SCANNED'
HEADER_BRIEF = ' LAST SCANNED | NAME'
MEMBERFORMAT_FULL = '{id:>6} {created} {username:<20} {link_karma:>9} {comment_karma:>9} ({total_karma:>10}) | {lastscan}'
MEMBERFORMAT_BRIEF = '{lastscan} | {username}'
MIN_LASTSCAN_DIFF = 86400 * 2000
# Don't rescan a name if we scanned it this many days ago
VALID_CHARS = string.ascii_letters + string.digits + '_-'
# If True, print the name of the user we're about to fetch.
# Good for debugging problematic users.
PREPRINT = False
print('Logging in.')
r = praw.Reddit(USERAGENT)
bot.login(r)
def allpossiblefromset(characters, length=None, minlength=None, maxlength=None):
    '''
    Given an iterable of characters, yield every permutation of length
    `length`. If `minlength` and `maxlength` are both provided, every
    intermediate length is generated as well.
    '''
    if minlength is not None and maxlength is not None:
        for sublength in range(minlength, maxlength + 1):
            yield from allpossiblefromset(characters, sublength)
    elif length is None:
        raise ValueError('`length` must be provided if you arent using the min/max')
    else:
        # Count before deduplication, matching the historical behavior.
        total = len(characters) ** length
        alphabet = ''.join(sorted(set(characters)))
        for index in range(total):
            word = base36encode(index, alphabet=alphabet)
            # Left-pad with the lowest character up to the target length.
            yield word.rjust(length, alphabet[0])
def base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    """Converts an integer to a base36 string (any alphabet works)."""
    if not isinstance(number, int):
        raise TypeError('number must be an integer')
    prefix = ''
    if number < 0:
        prefix = '-'
        number = -number
    base = len(alphabet)
    if number < base:
        # Single digit: no loop needed.
        return prefix + alphabet[number]
    digits = []
    while number:
        number, remainder = divmod(number, base)
        digits.append(alphabet[remainder])
    return prefix + ''.join(reversed(digits))
def base36decode(number):
    """Parse a base36 string back into an integer."""
    return int(number, base=36)
def b36(i):
    """Flip between base36 forms: int -> str, str -> int."""
    # Deliberately exact type checks (not isinstance), as in the original.
    if type(i) is int:
        return base36encode(i)
    if type(i) is str:
        return base36decode(i)
def check_old(available=None, threshold=86400):
    '''
    Update names in ascending order of their last scan
    available = False : do not include available names
                None  : do include available names
                True  : only include available names
    threshold = how long ago the lastscan must be (seconds).
    '''
    cutoff = getnow() - threshold
    assert available in (False, None, True)
    queries = {
        False: 'SELECT name FROM users WHERE available = 0 AND lastscan < ? ORDER BY lastscan ASC',
        None: 'SELECT name FROM users WHERE lastscan < ? ORDER BY lastscan ASC',
        True: 'SELECT name FROM users WHERE available = 1 AND lastscan < ? ORDER BY lastscan ASC',
    }
    cur.execute(queries[available], [cutoff])
    for row in cur.fetchall():
        process(row, quiet=True, noskip=True)
def count(validonly=False):
    """Row count; with validonly, only rows with a known id that are taken."""
    if validonly:
        query = 'SELECT COUNT(*) FROM users WHERE idint IS NOT NULL AND available == 0'
    else:
        query = 'SELECT COUNT(*) FROM users'
    cur.execute(query)
    return cur.fetchone()[0]
def execit(*args, **kwargs):
    '''
    Allows another module to run code here using local names instead of
    qualified names.
    '''
    # NOTE: executes arbitrary code by design; only feed it trusted input.
    exec(*args, **kwargs)
def fetchgenerator(cur):
    '''
    Yield rows from `cur` one at a time until the query is exhausted,
    so callers can use a for-loop instead of a fetchone/while dance.
    '''
    # iter(callable, sentinel) stops as soon as fetchone() returns None.
    yield from iter(cur.fetchone, None)
def fetchwriter(cur, outfile, spacer1=' ', spacer2=None, brief=False):
    '''
    Write items from the current sql query to the specified file.
    If two spacers are provided, it will flip-flop between them
    on alternating lines to help readability.
    '''
    # NOTE(review): `spacer` is computed but never applied to the output,
    # so the flip-flop currently has no visible effect; presumably it was
    # meant to prefix `item`. Left untouched to preserve output.
    flipflop = True
    for item in fetchgenerator(cur):
        spacer = spacer1 if flipflop else spacer2
        # One formatted line per row; `brief` picks the short format.
        if brief:
            item = memberformat_brief(item)
        else:
            item = memberformat_full(item)
        print(item, file=outfile)
        if spacer2 is not None:
            flipflop = not flipflop
def find(name, doreturn=False):
    '''
    Print the details of a username, or return them when doreturn=True.
    '''
    entry = getentry(name=name)
    if entry and doreturn:
        return entry
    if entry:
        print_message(entry)
    return None
def get_from_hot(sr, limit=None, submissions=True, comments=False, returnnames=False):
    '''
    Shortcut for get_from_listing, using /hot
    '''
    return get_from_listing(sr, limit, praw.objects.Subreddit.get_hot,
                            submissions, comments, returnnames)
def get_from_listing(sr, limit, listfunction, submissions=True, comments=True, returnnames=False):
    '''
    Get submission listings using one of PRAW's get methods
    and process those usernames
    `listfunction` would be praw.objects.Subreddit.get_new for example
    '''
    # fetch is skipped for 'all' — presumably because /r/all is a virtual
    # subreddit with no about-page of its own; TODO confirm.
    subreddit = r.get_subreddit(sr, fetch=sr != 'all')
    if limit is None:
        limit = 1000
    authors = set()
    if submissions is True:
        print('/r/%s, %d submissions' % (subreddit.display_name, limit))
        # The unbound method is stashed on the instance, so it is not
        # auto-bound; the subreddit must be passed explicitly below.
        subreddit.lf = listfunction
        for item in subreddit.lf(subreddit, limit=limit):
            if item.author is not None:
                # Deleted accounts have author None.
                authors.add(item.author.name)
    if comments is True:
        print('/r/%s, %d comments' % (subreddit.display_name, limit))
        for item in subreddit.get_comments(limit=limit):
            if item.author is not None:
                authors.add(item.author.name)
    if returnnames is True:
        return authors
    try:
        process(authors)
    except KeyboardInterrupt:
        # Keep whatever was inserted before the interrupt.
        sql.commit()
        raise
def get_from_new(sr, limit=None, submissions=True, comments=True, returnnames=False):
    '''
    Shortcut for get_from_listing, using /new
    '''
    return get_from_listing(sr, limit, praw.objects.Subreddit.get_new,
                            submissions, comments, returnnames)
def get_from_top(sr, limit=None, submissions=True, comments=False, returnnames=False):
    '''
    Shortcut for get_from_listing, using /top?t=all
    '''
    return get_from_listing(sr, limit, praw.objects.Subreddit.get_top_from_all,
                            submissions, comments, returnnames)
def getentry(**kwargs):
    """Fetch a single user row looked up by idint, idstr, or name."""
    if len(kwargs) != 1:
        raise Exception("Only 1 argument please")
    (field, value), = kwargs.items()
    if field == 'idint':
        cur.execute('SELECT * FROM users WHERE idint=?', [value])
    elif field == 'idstr':
        cur.execute('SELECT * FROM users WHERE idstr=?', [value])
    elif field == 'name':
        # Names are stored lowercased for case-insensitive lookup.
        cur.execute('SELECT * FROM users WHERE lowername=?', [value.lower()])
    else:
        return None
    return cur.fetchone()
def getnow(timestamp=True):
    """Current UTC time; a unix timestamp by default, else an aware datetime."""
    now = datetime.datetime.now(datetime.timezone.utc)
    return now.timestamp() if timestamp else now
def human(timestamp):
    """Format a unix timestamp as e.g. 'Jan 01 1970 00:00:00 UTC'.

    Uses a timezone-aware conversion: datetime.utcfromtimestamp is
    deprecated since Python 3.12; fromtimestamp with an explicit UTC
    tzinfo produces the same wall-clock fields.
    """
    day = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
    return datetime.datetime.strftime(day, "%b %d %Y %H:%M:%S UTC")
def idlenew(subreddit='all', limit=100, sleepy=15):
    '''
    Infinitely grab the /new queue and process names, ignoring any
    exceptions. Great for processing while AFK.
    '''
    while True:
        try:
            get_from_new(subreddit, limit)
        except KeyboardInterrupt:
            raise
        # FIX: was a bare `except:`, which also swallowed SystemExit;
        # only ordinary errors should keep the loop alive.
        except Exception:
            traceback.print_exc()
            # Reset PRAW's oauth flag after a failure before retrying.
            r._use_oauth = False
        time.sleep(sleepy)
def load_textfile(filename):
    '''
    Returns list of lines, stripped of surrounding whitespace.
    See also `save_textfile`.
    '''
    # `with` guarantees the handle is closed even if reading fails.
    with open(filename, 'r') as f:
        return [line.strip() for line in f]
def memberformat_brief(data):
    '''
    Shorter version of memberformat which I'm using for the "available" list.
    '''
    return MEMBERFORMAT_BRIEF.format(
        lastscan=human(data[SQL_USER['lastscan']]),
        username=data[SQL_USER['name']],
    )
def memberformat_full(data):
    '''
    Turn one SELECT row into a single line for one of the show files.
    '''
    karmas = [data[SQL_USER[key]]
              for key in ('link_karma', 'comment_karma', 'total_karma')]
    if karmas[0] is None:
        # Unscanned rows have no karma numbers at all.
        karmas = ['None'] * 3
    else:
        karmas = ['{:,}'.format(value) for value in karmas]
    return MEMBERFORMAT_FULL.format(
        id=data[SQL_USER['idstr']],
        created=data[SQL_USER['human']] or '',
        # Usernames are maximum of 20 chars
        username=data[SQL_USER['name']],
        link_karma=karmas[0],
        comment_karma=karmas[1],
        total_karma=karmas[2],
        lastscan=human(data[SQL_USER['lastscan']]),
    )
def popgenerator(x):
    '''
    Yield items popped off the end of `x` until it is empty.
    This destroys the input list in-place.
    '''
    while x:
        yield x.pop()
def process(users, quiet=False, knownid='', noskip=False):
    '''
    Fetch the /u/ page for a user or list of users
    users : A list of strings, each representing a username. Since reddit
        usernames must be 3 - 20 characters and only contain
        alphanumeric + "_-", any improper strings will be removed.
    quiet : Silences the "x old" report at the end
    knownid : If you're processing a user which does not exist, but you know
        what their user ID was supposed to be, this will at least allow
        you to flesh out the database entry a little better.
    noskip : Do not skip usernames which are already in the database.
    '''
    olds = 0
    # Deduplicate lists; promote a bare string to a one-element list.
    if isinstance(users, list):
        users = list(set(users))
    if isinstance(users, str):
        users = [users]
    # I don't want to import types.GeneratorType just for one isinstance
    if type(users).__name__ == 'generator' or len(users) > 1:
        # knownid only makes sense when processing exactly one known name.
        knownid = ''
    users = userify_list(users, noskip=noskip, quiet=quiet)
    current = 0
    for user in users:
        current += 1
        # One row image, indexed via the SQL_USER column positions.
        data = [None] * len(SQL_USER)
        data[SQL_USER['lastscan']] = int(getnow())
        if isinstance(user, list):
            # This happens when we receive NotFound. [name, availability]
            if knownid != '':
                data[SQL_USER['idint']] = b36(knownid)
                data[SQL_USER['idstr']] = knownid
            data[SQL_USER['name']] = user[0]
            data[SQL_USER['available']] = AVAILABILITY[user[1]]
        else:
            # We have a Redditor object.
            h = human(user.created_utc)
            data[SQL_USER['idint']] = b36(user.id)
            data[SQL_USER['idstr']] = user.id
            data[SQL_USER['created']] = user.created_utc
            data[SQL_USER['human']] = h
            data[SQL_USER['name']] = user.name
            data[SQL_USER['link_karma']] = user.link_karma
            data[SQL_USER['comment_karma']] = user.comment_karma
            data[SQL_USER['total_karma']] = user.comment_karma + user.link_karma
            data[SQL_USER['available']] = 0
        data[SQL_USER['lowername']] = data[SQL_USER['name']].lower()
        printprefix = '%04d' % current
        # smartinsert returns False when the row already existed (update).
        x = smartinsert(data, printprefix)
        if x is False:
            olds += 1
    if quiet is False:
        print('%d old' % olds)
p = process  # Short interactive alias.
def processid(idnum, ranger=1):
    '''
    Do an author_fullname search in an attempt to find a user by their ID.
    This is not reliable if the user has no public submissions.
    '''
    # Accept either "t2_xxxxx" or bare "xxxxx"; work from the integer form.
    idnum = idnum.split('_')[-1]
    base = b36(idnum)
    # Scan `ranger` consecutive ids starting at the given one.
    for x in range(ranger):
        idnum = x + base
        exists = getentry(idint=idnum)
        if exists is not None:
            print('Skipping %s : %s' % (b36(idnum), exists[SQL_USER['name']]))
            continue
        idnum = 't2_' + b36(idnum)
        idnum = idnum.lower()
        print('%s - ' % idnum, end='', flush=True)
        search = list(r.search('author_fullname:%s' % idnum))
        if len(search) > 0:
            item = search[0].author.name
            # Strip the "t2_" prefix back off for the known id.
            process(item, quiet=True, knownid=idnum[3:])
        else:
            print('No idea.')
pid = processid  # Short interactive alias.
def process_input():
    """Interactive loop: read one username per prompt and process it.

    Exit with Ctrl-C; blank input is ignored.
    """
    while True:
        try:
            x = input('p> ')
        except KeyboardInterrupt:
            print()
            break
        if not x:
            continue
        try:
            process(x, quiet=True, noskip=True)
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the loop alive only for ordinary errors.
        except Exception:
            traceback.print_exc()
def process_from_database(filename, table, column, delete_original=False):
    '''
    Warning: if delete_original is True, the original database will lose each username
    as it is processed
    '''
    # NOTE(review): `table` and `column` are interpolated straight into the
    # SQL text; only call this with trusted identifiers.
    s = sqlite3.connect(filename)
    c = s.cursor()
    c2 = s.cursor()
    query = 'SELECT DISTINCT %s FROM %s' % (column, table)
    c.execute(query)
    i = 0
    try:
        for item in fetchgenerator(c):
            # Commit pending deletions every 100 processed names.
            i = (i + 1) % 100
            if i == 0:
                s.commit()
            username = item[0]
            if username is not None:
                p(username, quiet=True)
                if delete_original:
                    c2.execute('DELETE FROM %s WHERE %s == ?' % (table, column), [username])
    except (Exception, KeyboardInterrupt) as e:
        if delete_original:
            print('Committing changes...')
            s.commit()
        # Attach the connection so the caller can keep using it.
        e.sql = s
        raise e
    return s
def print_message(data, printprefix=''):
    """Print one user row: full details when scanned, else availability.

    `data` is a SQL_USER-ordered row; a non-null 'human' field means the
    account was successfully scanned.
    """
    if data[SQL_USER['human']] is not None:
        print('{prefix:>5} {idstr:>6} : {human} : {name} : {link_karma} : {comment_karma}'.format(
            prefix=printprefix,
            idstr=data[SQL_USER['idstr']],
            human=data[SQL_USER['human']],
            name=data[SQL_USER['name']],
            link_karma=data[SQL_USER['link_karma']],
            comment_karma=data[SQL_USER['comment_karma']],
        )
        )
    else:
        # FIX: was `is 1`, which compares identity rather than equality
        # (a SyntaxWarning on modern Python and unreliable for ints).
        availability = 'available' if data[SQL_USER['available']] == 1 else 'unavailable'
        print('{prefix:>5} {availability:>33} : {name}'.format(
            prefix=printprefix,
            availability=availability,
            name=data[SQL_USER['name']],
        )
        )
def save_textfile(filename, lines):
    '''
    Write items of list as lines in file.
    See also `load_textfile`.
    '''
    # `with` guarantees the file is flushed and closed on any exit path.
    with open(filename, 'w') as f:
        for line in lines:
            print(line, file=f)
def show():
    '''
    Create a bunch of text files that nobody will read
    '''
    file_time = open('show\\time.txt', 'w')
    file_name = open('show\\name.txt', 'w')
    file_karma_total = open('show\\karma_total.txt', 'w')
    #file_karma_link = open('show\\karma_link.txt', 'w')
    #file_karma_comment = open('show\\karma_comment.txt', 'w')
    file_available = open('show\\available.txt', 'w')
    file_stats = open('show\\stats.txt', 'w')
    file_readme = open('README.md', 'r')
    totalitems = count(validonly=False)
    validitems = count(validonly=True)
    print(totalitems, validitems)
    print('Updating readme')
    readmelines = file_readme.readlines()
    file_readme.close()
    # Line 4 of the README carries the account-count banner.
    readmelines[3] = '##### {0:,} accounts\n'.format(validitems)
    readmelines = ''.join(readmelines)
    file_readme = open('README.md', 'w')
    file_readme.write(readmelines)
    file_readme.close()
    print('Writing stats file.')
    # NOTE(review): stats generation is an unimplemented placeholder.
    print('DO SOMETHING')
    file_stats.close()
    print('Writing time file.')
    print(HEADER_FULL, file=file_time)
    cur.execute('SELECT * FROM users WHERE idint IS NOT NULL AND created IS NOT NULL ORDER BY created ASC')
    fetchwriter(cur, file_time)
    file_time.close()
    print('Writing name file.')
    print(HEADER_FULL, file=file_name)
    cur.execute('SELECT * FROM users WHERE idint IS NOT NULL ORDER BY lowername ASC')
    fetchwriter(cur, file_name)
    file_name.close()
    print('Writing karma total file.')
    print(HEADER_FULL, file=file_karma_total)
    cur.execute('SELECT * FROM users WHERE idint IS NOT NULL ORDER BY total_karma DESC, lowername ASC')
    fetchwriter(cur, file_karma_total)
    file_karma_total.close()
    #print('Writing karma link file.')
    #print(HEADER_FULL, file=file_karma_link)
    #cur.execute('SELECT * FROM users WHERE idint IS NOT NULL ORDER BY link_karma DESC, lowername ASC')
    #fetchwriter(cur, file_karma_link)
    #file_karma_link.close()
    #print('Writing karma comment file.')
    #print(HEADER_FULL, file=file_karma_comment)
    #cur.execute('SELECT * FROM users WHERE idint IS NOT NULL ORDER BY comment_karma DESC, lowername ASC')
    #fetchwriter(cur, file_karma_comment)
    #file_karma_comment.close()
    print('Writing available')
    print(HEADER_BRIEF, file=file_available)
    # Only list available names longer than 3 characters.
    cur.execute('SELECT * FROM users WHERE available == 1 AND LENGTH(name) > 3 ORDER BY lowername ASC')
    fetchwriter(cur, file_available, spacer1=' ', brief=True)
    file_available.close()
def smartinsert(data, printprefix=''):
    '''
    Originally, all queries were based on idint, but this caused problems
    when accounts were deleted / banned, because it wasn't possible to
    sql-update without knowing the ID.

    Insert or update `data` (a SQL_USER-ordered row), keyed on lowername.
    Returns True when a new row was inserted, False when an existing row
    was updated.
    '''
    print_message(data, printprefix)
    exists_in_db = (getentry(name=data[SQL_USER['name']].lower()) is not None)
    if exists_in_db:
        isnew = False
        # Reorder into the UPDATE's parameter order, with lowername last
        # for the WHERE clause.
        data = [
            data[SQL_USER['idint']],
            data[SQL_USER['idstr']],
            data[SQL_USER['created']],
            data[SQL_USER['human']],
            data[SQL_USER['link_karma']],
            data[SQL_USER['comment_karma']],
            data[SQL_USER['total_karma']],
            data[SQL_USER['available']],
            data[SQL_USER['lastscan']],
            data[SQL_USER['name']],
            data[SQL_USER['name']].lower()]
        # coalesce allows us to fallback on the existing values
        # if the given values are null, to avoid erasing data about users
        # whose accounts are now deleted.
        command = '''
        UPDATE users SET
        idint = coalesce(?, idint),
        idstr = coalesce(?, idstr),
        created = coalesce(?, created),
        human = coalesce(?, human),
        link_karma = coalesce(?, link_karma),
        comment_karma = coalesce(?, comment_karma),
        total_karma = coalesce(?, total_karma),
        available = coalesce(?, available),
        lastscan = coalesce(?, lastscan),
        name = coalesce(?, name)
        WHERE lowername == ?
        '''
        cur.execute(command, data)
    else:
        isnew = True
        cur.execute('INSERT INTO users VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', data)
    sql.commit()
    return isnew
def userify_list(users, noskip=False, quiet=False):
    """Yield Redditor objects (or [name, availability] pairs) for `users`.

    Invalid names are skipped with a message. Names scanned within
    MIN_LASTSCAN_DIFF seconds are skipped unless `noskip` is True. A
    name that raises NotFound yields [name, availability] instead of a
    Redditor; suspended accounts are silently skipped.
    """
    if quiet is False:
        if hasattr(users, '__len__'):
            print('Processing %d unique names' % len(users))
    for username in users:
        if isinstance(username, str):
            # Reddit usernames are 3-20 characters of [A-Za-z0-9_-].
            if len(username) < 3 or len(username) > 20:
                print('%s : Invalid length of %d' % (username, len(username)))
                continue
            if not all(c in VALID_CHARS for c in username):
                print('%s : Contains invalid characters' % username)
                continue
        elif isinstance(username, praw.objects.Redditor):
            username = username.name.lower()
        else:
            print('Don\'t know what to do with %s' % username)
            # FIX: previously fell through and tried to look up the
            # unusable value; skip it instead.
            continue
        existing_entry = getentry(name=username)
        if existing_entry is not None:
            lastscan = existing_entry[SQL_USER['lastscan']]
            should_rescan = (getnow() - lastscan) > MIN_LASTSCAN_DIFF
            if should_rescan is False and noskip is False:
                prefix = ' ' * 31
                appendix = '(available)' if existing_entry[SQL_USER['available']] else ''
                print('%sskipping : %s %s' % (prefix, username, appendix))
                continue
        try:
            if PREPRINT:
                print(username)
            user = r.get_redditor(username, fetch=True)
            if getattr(user, 'is_suspended', False):
                # Suspended accounts provide extremely little info
                # {"kind": "t2", "data": {"is_suspended": true, "name": "*****"}}
                continue
            yield user
        except praw.errors.NotFound:
            availability = r.is_username_available(username)
            availability = AVAILABILITY[availability]
            yield [username, availability]
| |
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Connect to the local Asiadigicoin RPC server on port 6218, embedding
# the credentials in the URL when a password is configured.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:6218")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:6218")
# The RPC command to run is the first command-line argument.
cmd = sys.argv[1].lower()
# Each branch prompts for its arguments, invokes the RPC method, and
# prints the result; calls with optional arguments retry without the
# optionals when the first attempt fails.
if cmd == "backupwallet":
    try:
        path = raw_input("Enter destination path/filename: ")
        print access.backupwallet(path)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccount":
    try:
        addr = raw_input("Enter a Asiadigicoin address: ")
        print access.getaccount(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaccountaddress(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaddressesbyaccount(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getbalance":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getbalance(acct, mc)
        except:
            print access.getbalance()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
    try:
        height = raw_input("Height: ")
        print access.getblockbycount(height)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockcount":
    try:
        print access.getblockcount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
    try:
        print access.getblocknumber()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
    try:
        print access.getconnectioncount()
    except:
        print "\n---An error occurred---\n"
# Zero-argument info commands, plus account/address "received" queries.
elif cmd == "getdifficulty":
    try:
        print access.getdifficulty()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getgenerate":
    try:
        print access.getgenerate()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
    try:
        print access.gethashespersec()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getinfo":
    try:
        print access.getinfo()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
    try:
        acct = raw_input("Enter an account name: ")
        # Retry without the account when the server rejects the call.
        try:
            print access.getnewaddress(acct)
        except:
            print access.getnewaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaccount(acct, mc)
        except:
            print access.getreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
    try:
        addr = raw_input("Enter a Asiadigicoin address (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaddress(addr, mc)
        except:
            print access.getreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gettransaction":
    try:
        txid = raw_input("Enter a transaction ID: ")
        print access.gettransaction(txid)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
# Listing commands and the money-moving commands (move/sendfrom/sendmany).
elif cmd == "help":
    try:
        cmd = raw_input("Command (optional): ")
        try:
            print access.help(cmd)
        except:
            print access.help()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listaccounts":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.listaccounts(mc)
        except:
            print access.listaccounts()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaccount(mc, incemp)
        except:
            print access.listreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaddress(mc, incemp)
        except:
            print access.listreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listtransactions":
    try:
        acct = raw_input("Account (optional): ")
        count = raw_input("Number of transactions (optional): ")
        frm = raw_input("Skip (optional):")
        try:
            print access.listtransactions(acct, count, frm)
        except:
            print access.listtransactions()
    except:
        print "\n---An error occurred---\n"
elif cmd == "move":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.move(frm, to, amt, mc, comment)
        except:
            print access.move(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendfrom":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendfrom(frm, to, amt, mc, comment, commentto)
        except:
            print access.sendfrom(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendmany":
    try:
        frm = raw_input("From: ")
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.sendmany(frm,to,mc,comment)
        except:
            print access.sendmany(frm,to)
    except:
        print "\n---An error occurred---\n"
# Remaining send/settings/wallet commands and the unknown-command fallback.
elif cmd == "sendtoaddress":
    try:
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        amt = raw_input("Amount:")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendtoaddress(to,amt,comment,commentto)
        except:
            print access.sendtoaddress(to,amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setaccount":
    try:
        addr = raw_input("Address: ")
        acct = raw_input("Account:")
        print access.setaccount(addr,acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setgenerate":
    try:
        gen= raw_input("Generate? (true/false): ")
        cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
        try:
            print access.setgenerate(gen, cpus)
        except:
            print access.setgenerate(gen)
    except:
        print "\n---An error occurred---\n"
elif cmd == "settxfee":
    try:
        amt = raw_input("Amount:")
        print access.settxfee(amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "stop":
    try:
        print access.stop()
    except:
        print "\n---An error occurred---\n"
elif cmd == "validateaddress":
    try:
        addr = raw_input("Address: ")
        print access.validateaddress(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
    try:
        pwd = raw_input("Enter wallet passphrase: ")
        # Unlock the wallet for 60 seconds.
        access.walletpassphrase(pwd, 60)
        print "\n---Wallet unlocked---\n"
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
    try:
        pwd = raw_input("Enter old wallet passphrase: ")
        pwd2 = raw_input("Enter new wallet passphrase: ")
        access.walletpassphrasechange(pwd, pwd2)
        print
        print "\n---Passphrase changed---\n"
    except:
        print
        print "\n---An error occurred---\n"
    print
else:
    print "Command not found or not supported"
| |
# Authors: Nicolas Tresegnie <nicolas.tresegnie@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import numpy.ma as ma
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
from ..utils import as_float_array
from ..utils.fixes import astype
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'Imputer',
]
def _get_mask(X, value_to_mask):
"""Compute the boolean mask X == missing_values."""
if value_to_mask == "NaN" or np.isnan(value_to_mask):
return np.isnan(X)
else:
return X == value_to_mask
def _get_median(data, n_zeros):
    """Compute the median of data with n_zeros additional zeros.

    This function is used to support sparse matrices; it modifies data
    in-place.
    """
    n_elems = len(data) + n_zeros
    if not n_elems:
        return np.nan
    negative_count = np.count_nonzero(data < 0)
    data.sort()
    middle, is_odd = divmod(n_elems, 2)
    if is_odd:
        return _get_elem_at_rank(middle, data, negative_count, n_zeros)
    # Even count: average the two central ranked elements.
    lower = _get_elem_at_rank(middle - 1, data, negative_count, n_zeros)
    upper = _get_elem_at_rank(middle, data, negative_count, n_zeros)
    return (lower + upper) / 2.
def _get_elem_at_rank(rank, data, n_negative, n_zeros):
"""Find the value in data augmented with n_zeros for the given rank"""
if rank < n_negative:
return data[rank]
if rank - n_negative < n_zeros:
return 0
return data[rank - n_zeros]
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
mode = stats.mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
# Ties the breaks. Copy the behaviour of scipy.stats.mode
if most_frequent_value < extra_value:
return most_frequent_value
else:
return extra_value
class Imputer(BaseEstimator, TransformerMixin):
    """Imputation transformer for completing missing values.

    Parameters
    ----------
    missing_values : integer or "NaN", optional (default="NaN")
        The placeholder for the missing values. All occurrences of
        `missing_values` will be imputed. For missing values encoded as np.nan,
        use the string value "NaN".

    strategy : string, optional (default="mean")
        The imputation strategy.

        - If "mean", then replace missing values using the mean along
          the axis.
        - If "median", then replace missing values using the median along
          the axis.
        - If "most_frequent", then replace missing using the most frequent
          value along the axis.

    axis : integer, optional (default=0)
        The axis along which to impute.

        - If `axis=0`, then impute along columns.
        - If `axis=1`, then impute along rows.

    verbose : integer, optional (default=0)
        Controls the verbosity of the imputer.

    copy : boolean, optional (default=True)
        If True, a copy of X will be created. If False, imputation will
        be done in-place whenever possible. Note that, in the following cases,
        a new copy will always be made, even if `copy=False`:

        - If X is not an array of floating values;
        - If X is sparse and `missing_values=0`;
        - If `axis=0` and X is encoded as a CSR matrix;
        - If `axis=1` and X is encoded as a CSC matrix.

    Attributes
    ----------
    statistics_ : array of shape (n_features,)
        The imputation fill value for each feature if axis == 0.

    Notes
    -----
    - When ``axis=0``, columns which only contained missing values at `fit`
      are discarded upon `transform`.
    - When ``axis=1``, an exception is raised if there are rows for which it is
      not possible to fill in the missing values (e.g., because they only
      contain missing values).
    """
    def __init__(self, missing_values="NaN", strategy="mean",
                 axis=0, verbose=0, copy=True):
        self.missing_values = missing_values
        self.strategy = strategy
        self.axis = axis
        self.verbose = verbose
        self.copy = copy

    def fit(self, X, y=None):
        """Fit the imputer on X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data, where ``n_samples`` is the number of samples and
            ``n_features`` is the number of features.

        Returns
        -------
        self : object
            Returns self.
        """
        # Check parameters
        allowed_strategies = ["mean", "median", "most_frequent"]
        if self.strategy not in allowed_strategies:
            raise ValueError("Can only use these strategies: {0} "
                             " got strategy={1}".format(allowed_strategies,
                                                        self.strategy))

        if self.axis not in [0, 1]:
            raise ValueError("Can only impute missing values on axis 0 and 1, "
                             " got axis={0}".format(self.axis))

        # Since two different arrays can be provided in fit(X) and
        # transform(X), the imputation data will be computed in transform()
        # when the imputation is done per sample (i.e., when axis=1).
        if self.axis == 0:
            X = check_array(X, accept_sparse='csc', dtype=np.float64,
                            force_all_finite=False)

            if sparse.issparse(X):
                self.statistics_ = self._sparse_fit(X,
                                                    self.strategy,
                                                    self.missing_values,
                                                    self.axis)
            else:
                self.statistics_ = self._dense_fit(X,
                                                   self.strategy,
                                                   self.missing_values,
                                                   self.axis)

        return self

    def _sparse_fit(self, X, strategy, missing_values, axis):
        """Fit the transformer on sparse data."""
        # Imputation is done "by column", so if we want to do it
        # by row we only need to convert the matrix to csr format.
        if axis == 1:
            X = X.tocsr()
        else:
            X = X.tocsc()

        # Count the zeros
        if missing_values == 0:
            n_zeros_axis = np.zeros(X.shape[not axis], dtype=int)
        else:
            n_zeros_axis = X.shape[axis] - np.diff(X.indptr)

        # Mean
        if strategy == "mean":
            if missing_values != 0:
                n_non_missing = n_zeros_axis

                # Mask the missing elements
                mask_missing_values = _get_mask(X.data, missing_values)
                mask_valids = np.logical_not(mask_missing_values)

                # Sum only the valid elements
                new_data = X.data.copy()
                new_data[mask_missing_values] = 0
                X = sparse.csc_matrix((new_data, X.indices, X.indptr),
                                      copy=False)
                sums = X.sum(axis=0)

                # Count the elements != 0
                mask_non_zeros = sparse.csc_matrix(
                    (mask_valids.astype(np.float64),
                     X.indices,
                     X.indptr), copy=False)
                s = mask_non_zeros.sum(axis=0)
                n_non_missing = np.add(n_non_missing, s)

            else:
                sums = X.sum(axis=axis)
                n_non_missing = np.diff(X.indptr)

            # Ignore the error, columns with a np.nan statistics_
            # are not an error at this point. These columns will
            # be removed in transform
            with np.errstate(all="ignore"):
                return np.ravel(sums) / np.ravel(n_non_missing)

        # Median + Most frequent
        else:
            # Remove the missing values, for each column
            columns_all = np.hsplit(X.data, X.indptr[1:-1])
            mask_missing_values = _get_mask(X.data, missing_values)
            mask_valids = np.hsplit(np.logical_not(mask_missing_values),
                                    X.indptr[1:-1])

            # astype necessary for bug in numpy.hsplit before v1.9
            columns = [col[astype(mask, bool, copy=False)]
                       for col, mask in zip(columns_all, mask_valids)]

            # Median
            if strategy == "median":
                median = np.empty(len(columns))
                for i, column in enumerate(columns):
                    median[i] = _get_median(column, n_zeros_axis[i])

                return median

            # Most frequent
            elif strategy == "most_frequent":
                most_frequent = np.empty(len(columns))

                for i, column in enumerate(columns):
                    most_frequent[i] = _most_frequent(column,
                                                      0,
                                                      n_zeros_axis[i])

                return most_frequent

    def _dense_fit(self, X, strategy, missing_values, axis):
        """Fit the transformer on dense data."""
        X = check_array(X, force_all_finite=False)
        mask = _get_mask(X, missing_values)
        masked_X = ma.masked_array(X, mask=mask)

        # Mean
        if strategy == "mean":
            mean_masked = np.ma.mean(masked_X, axis=axis)
            # Avoid the warning "Warning: converting a masked element to nan."
            mean = np.ma.getdata(mean_masked)
            mean[np.ma.getmask(mean_masked)] = np.nan

            return mean

        # Median
        elif strategy == "median":
            if tuple(int(v) for v in np.__version__.split('.')[:2]) < (1, 5):
                # In old versions of numpy, calling a median on an array
                # containing nans returns nan. This is different is
                # recent versions of numpy, which we want to mimic
                masked_X.mask = np.logical_or(masked_X.mask,
                                              np.isnan(X))
            median_masked = np.ma.median(masked_X, axis=axis)
            # Avoid the warning "Warning: converting a masked element to nan."
            median = np.ma.getdata(median_masked)
            median[np.ma.getmaskarray(median_masked)] = np.nan

            return median

        # Most frequent
        elif strategy == "most_frequent":
            # scipy.stats.mstats.mode cannot be used because it will no work
            # properly if the first element is masked and if it's frequency
            # is equal to the frequency of the most frequent valid element
            # See https://github.com/scipy/scipy/issues/2636

            # To be able access the elements by columns
            if axis == 0:
                X = X.transpose()
                mask = mask.transpose()

            most_frequent = np.empty(X.shape[0])

            for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
                # BUG FIX: np.bool was a deprecated alias removed in
                # NumPy 1.24; the builtin bool is the documented spelling.
                row_mask = np.logical_not(row_mask).astype(bool)
                row = row[row_mask]
                most_frequent[i] = _most_frequent(row, np.nan, 0)

            return most_frequent

    def transform(self, X):
        """Impute all missing values in X.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            The input data to complete.
        """
        # Copy just once
        X = as_float_array(X, copy=self.copy, force_all_finite=False)

        # Since two different arrays can be provided in fit(X) and
        # transform(X), the imputation data need to be recomputed
        # when the imputation is done per sample
        if self.axis == 1:
            X = check_array(X, accept_sparse='csr', force_all_finite=False,
                            copy=False)

            if sparse.issparse(X):
                statistics = self._sparse_fit(X,
                                              self.strategy,
                                              self.missing_values,
                                              self.axis)

            else:
                statistics = self._dense_fit(X,
                                             self.strategy,
                                             self.missing_values,
                                             self.axis)
        else:
            X = check_array(X, accept_sparse='csc', force_all_finite=False,
                            copy=False)
            statistics = self.statistics_

        # Delete the invalid rows/columns
        invalid_mask = np.isnan(statistics)
        valid_mask = np.logical_not(invalid_mask)
        valid_statistics = statistics[valid_mask]
        valid_statistics_indexes = np.where(valid_mask)[0]
        missing = np.arange(X.shape[not self.axis])[invalid_mask]

        if self.axis == 0 and invalid_mask.any():
            if self.verbose:
                warnings.warn("Deleting features without "
                              "observed values: %s" % missing)
            X = X[:, valid_statistics_indexes]
        elif self.axis == 1 and invalid_mask.any():
            raise ValueError("Some rows only contain "
                             "missing values: %s" % missing)

        # Do actual imputation
        if sparse.issparse(X) and self.missing_values != 0:
            mask = _get_mask(X.data, self.missing_values)
            # BUG FIX: np.int was a deprecated alias removed in NumPy 1.24;
            # the builtin int gives the same platform-default integer dtype.
            indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=int),
                                np.diff(X.indptr))[mask]

            X.data[mask] = valid_statistics[indexes].astype(X.dtype)
        else:
            if sparse.issparse(X):
                X = X.toarray()

            mask = _get_mask(X, self.missing_values)
            n_missing = np.sum(mask, axis=self.axis)
            values = np.repeat(valid_statistics, n_missing)

            if self.axis == 0:
                coordinates = np.where(mask.transpose())[::-1]
            else:
                coordinates = mask

            X[coordinates] = values

        return X
| |
import datetime
import logging
import logging.handlers
import os
import psutil
import random
import re
import string
import sys
import tempfile
import time
import unittest2
from hoplite.client.remote_job_manager import RemoteJobManager
from hoplite.exceptions import JobFailedError, TimeoutError
from hoplite.public_api import wait_for_hoplite
sys.path.append(os.path.realpath(__file__))
import remotable_test_resources.remotable_class as remotable_class
from remotable_test_resources.dummy_class import DummyClass
import remotable_test_resources.remotable_module as remotable_module
def start_hoplite_server(port_num):
    """Launch a hoplite server in a child process on ``port_num``, block
    until it accepts connections, and return the psutil process handle."""
    bootstrap = "import hoplite.main; hoplite.main.server_main(['--port={}'])".format(port_num)
    command = '{} -c "{}"'.format(sys.executable, bootstrap)
    server_process = psutil.Popen(command)
    wait_for_hoplite('localhost', port_num)
    return server_process
def tear_down_hoplite(process):
    """Terminate a hoplite server process and all of its children.

    Children are stopped first, and each terminate() is paired with a
    wait() so no zombie processes are left behind.

    Parameters
    ----------
    process : psutil.Process
        Handle returned by start_hoplite_server.
    """
    # BUG FIX: psutil renamed Process.get_children() to children() in
    # psutil 2.0 and later removed the old name; support both spellings.
    list_children = getattr(process, 'children', None) or process.get_children
    for child in list_children():
        child.terminate()
        child.wait()
    process.terminate()
    process.wait()
    # Brief pause so the OS releases the server's listening port before a
    # subsequent test starts another server on the same port.
    time.sleep(.5)
class TestMetaClassInjection(unittest2.TestCase):
    """Checks that the remote-enabling metaclass injects a remote_* and a
    remote_async_* wrapper for every remotable method of TestClass."""

    def setUp(self):
        self.class_instance = remotable_class.TestClass()

    def tearDown(self):
        pass

    def test_meta_class_injects_functions(self):
        injected_names = dir(self.class_instance)
        expected = (
            'return_none', 'return_number', 'return_tuple',
            'return_single_list', 'return_multiple_lists', 'do_math',
            'pass_common_class', 'pass_custom_class', 'get_class_variables',
            'raise_type_error', 'raise_custom_error', 'create_file',
            'create_specified_file', 'call_nested_function',
            'raise_private_error', 'raise_public_error',
            'return_custom_exception', 'long_job', 'log_normal',
        )
        for base_name in expected:
            for prefix in ('remote_', 'remote_async_'):
                self.assertIn(prefix + base_name, injected_names)
class TestModuleInjection(unittest2.TestCase):
    """Checks that remote_* and remote_async_* wrappers are injected for
    every remotable function of the remotable module."""

    def test_remotable_functions_injected_in_module(self):
        module_names = dir(remotable_module)
        expected = (
            'return_none', 'return_number', 'return_tuple',
            'return_single_list', 'return_multiple_lists', 'do_math',
            'pass_common_class', 'pass_custom_class', 'raise_type_error',
            'raise_custom_error', 'create_specified_file',
            'call_nested_function', 'raise_private_error',
            'raise_public_error', 'return_custom_exception', 'long_job',
            'log_normal',
        )
        for base_name in expected:
            for prefix in ('remote_', 'remote_async_'):
                self.assertIn(prefix + base_name, module_names)
class TestRemotableClassCapabilities(unittest2.TestCase):
    """End-to-end tests for the remote_*/remote_async_* methods injected on
    a class: return values, argument marshalling, and exceptions must
    round-trip through a live hoplite server listening on port 5001.
    """

    def setUp(self):
        """
        This function has to use subprocess instead of multiprocessing. Otherwise, tests fail
        with the error "daemonic processes are not allowed to have children". This way, hoplite is
        run in a separate process.
        :return:
        """
        self.proc = start_hoplite_server(5001)
        self.class_instance = remotable_class.TestClass('this_is@a#string', 12349)

    def tearDown(self):
        # Extra sleep lets the OS release port 5001 before the next setUp.
        tear_down_hoplite(self.proc)
        time.sleep(.5)

    def test_return_none(self):
        ret = self.class_instance.remote_return_none('localhost:5001')
        self.assertIsNone(ret)

    def test_return_number(self):
        ret = self.class_instance.remote_return_number('localhost:5001')
        self.assertEqual(ret, 777)

    def test_return_tuple(self):
        # Verifies the tuple type survives serialization (not turned into a list).
        ret = self.class_instance.remote_return_tuple('localhost:5001')
        self.assertTrue(type(ret) == tuple)
        self.assertTupleEqual(ret, ('All', 4, 1.11))

    def test_return_list(self):
        ret = self.class_instance.remote_return_single_list('localhost:5001')
        self.assertTrue(type(ret) == list)
        self.assertListEqual(ret, ['This', 'is', 'a', 'list'])

    def test_return_multiple_lists(self):
        ret = self.class_instance.remote_return_multiple_lists('localhost:5001')
        self.assertTrue(type(ret) == tuple)
        self.assertListEqual(ret[0], ['This', 'is', 1, 'list'])
        self.assertListEqual(ret[1], ['and', 'this', 'is', 'another', 'list'])

    def test_do_math(self):
        ret = self.class_instance.remote_do_math('localhost:5001', 17, 13.5)
        self.assertEqual(ret, 17 * 13.5)

    def test_pass_common_class(self):
        # A stdlib (pickleable) class instance must survive the round trip.
        date = datetime.datetime.now()
        ret = self.class_instance.remote_pass_common_class('localhost:5001', date)
        self.assertEqual(ret[0], date.year)
        self.assertEqual(ret[1], date.month)
        self.assertEqual(ret[2], date.day)
        self.assertEqual(ret[3], date.hour)
        self.assertEqual(ret[4], date.minute)
        self.assertEqual(ret[5], date.second)
        self.assertEqual(ret[6], date.microsecond)

    def test_pass_custom_class(self):
        # A project-defined class instance must also survive the round trip.
        dummy = DummyClass(53, 35)
        ret = self.class_instance.remote_pass_custom_class('localhost:5001', dummy)
        self.assertEqual(ret, 88)

    def test_get_class_variables(self):
        # State set in __init__ must be visible to the remotely executed method.
        ret = self.class_instance.remote_get_class_variables('localhost:5001')
        self.assertTrue(type(ret) == tuple)
        self.assertEqual(ret[0], 'this_is@a#string')
        self.assertEqual(ret[1], 12349 * 2)

    def test_raise_type_error(self):
        # NOTE(review): e.message is Python 2 only -- confirm the suite
        # still targets py2 before porting.
        with self.assertRaises(TypeError) as error_context:
            self.class_instance.remote_raise_type_error('localhost:5001')
        e = error_context.exception
        self.assertEqual(e.message, '___Failure1')

    def test_raise_custom_error(self):
        with self.assertRaises(remotable_class.ExternalEmptyError) as error_context:
            self.class_instance.remote_raise_custom_error('localhost:5001')
        e = error_context.exception
        self.assertEqual(e.message, '___Failure2')

    def test_create_file(self):
        # Side effects happen locally: the created file is on this machine.
        filename = self.class_instance.remote_create_file('localhost:5001')
        with open(filename) as fin:
            self.assertEqual(fin.read().strip(), self.class_instance.file_contents)
        os.remove(filename)

    def test_call_nested_function(self):
        # Exceptions raised by helpers the remote method calls still propagate.
        with self.assertRaises(TypeError) as error_context:
            self.class_instance.remote_call_nested_function('localhost:5001')
        e = error_context.exception
        self.assertEqual(e.message, '___Failure1')

    def test_raise_private_error(self):
        """
        Test how the metaclass deals with exceptions that it cannot reraise. In this case,
        the MyPrivateError exception class is defined within the TestClass, and so it cannot
        be pickled. That means that it cannot be re-raised, and so a JobFailedError should
        be raised instead. However, we can still check that the MyPrivateError exception
        was raised on the server end.
        """
        with self.assertRaises(JobFailedError) as error_context:
            self.class_instance.remote_raise_private_error('localhost:5001')
        e = error_context.exception
        self.assertIn('Root Error Message: ___Failure3', e.__str__())
        match = re.search('Root Error Type: <class \'.*\.MyPrivateError\'>', e.__str__())
        self.assertIsNotNone(match)

    def test_raise_public_error(self):
        with self.assertRaises(remotable_class.ExternalCustomError) as error_context:
            self.class_instance.remote_raise_public_error('localhost:5001')
        self.assertEqual(error_context.exception.message, '___Failure4')

    def test_return_custom_exception(self):
        """
        Tests whether non-trivial class instances can be returned
        """
        ret = self.class_instance.remote_return_custom_exception('localhost:5001')
        self.assertIsInstance(ret, remotable_class.ExternalCustomError)
        self.assertEqual(ret.message, '___Failure4')

    def test_return_none_start_join(self):
        job = self.class_instance.remote_async_return_none('localhost:5001')
        job.start()
        # Sentinel guards against join() not assigning at all.
        ret = 'something_not_none'
        ret = job.join()
        self.assertIsNone(ret)

    def test_return_number_start_join(self):
        job = self.class_instance.remote_async_return_number('localhost:5001')
        job.start()
        ret = job.join()
        self.assertEqual(ret, 777)

    def test_create_specified_file_start(self):
        # This function is considered unsafe, but it should be fine in this case since there are no security concerns
        filename = tempfile.mktemp()
        job = self.class_instance.remote_async_create_specified_file('localhost:5001', filename, 'specified_contents')
        job.start()
        # Async: the file must not exist yet right after start().
        self.assertFalse(os.path.exists(filename))
        time.sleep(5)  # Give time to create file
        with open(filename) as fin:
            self.assertEqual(fin.read().strip(), 'specified_contents')
        os.remove(filename)
        job.join()

    def test_create_specified_file_default_contents_start(self):
        # This function is considered unsafe, but it should be fine in this case since there are no security concerns
        filename = tempfile.mktemp()
        job = self.class_instance.remote_async_create_specified_file('localhost:5001', filename)
        job.start()
        self.assertFalse(os.path.exists(filename))
        time.sleep(5)  # Give time to create file
        with open(filename) as fin:
            self.assertEqual(fin.read().strip(), 'default_contents')
        os.remove(filename)
        job.join()

    def test_derived_class(self):
        """
        Tests that the remotify decorator works properly when applied to classes which use inheritance
        """
        child_class_instance = remotable_class.ChildClass(30, 15)
        child_class_members = dir(child_class_instance)
        grandchild_class_instance = remotable_class.GrandchildClass(45, 30, 15)
        grandchild_class_members = dir(grandchild_class_instance)
        self.assertIn('remote_func_1', child_class_members)
        self.assertIn('remote_func_2', child_class_members)
        self.assertIn('remote_func_3', child_class_members)
        self.assertIn('remote_func_4', child_class_members)
        self.assertIn('remote_async_func_1', child_class_members)
        self.assertIn('remote_async_func_2', child_class_members)
        self.assertIn('remote_async_func_3', child_class_members)
        self.assertIn('remote_async_func_4', child_class_members)
        # func_5/func_6 are presumably not remotable on GrandchildClass --
        # only the plain (non-remote) attributes should be absent/present
        # as asserted below.
        self.assertNotIn('remote_func_5', grandchild_class_members)
        self.assertNotIn('remote_func_6', grandchild_class_members)
        self.assertNotIn('remote_async_func_5', grandchild_class_members)
        self.assertNotIn('remote_async_func_6', grandchild_class_members)
        self.assertIn('func_1', grandchild_class_members)
        self.assertIn('func_2', grandchild_class_members)
        self.assertIn('func_3', grandchild_class_members)
        self.assertIn('func_4', grandchild_class_members)
        self.assertIn('remote_func_1', grandchild_class_members)
        self.assertIn('remote_func_2', grandchild_class_members)
        self.assertIn('remote_func_3', grandchild_class_members)
        self.assertIn('remote_func_4', grandchild_class_members)
        self.assertIn('remote_async_func_1', grandchild_class_members)
        self.assertIn('remote_async_func_2', grandchild_class_members)
        self.assertIn('remote_async_func_3', grandchild_class_members)
        self.assertIn('remote_async_func_4', grandchild_class_members)
        time.sleep(1)  # Extra delay, since this test seems sporadic
        self.assertEqual(child_class_instance.remote_func_3('localhost:5001', 3), 3 + 3 + 15 + 30)
        self.assertEqual(child_class_instance.remote_func_4('localhost:5001', 6), 4 + 6 + 15 + 30)
        self.assertEqual(grandchild_class_instance.remote_func_3('localhost:5001', 9), 3 + 9 + 15 + 30)
        self.assertEqual(grandchild_class_instance.remote_func_4('localhost:5001', 12), 4 + 12 + 15 + 30)

    def test_job_timeout(self):
        with self.assertRaises(TimeoutError):
            self.class_instance.remote_long_job('localhost:5001', remote_timeout=3)

    def test_job_does_not_timeout(self):
        self.class_instance.remote_long_job('localhost:5001', remote_timeout=12)

    def test_async_job_timeout(self):
        with self.assertRaises(TimeoutError):
            job = self.class_instance.remote_async_long_job('localhost:5001')
            job.start()
            job.join(3)

    def test_async_job_does_not_timeout(self):
        job = self.class_instance.remote_async_long_job('localhost:5001')
        job.start()
        job.join(12)

    ''' Still need to add support for static functions - may need to write another Hoplite plugin to handle them
    def test_call_static_function(self):
        ret = remotable_class.TestClass.remote_static_return_number('localhost:5001')
        self.assertEqual(ret, 999)
    '''
class TestRemotableModuleCapabilities(unittest2.TestCase):
    """Module-level counterpart of TestRemotableClassCapabilities: the
    remote_*/remote_async_* functions injected into remotable_module must
    round-trip values and exceptions through a live hoplite server.
    """

    def setUp(self):
        self.proc = start_hoplite_server(5001)
        self.manager = RemoteJobManager("localhost", 5001)

    def tearDown(self):
        # Extra sleep lets the OS release port 5001 before the next setUp.
        tear_down_hoplite(self.proc)
        time.sleep(.5)

    def test_return_none(self):
        ret = remotable_module.remote_return_none('localhost:5001')
        self.assertIsNone(ret)

    def test_return_number(self):
        ret = remotable_module.remote_return_number('localhost:5001')
        self.assertEqual(ret, 777)

    def test_return_tuple(self):
        # Verifies the tuple type survives serialization (not turned into a list).
        ret = remotable_module.remote_return_tuple('localhost:5001')
        self.assertTrue(type(ret) == tuple)
        self.assertTupleEqual(ret, ('All', 4, 1.11))

    def test_return_list(self):
        ret = remotable_module.remote_return_single_list('localhost:5001')
        self.assertTrue(type(ret) == list)
        self.assertListEqual(ret, ['This', 'is', 'a', 'list'])

    def test_return_multiple_lists(self):
        ret = remotable_module.remote_return_multiple_lists('localhost:5001')
        self.assertTrue(type(ret) == tuple)
        self.assertListEqual(ret[0], ['This', 'is', 1, 'list'])
        self.assertListEqual(ret[1], ['and', 'this', 'is', 'another', 'list'])

    def test_do_math(self):
        ret = remotable_module.remote_do_math('localhost:5001', 17, 13.5)
        self.assertEqual(ret, 17 * 13.5)

    def test_pass_common_class(self):
        # A stdlib (pickleable) class instance must survive the round trip.
        date = datetime.datetime.now()
        ret = remotable_module.remote_pass_common_class('localhost:5001', date)
        self.assertEqual(ret[0], date.year)
        self.assertEqual(ret[1], date.month)
        self.assertEqual(ret[2], date.day)
        self.assertEqual(ret[3], date.hour)
        self.assertEqual(ret[4], date.minute)
        self.assertEqual(ret[5], date.second)
        self.assertEqual(ret[6], date.microsecond)

    def test_pass_custom_class(self):
        dummy = DummyClass(53, 35)
        ret = remotable_module.remote_pass_custom_class('localhost:5001', dummy)
        self.assertEqual(ret, 88)

    def test_raise_type_error(self):
        # NOTE(review): e.message is Python 2 only -- confirm the suite
        # still targets py2 before porting.
        with self.assertRaises(TypeError) as error_context:
            remotable_module.remote_raise_type_error('localhost:5001')
        e = error_context.exception
        self.assertEqual(e.message, '___Failure1')

    def test_raise_custom_error(self):
        with self.assertRaises(remotable_module.ExternalEmptyError) as error_context:
            remotable_module.remote_raise_custom_error('localhost:5001')
        e = error_context.exception
        self.assertEqual(e.message, '___Failure2')

    def test_call_nested_function(self):
        # Exceptions raised by helpers the remote function calls still propagate.
        with self.assertRaises(TypeError) as error_context:
            remotable_module.remote_call_nested_function('localhost:5001')
        e = error_context.exception
        self.assertEqual(e.message, '___Failure1')

    def test_raise_private_error(self):
        """
        Test how the metaclass deals with exceptions that it cannot reraise. In this case,
        the MyPrivateError exception class is defined within the TestClass, and so it cannot
        be pickled. That means that it cannot be re-raised, and so a JobFailedError should
        be raised instead. However, we can still check that the MyPrivateError exception
        was raised on the server end.
        """
        with self.assertRaises(JobFailedError) as error_context:
            remotable_module.remote_raise_private_error('localhost:5001')
        e = error_context.exception
        self.assertIn('Root Error Message: ___Failure3', e.__str__())
        match = re.search('Root Error Type: <class \'.*\.MyPrivateError\'>', e.__str__())
        self.assertIsNotNone(match)

    def test_raise_public_error(self):
        with self.assertRaises(remotable_class.ExternalCustomError) as error_context:
            remotable_module.remote_raise_public_error('localhost:5001')
        self.assertEqual(error_context.exception.message, '___Failure4')

    def test_return_custom_exception(self):
        """
        Tests whether non-trivial class instances (such as exceptions) can be returned
        """
        ret = remotable_module.remote_return_custom_exception('localhost:5001')
        self.assertIsInstance(ret, remotable_class.ExternalCustomError)
        self.assertEqual(ret.message, '___Failure4')

    def test_return_none_start_join(self):
        job = remotable_module.remote_async_return_none('localhost:5001')
        job.start()
        # Sentinel guards against join() not assigning at all.
        ret = 'something_not_none'
        ret = job.join()
        self.assertIsNone(ret)

    def test_return_number_start_join(self):
        job = remotable_module.remote_async_return_number('localhost:5001')
        job.start()
        ret = job.join()
        self.assertEqual(ret, 777)

    def test_create_specified_file_start(self):
        # This function is considered unsafe, but it should be fine in this case since there are no security concerns
        filename = tempfile.mktemp()
        job = remotable_module.remote_async_create_specified_file('localhost:5001', filename, 'specified_contents')
        job.start()
        # Async: the file must not exist yet right after start().
        self.assertFalse(os.path.exists(filename))
        time.sleep(3)  # Give time to create file
        with open(filename) as fin:
            self.assertEqual(fin.read().strip(), 'specified_contents')
        os.remove(filename)
        job.join()

    def test_create_specified_file_default_contents_start(self):
        # This function is considered unsafe, but it should be fine in this case since there are no security concerns
        filename = tempfile.mktemp()
        job = remotable_module.remote_async_create_specified_file('localhost:5001', filename)
        job.start()
        self.assertFalse(os.path.exists(filename))
        time.sleep(3)  # Give time to create file
        with open(filename) as fin:
            self.assertEqual(fin.read().strip(), 'default_contents')
        os.remove(filename)
        job.join()

    def test_job_timeout(self):
        with self.assertRaises(TimeoutError):
            remotable_module.remote_long_job('localhost:5001', remote_timeout=3)

    def test_job_does_not_timeout(self):
        remotable_module.remote_long_job('localhost:5001', remote_timeout=12)

    def test_async_job_timeout(self):
        with self.assertRaises(TimeoutError):
            job = remotable_module.remote_async_long_job('localhost:5001')
            job.start()
            job.join(3)

    def test_async_job_does_not_timeout(self):
        job = remotable_module.remote_async_long_job('localhost:5001')
        job.start()
        job.join(12)
class TestRemotableClassClientLogging(unittest2.TestCase):
    """Verifies the client-side log records emitted by the injected
    remote_/remote_async_ wrappers of a class instance, by attaching a
    file handler to the 'hoplite.remote_enabler' logger.
    """

    def setUp(self):
        self.logger = logging.getLogger('hoplite.remote_enabler')
        self.logger_path = tempfile.mktemp()
        self.logger_handler = logging.FileHandler(self.logger_path)
        self.logger.addHandler(self.logger_handler)
        self.logger.setLevel(logging.DEBUG)
        self.proc = start_hoplite_server(5001)
        self.manager = RemoteJobManager("localhost", 5001)
        self.class_instance = remotable_class.TestClass('this_is@a#string', 12349)

    def tearDown(self):
        tear_down_hoplite(self.proc)
        self.logger_handler.close()
        self.logger.removeHandler(self.logger_handler)

    def test_logging__remote_func(self):
        ret = self.class_instance.remote_do_math('localhost:5001', 2, 3)
        self.assertEqual(ret, 6)
        with open(self.logger_path) as fin:
            all_lines = fin.readlines()
        self.assertEqual(all_lines[0].strip(), '"{0}" on target "{1}" with args: {2} and kwargs: {3}'.format(
            'do_math', 'localhost:5001', (2, 3), {}))
        self.assertEqual(all_lines[1].strip(), '"{0}" on target "{1}" returned {2}'.format('do_math', 'localhost:5001', 6))

    def test_logging__remote_async_func(self):
        job = self.class_instance.remote_async_do_math('localhost:5001', 2, 3)
        job.start()
        ret = job.join()
        self.assertEqual(ret, 6)
        with open(self.logger_path) as fin:
            all_lines = fin.readlines()
        self.assertEqual(all_lines[0].strip(), 'Creating job "{0}" on target "{1}" with args: {2} and kwargs: {3}'.format(
            'do_math', 'localhost:5001', (2, 3), {}))
        # BUG FIX: the original assertIsNotNone(<regex literal>, line) could
        # never fail -- the pattern was passed as the value and the log line
        # as the failure message.  Use a real regex assertion instead.
        self.assertRegexpMatches(all_lines[1].strip(), r'Starting "do_math\(.*\)" on "localhost:5001"')
        self.assertEqual(all_lines[2].strip(), 'Joining "{0}" on "{1}"'.format('do_math', 'localhost:5001'))
        self.assertEqual(all_lines[3].strip(), '"{0}" on target "{1}" returned {2}'.format(
            'do_math', 'localhost:5001', 6))
class TestRemotableModuleClientLogging(unittest2.TestCase):
    """Verifies the client-side log records emitted by the injected
    remote_/remote_async_ wrappers of the remotable module, by attaching a
    file handler to the 'hoplite.remote_enabler' logger.
    """

    def setUp(self):
        self.logger = logging.getLogger('hoplite.remote_enabler')
        self.logger_path = tempfile.mktemp()
        self.logger_handler = logging.FileHandler(self.logger_path)
        self.logger.addHandler(self.logger_handler)
        self.logger.setLevel(logging.DEBUG)
        self.proc = start_hoplite_server(5001)
        self.manager = RemoteJobManager("localhost", 5001)

    def tearDown(self):
        tear_down_hoplite(self.proc)
        self.logger_handler.close()
        self.logger.removeHandler(self.logger_handler)

    def test_logging__remote_module_func(self):
        ret = remotable_module.remote_do_math('localhost:5001', 2, 3)
        self.assertEqual(ret, 6)
        with open(self.logger_path) as fin:
            all_lines = fin.readlines()
        self.assertEqual(all_lines[0].strip(), '"{0}" on target "{1}" with args: {2} and kwargs: {3}'.format(
            'do_math', 'localhost:5001', (2, 3), {}))
        self.assertEqual(all_lines[1].strip(), '"{0}" on target "{1}" returned {2}'.format('do_math', 'localhost:5001', 6))

    def test_logging__remote_module_async_func(self):
        job = remotable_module.remote_async_do_math('localhost:5001', 2, 3)
        job.start()
        ret = job.join()
        self.assertEqual(ret, 6)
        with open(self.logger_path) as fin:
            all_lines = fin.readlines()
        self.assertEqual(all_lines[0].strip(), 'Creating job "{0}" on target "{1}" with args: {2} and kwargs: {3}'.format(
            'do_math', 'localhost:5001', (2, 3), {}))
        # BUG FIX: the original assertIsNotNone(<regex literal>, line) could
        # never fail -- the pattern was passed as the value and the log line
        # as the failure message.  Use a real regex assertion instead.
        self.assertRegexpMatches(all_lines[1].strip(), r'Starting "do_math\(.*\)" on "localhost:5001"')
        self.assertEqual(all_lines[2].strip(), 'Joining "{0}" on "{1}"'.format('do_math', 'localhost:5001'))
        self.assertEqual(all_lines[3].strip(), '"{0}" on target "{1}" returned {2}'.format(
            'do_math', 'localhost:5001', 6))
class TestRemotableClassServerLogging(unittest2.TestCase):
def setUp(self):
self.proc = start_hoplite_server(5001)
self.manager = RemoteJobManager("localhost", 5001)
self.class_instance = remotable_class.TestClass('this_is@a#string', 12349)
def tearDown(self):
tear_down_hoplite(self.proc)
def test_logging_log_normal(self):
rand_string_1 = get_random_string(5, 10)
rand_string_2 = get_random_string(5, 10)
log_folder = r'C:\logs\hoplite\remoted_functions\tests\remotable_test_resources\remotable_class\TestClass'
ret = self.class_instance.remote_log_normal('localhost:5001', rand_string_1, dummy_var_2=rand_string_2)
latest_log_file = max(
[os.path.join(log_folder, filename) for filename in os.listdir(log_folder)], key=os.path.getctime
)
self.assertEqual(ret, '{} + {}'.format(rand_string_1, rand_string_2))
with open(os.path.join(log_folder, latest_log_file)) as fin:
all_lines = fin.readlines()
self.assertEqual(len(all_lines), 3)
self.assertIn("Beginning execution of log_normal with args: ('{}',) and kwargs: {{'dummy_var_2': '{}'}}".format(
rand_string_1, rand_string_2), all_lines[0]
)
self.assertIn('Logging in log_normal function', all_lines[1])
self.assertIn(
'Returning from log_normal with return value(s): {} + {}'.format(rand_string_1, rand_string_2), all_lines[2]
)
os.remove(latest_log_file)
def test_logging_log_exception(self):
log_folder = r'C:\logs\hoplite\remoted_functions\tests\remotable_test_resources\remotable_class\TestClass'
with self.assertRaises(TypeError) as error_context:
self.class_instance.remote_raise_type_error('localhost:5001')
self.assertEqual(error_context.exception.message, '___Failure1')
latest_log_file = max(
[os.path.join(log_folder, filename) for filename in os.listdir(log_folder)], key=os.path.getctime
)
with open(os.path.join(log_folder, latest_log_file)) as fin:
all_lines = fin.readlines()
self.assertEqual(len(all_lines), 8)
self.assertIn('Beginning execution of raise_type_error with args: () and kwargs: {}', all_lines[0])
self.assertIn('An exception occurred', all_lines[1])
self.assertIn('Traceback', all_lines[2])
self.assertIn('in run', all_lines[3])
self.assertIn('TypeError: ___Failure1', all_lines[7])
os.remove(latest_log_file)
def test_logging_nested_logs(self):
    """Nested remoted class calls append to a single log file."""
    log_folder = r'C:\logs\hoplite\remoted_functions\tests\remotable_test_resources\remotable_class\TestClass'
    self.class_instance.remote_log_nested_caller('localhost:5001')
    # listdir entries are joined with the folder, so the result is already
    # an absolute path -- open it directly.
    latest_log_file = max(
        [os.path.join(log_folder, filename) for filename in os.listdir(log_folder)], key=os.path.getctime
    )
    with open(latest_log_file) as fin:
        all_lines = fin.readlines()
    self.assertEqual(len(all_lines), 4)
    self.assertIn('Currently in class caller function', all_lines[1])
    self.assertIn('Currently in class callee function', all_lines[2])
    os.remove(latest_log_file)
class TestRemotableModuleServerLogging(unittest2.TestCase):
    """Verify server-side log files produced by module-level remoted functions."""

    def setUp(self):
        self.proc = start_hoplite_server(5001)
        self.manager = RemoteJobManager("localhost", 5001)

    def tearDown(self):
        # Shut down the hoplite server started in setUp.
        tear_down_hoplite(self.proc)

    def test_logging_log_normal(self):
        """A normal remote module call logs entry, body, and return lines."""
        rand_string_1 = get_random_string(5, 10)
        rand_string_2 = get_random_string(5, 10)
        log_folder = r'C:\logs\hoplite\remoted_functions\tests\remotable_test_resources\remotable_module'
        ret = remotable_module.remote_log_normal('localhost:5001', rand_string_1, dummy_var_2=rand_string_2)
        # listdir entries are joined with the folder, so latest_log_file is
        # already an absolute path -- no second join is needed when opening.
        latest_log_file = max(
            [os.path.join(log_folder, filename) for filename in os.listdir(log_folder)], key=os.path.getctime
        )
        self.assertEqual(ret, '{} + {}'.format(rand_string_1, rand_string_2))
        with open(latest_log_file) as fin:
            all_lines = fin.readlines()
        self.assertEqual(len(all_lines), 3)
        self.assertIn("Beginning execution of log_normal with args: ('{}',) and kwargs: {{'dummy_var_2': '{}'}}".format(
            rand_string_1, rand_string_2), all_lines[0]
        )
        self.assertIn('Logging in log_normal function', all_lines[1])
        self.assertIn(
            'Returning from log_normal with return value(s): {} + {}'.format(rand_string_1, rand_string_2), all_lines[2]
        )
        os.remove(latest_log_file)

    def test_logging_log_exception(self):
        """A remote exception is logged with its traceback and re-raised locally."""
        log_folder = r'C:\logs\hoplite\remoted_functions\tests\remotable_test_resources\remotable_module'
        with self.assertRaises(TypeError) as error_context:
            remotable_module.remote_raise_type_error('localhost:5001')
        # exception.message is Python-2-only; str() gives the same text on 2 and 3.
        self.assertEqual(str(error_context.exception), '___Failure1')
        latest_log_file = max(
            [os.path.join(log_folder, filename) for filename in os.listdir(log_folder)], key=os.path.getctime
        )
        with open(latest_log_file) as fin:
            all_lines = fin.readlines()
        self.assertEqual(len(all_lines), 8)
        self.assertIn('Beginning execution of raise_type_error with args: () and kwargs: {}', all_lines[0])
        self.assertIn('An exception occurred', all_lines[1])
        self.assertIn('Traceback', all_lines[2])
        self.assertIn('in run', all_lines[3])
        self.assertIn('TypeError: ___Failure1', all_lines[7])
        os.remove(latest_log_file)

    def test_logging_nested_logs(self):
        """Nested remoted module calls append to a single log file."""
        log_folder = r'C:\logs\hoplite\remoted_functions\tests\remotable_test_resources\remotable_module'
        remotable_module.remote_log_nested_caller('localhost:5001')
        latest_log_file = max(
            [os.path.join(log_folder, filename) for filename in os.listdir(log_folder)], key=os.path.getctime
        )
        with open(latest_log_file) as fin:
            all_lines = fin.readlines()
        self.assertEqual(len(all_lines), 4)
        self.assertIn('Currently in caller function', all_lines[1])
        self.assertIn('Currently in callee function', all_lines[2])
        os.remove(latest_log_file)
def get_random_string(min_length, max_length):
    """Return a random alphanumeric string.

    The length is drawn uniformly from [min_length, max_length] (inclusive)
    and each character uniformly from ASCII letters and digits.
    """
    # ascii_letters == ascii_lowercase + ascii_uppercase
    alphabet = string.ascii_letters + string.digits
    length = random.randint(min_length, max_length)
    return ''.join(random.choice(alphabet) for _ in range(length))
| |
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import os
import json
import mimetypes
from tornado import (
web,
gen,
)
from tornado.log import app_log
from tornado.escape import url_unescape
from ..base import (
AddSlashHandler,
BaseHandler,
cached,
RemoveSlashHandler,
RenderingHandler,
)
from ...utils import (
base64_decode,
quote,
response_text,
)
from .client import AsyncGitHubClient
# Template context shared by all GitHub-backed handlers (passed as **kwargs
# to render_template / finish_notebook).
PROVIDER_CTX = {
    'provider_label': 'GitHub',
    'provider_icon': 'github',
}
class GithubClientMixin(object):
    @property
    def github_client(self):
        """Lazily build and cache the upgraded GitHub API client."""
        client = getattr(self, "_github_client", None)
        if client is None:
            client = self._github_client = AsyncGitHubClient(self.client)
        return client

    def client_error_message(self, exc, url, body, msg=None):
        """Translate GitHub rate-limit 403s into a 503; defer everything else."""
        rate_limited = exc.code == 403 and 'rate limit' in body.lower()
        if rate_limited:
            return 503, "GitHub API rate limit exceeded. Try again soon."
        return super(GithubClientMixin, self).client_error_message(
            exc, url, body, msg
        )
class RawGitHubURLHandler(BaseHandler):
    """redirect old /urls/raw.github urls to /github/ API urls"""
    def get(self, user, repo, path):
        destination = u'{format}/github/{user}/{repo}/blob/{path}'.format(
            format=self.format_prefix, user=user, repo=repo, path=path,
        )
        app_log.info("Redirecting %s to %s", self.request.uri, destination)
        self.redirect(destination)
class GitHubRedirectHandler(GithubClientMixin, BaseHandler):
    """redirect github blob|tree|raw urls to /github/ API urls"""
    def get(self, user, repo, app, ref, path):
        # raw views are served through the blob handler
        target_app = 'blob' if app == 'raw' else app
        destination = u'{format}/github/{user}/{repo}/{app}/{ref}/{path}'.format(
            format=self.format_prefix, user=user, repo=repo, app=target_app,
            ref=ref, path=path,
        )
        app_log.info("Redirecting %s to %s", self.request.uri, destination)
        self.redirect(destination)
class GitHubUserHandler(GithubClientMixin, BaseHandler):
    """list a user's github repos"""
    @cached
    @gen.coroutine
    def get(self, user):
        params = {'sort': 'updated'}
        page = self.get_argument("page", None)
        if page:
            params['page'] = page
        with self.catch_client_error():
            response = yield self.github_client.get_repos(user, params=params)

        prev_url, next_url = self.get_page_links(response)
        repos = json.loads(response_text(response))

        # each repo entry links to its own tree view by name
        entries = [dict(url=repo['name'], name=repo['name']) for repo in repos]
        provider_url = u"https://github.com/{user}".format(user=user)
        html = self.render_template("userview.html",
            entries=entries, provider_url=provider_url,
            next_url=next_url, prev_url=prev_url,
            **PROVIDER_CTX
        )
        yield self.cache_and_finish(html)
class GitHubRepoHandler(BaseHandler):
    """redirect /github/user/repo to .../tree/master"""
    def get(self, user, repo):
        destination = "%s/github/%s/%s/tree/master/" % (self.format_prefix, user, repo)
        self.redirect(destination)
class GitHubTreeHandler(GithubClientMixin, BaseHandler):
    """list files in a github repo (like github tree)"""
    @cached
    @gen.coroutine
    def get(self, user, repo, ref, path):
        # tree views are canonically slash-terminated
        if not self.request.uri.endswith('/'):
            self.redirect(self.request.uri + '/')
            return
        path = path.rstrip('/')
        with self.catch_client_error():
            response = yield self.github_client.get_contents(user, repo, path, ref=ref)
        contents = json.loads(response_text(response))

        branches, tags = yield self.refs(user, repo)

        # give every branch/tag a link to this same path on that ref
        for nav_ref in branches + tags:
            nav_ref["url"] = (u"/github/{user}/{repo}/tree/{ref}/{path}"
                .format(
                    ref=nav_ref["name"], user=user, repo=repo, path=path
                ))

        if not isinstance(contents, list):
            # A non-list response means this path is a single file (blob).
            # FIX: app_log is a stdlib logger, which interpolates with
            # %-style lazy arguments; the previous brace-format string with
            # extra= was emitted verbatim, never substituted.
            app_log.info(
                "%s/%s/%s/%s/%s not tree, redirecting to blob",
                self.format_prefix, user, repo, ref, path,
            )
            self.redirect(
                u"{format}/github/{user}/{repo}/blob/{ref}/{path}".format(
                    format=self.format_prefix, user=user, repo=repo, ref=ref, path=path,
                )
            )
            return

        base_url = u"/github/{user}/{repo}/tree/{ref}".format(
            user=user, repo=repo, ref=ref,
        )
        provider_url = u"https://github.com/{user}/{repo}/tree/{ref}/{path}".format(
            user=user, repo=repo, ref=ref, path=path,
        )

        breadcrumbs = [{
            'url' : base_url,
            'name' : repo,
        }]
        breadcrumbs.extend(self.breadcrumbs(path, base_url))

        # bucket entries so the listing shows dirs, then notebooks, then rest
        entries = []
        dirs = []
        ipynbs = []
        others = []
        for file in contents:
            e = {}
            e['name'] = file['name']
            if file['type'] == 'dir':
                e['url'] = u'/github/{user}/{repo}/tree/{ref}/{path}'.format(
                    user=user, repo=repo, ref=ref, path=file['path']
                )
                e['url'] = quote(e['url'])
                e['class'] = 'fa-folder-open'
                dirs.append(e)
            elif file['name'].endswith('.ipynb'):
                e['url'] = u'/github/{user}/{repo}/blob/{ref}/{path}'.format(
                    user=user, repo=repo, ref=ref, path=file['path']
                )
                e['url'] = quote(e['url'])
                e['class'] = 'fa-book'
                ipynbs.append(e)
            elif file['html_url']:
                e['url'] = file['html_url']
                e['class'] = 'fa-share'
                others.append(e)
            else:
                # submodules don't have html_url
                e['url'] = ''
                e['class'] = 'fa-folder-close'
                others.append(e)

        entries.extend(dirs)
        entries.extend(ipynbs)
        entries.extend(others)

        html = self.render_template("treelist.html",
            entries=entries, breadcrumbs=breadcrumbs, provider_url=provider_url,
            user=user, repo=repo, ref=ref, path=path,
            branches=branches, tags=tags, tree_type="github",
            tree_label="repositories",
            **PROVIDER_CTX
        )
        yield self.cache_and_finish(html)

    @gen.coroutine
    def refs(self, user, repo):
        """get branches and tags for this user/repo"""
        ref_types = ("branches", "tags")
        ref_data = [None, None]
        for i, ref_type in enumerate(ref_types):
            with self.catch_client_error():
                response = yield getattr(self.github_client, "get_%s" % ref_type)(user, repo)
            ref_data[i] = json.loads(response_text(response))
        raise gen.Return(ref_data)
class GitHubBlobHandler(GithubClientMixin, RenderingHandler):
    """handler for files on github

    If it's a...

    - notebook, render it
    - non-notebook file, serve file unmodified
    - directory, redirect to tree
    """
    @cached
    @gen.coroutine
    def get(self, user, repo, ref, path):
        raw_url = u"https://raw.githubusercontent.com/{user}/{repo}/{ref}/{path}".format(
            user=user, repo=repo, ref=ref, path=quote(path)
        )
        blob_url = u"https://github.com/{user}/{repo}/blob/{ref}/{path}".format(
            user=user, repo=repo, ref=ref, path=quote(path),
        )
        with self.catch_client_error():
            tree_entry = yield self.github_client.get_tree_entry(
                user, repo, path=url_unescape(path), ref=ref
            )

        if tree_entry['type'] == 'tree':
            # directories are served by the tree handler
            tree_url = "/github/{user}/{repo}/tree/{ref}/{path}/".format(
                user=user, repo=repo, ref=ref, path=quote(path),
            )
            app_log.info("%s is a directory, redirecting to %s", self.request.path, tree_url)
            self.redirect(tree_url)
            return

        # fetch file data from the blobs API
        with self.catch_client_error():
            response = yield self.github_client.fetch(tree_entry['url'])

        data = json.loads(response_text(response))
        contents = data['content']
        if data['encoding'] == 'base64':
            # filedata will be bytes
            filedata = base64_decode(contents)
        else:
            # filedata will be unicode
            filedata = contents

        if path.endswith('.ipynb'):
            dir_path = path.rsplit('/', 1)[0]
            base_url = "/github/{user}/{repo}/tree/{ref}".format(
                user=user, repo=repo, ref=ref,
            )
            breadcrumbs = [{
                'url' : base_url,
                'name' : repo,
            }]
            breadcrumbs.extend(self.breadcrumbs(dir_path, base_url))

            try:
                # filedata may be bytes, but we need text
                if isinstance(filedata, bytes):
                    nbjson = filedata.decode('utf-8')
                else:
                    nbjson = filedata
            except Exception:
                app_log.error("Failed to decode notebook: %s", raw_url, exc_info=True)
                raise web.HTTPError(400)

            yield self.finish_notebook(nbjson, raw_url,
                provider_url=blob_url,
                breadcrumbs=breadcrumbs,
                msg="file from GitHub: %s" % raw_url,
                public=True,
                format=self.format,
                request=self.request,
                **PROVIDER_CTX
            )
        else:
            mime, enc = mimetypes.guess_type(path)
            self.set_header("Content-Type", mime or 'text/plain')
            # FIX: yield the Future so the coroutine waits for the cache
            # write/finish, matching the notebook branch and the other
            # handlers; previously this was fire-and-forget.
            yield self.cache_and_finish(filedata)
def default_handlers(handlers=None):
    """Tornado handlers

    Returns the legacy raw.github redirect routes, followed by any
    *handlers* passed in, followed by the /github/ routes.
    """
    # FIX: avoid a mutable default argument; None means "no extra handlers"
    handlers = handlers or []
    return [
        # ideally these URIs should have been caught by an appropriate
        # uri_rewrite rather than letting the url provider catch them and then
        # fixing it here.
        # There are probably links in the wild that depend on these, so keep
        # these handlers for backwards compatibility.
        (r'/url[s]?/github\.com/([^\/]+)/([^\/]+)/(tree|blob|raw)/([^\/]+)/(.*)', GitHubRedirectHandler),
        (r'/url[s]?/raw\.?github\.com/([^\/]+)/([^\/]+)/(.*)', RawGitHubURLHandler),
        (r'/url[s]?/raw\.?githubusercontent\.com/([^\/]+)/([^\/]+)/(.*)', RawGitHubURLHandler),
    ] + handlers + [
        (r'/github/([^\/]+)', AddSlashHandler),
        (r'/github/([^\/]+)/', GitHubUserHandler),
        (r'/github/([^\/]+)/([^\/]+)', AddSlashHandler),
        (r'/github/([^\/]+)/([^\/]+)/', GitHubRepoHandler),
        (r'/github/([^\/]+)/([^\/]+)/blob/([^\/]+)/(.*)/', RemoveSlashHandler),
        (r'/github/([^\/]+)/([^\/]+)/blob/([^\/]+)/(.*)', GitHubBlobHandler),
        (r'/github/([^\/]+)/([^\/]+)/tree/([^\/]+)', AddSlashHandler),
        (r'/github/([^\/]+)/([^\/]+)/tree/([^\/]+)/(.*)', GitHubTreeHandler),
    ]
def uri_rewrites(rewrites=None):
    """Return URL-to-/github/-route rewrite rules.

    Any *rewrites* passed in take precedence (they come first). GitHub
    Enterprise rules are appended when GITHUB_API_URL is set.
    """
    # FIX: avoid a mutable default argument; None means "no extra rewrites"
    rewrites = rewrites or []
    github_rewrites = [
        # three different uris for a raw view
        (r'^https?://github\.com/([^\/]+)/([^\/]+)/raw/([^\/]+)/(.*)',
            u'/github/{0}/{1}/blob/{2}/{3}'),
        (r'^https?://raw\.github\.com/([^\/]+)/([^\/]+)/(.*)',
            u'/github/{0}/{1}/blob/{2}'),
        (r'^https?://raw\.githubusercontent\.com/([^\/]+)/([^\/]+)/(.*)',
            u'/github/{0}/{1}/blob/{2}'),
        # trees & blobs
        (r'^https?://github.com/([\w\-]+)/([^\/]+)/(blob|tree)/(.*)$',
            u'/github/{0}/{1}/{2}/{3}'),
        # user/repo
        (r'^([\w\-]+)/([^\/]+)$',
            u'/github/{0}/{1}/tree/master/'),
        # user
        (r'^([\w\-]+)$',
            u'/github/{0}/'),
    ]
    # github enterprise
    github_api_url = os.environ.get('GITHUB_API_URL', '')
    if github_api_url:
        # the enterprise host is everything before the API path
        host = github_api_url.split('/api/v3/')[0]
        github_rewrites.extend([
            # raw view
            # FIX: raw strings, so \/ and \w are regex syntax rather than
            # (invalid) string escape sequences
            (r'^' + host + r'/([^\/]+)/([^\/]+)/raw/([^\/]+)/(.*)',
                u'/github/{0}/{1}/blob/{2}/{3}'),
            # trees & blobs
            (r'^' + host + r'/([\w\-]+)/([^\/]+)/(blob|tree)/(.*)$',
                u'/github/{0}/{1}/{2}/{3}'),
        ])
    return rewrites + github_rewrites
| |
# Copyright (c) 2012-2014 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2013 Amin Farmahini-Farahani
# Copyright (c) 2015 University of Kaiserslautern
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
# Ani Udipi
# Omar Naji
# Matthias Jung
from m5.params import *
from AbstractMemory import *
# Enum for memory scheduling algorithms, currently First-Come
# First-Served and a First-Row Hit then First-Come First-Served
class MemSched(Enum): vals = ['fcfs', 'frfcfs']  # first-come-first-served / first-row-hit then FCFS
# Enum for the address mapping. With Ch, Ra, Ba, Ro and Co denoting
# channel, rank, bank, row and column, respectively, and going from
# MSB to LSB. Available are RoRaBaChCo and RoRaBaCoCh, that are
# suitable for an open-page policy, optimising for sequential accesses
# hitting in the open row. For a closed-page policy, RoCoRaBaCh
# maximises parallelism.
class AddrMap(Enum): vals = ['RoRaBaChCo', 'RoRaBaCoCh', 'RoCoRaBaCh']  # Ro/Ra/Ba/Ch/Co field order, MSB to LSB
# Enum for the page policy, either open, open_adaptive, close, or
# close_adaptive.
# row-buffer (page) management policies
class PageManage(Enum): vals = ['open', 'open_adaptive', 'close',
                                'close_adaptive']
# DRAMCtrl is a single-channel single-ported DRAM controller model
# that aims to model the most important system-level performance
# effects of a DRAM without getting into too much detail of the DRAM
# itself.
class DRAMCtrl(AbstractMemory):
    """Single-channel, single-ported DRAM controller model.

    Aims to model the most important system-level performance effects of
    a DRAM (scheduling, address mapping, page policy, timing and power
    constraints) without modelling the DRAM device internals in detail.
    """
    type = 'DRAMCtrl'
    cxx_header = "mem/dram_ctrl.hh"

    # single-ported on the system interface side, instantiate with a
    # bus in front of the controller for multiple ports
    port = SlavePort("Slave port")

    # the basic configuration of the controller architecture, note
    # that each entry corresponds to a burst for the specific DRAM
    # configuration (e.g. x32 with burst length 8 is 32 bytes) and not
    # the cacheline size or request/packet size
    write_buffer_size = Param.Unsigned(64, "Number of write queue entries")
    read_buffer_size = Param.Unsigned(32, "Number of read queue entries")

    # threshold in percent for when to forcefully trigger writes and
    # start emptying the write buffer
    write_high_thresh_perc = Param.Percent(85, "Threshold to force writes")

    # threshold in percentage for when to start writes if the read
    # queue is empty
    write_low_thresh_perc = Param.Percent(50, "Threshold to start writes")

    # minimum write bursts to schedule before switching back to reads
    min_writes_per_switch = Param.Unsigned(16, "Minimum write bursts before "
                                           "switching to reads")

    # scheduler, address map and page policy
    mem_sched_policy = Param.MemSched('frfcfs', "Memory scheduling policy")
    addr_mapping = Param.AddrMap('RoRaBaCoCh', "Address mapping policy")
    page_policy = Param.PageManage('open_adaptive', "Page management policy")

    # enforce a limit on the number of accesses per row
    # (stray C-style semicolon removed)
    max_accesses_per_row = Param.Unsigned(16, "Max accesses per row before "
                                          "closing")

    # size of DRAM Chip in Bytes
    device_size = Param.MemorySize("Size of DRAM chip")

    # pipeline latency of the controller and PHY, split into a
    # frontend part and a backend part, with reads and writes serviced
    # by the queues only seeing the frontend contribution, and reads
    # serviced by the memory seeing the sum of the two
    static_frontend_latency = Param.Latency("10ns", "Static frontend latency")
    static_backend_latency = Param.Latency("10ns", "Static backend latency")

    # the physical organisation of the DRAM
    device_bus_width = Param.Unsigned("data bus width in bits for each DRAM "\
                                      "device/chip")
    burst_length = Param.Unsigned("Burst length (BL) in beats")
    device_rowbuffer_size = Param.MemorySize("Page (row buffer) size per "\
                                             "device/chip")
    devices_per_rank = Param.Unsigned("Number of devices/chips per rank")
    ranks_per_channel = Param.Unsigned("Number of ranks per channel")

    # default to 0 bank groups per rank, indicating bank group architecture
    # is not used
    # update per memory class when bank group architecture is supported
    bank_groups_per_rank = Param.Unsigned(0, "Number of bank groups per rank")
    banks_per_rank = Param.Unsigned("Number of banks per rank")

    # only used for the address mapping as the controller by
    # construction is a single channel and multiple controllers have
    # to be instantiated for a multi-channel configuration
    channels = Param.Unsigned(1, "Number of channels")

    # For power modelling we need to know if the DRAM has a DLL or not
    dll = Param.Bool(True, "DRAM has DLL or not")

    # DRAMPower provides in addition to the core power, the possibility to
    # include RD/WR termination and IO power. This calculation assumes some
    # default values. The integration of DRAMPower with gem5 does not include
    # IO and RD/WR termination power by default. This might be added as an
    # additional feature in the future.

    # timing behaviour and constraints - all in nanoseconds

    # the base clock period of the DRAM
    tCK = Param.Latency("Clock period")

    # the amount of time in nanoseconds from issuing an activate command
    # to the data being available in the row buffer for a read/write
    tRCD = Param.Latency("RAS to CAS delay")

    # the time from issuing a read/write command to seeing the actual data
    tCL = Param.Latency("CAS latency")

    # minimum time between a precharge and subsequent activate
    tRP = Param.Latency("Row precharge time")

    # minimum time between an activate and a precharge to the same row
    tRAS = Param.Latency("ACT to PRE delay")

    # minimum time between a write data transfer and a precharge
    tWR = Param.Latency("Write recovery time")

    # minimum time between a read and precharge command
    tRTP = Param.Latency("Read to precharge")

    # time to complete a burst transfer, typically the burst length
    # divided by two due to the DDR bus, but by making it a parameter
    # it is easier to also evaluate SDR memories like WideIO.
    # This parameter has to account for burst length.
    # Read/Write requests with data size larger than one full burst are broken
    # down into multiple requests in the controller
    # tBURST is equivalent to the CAS-to-CAS delay (tCCD)
    # With bank group architectures, tBURST represents the CAS-to-CAS
    # delay for bursts to different bank groups (tCCD_S)
    tBURST = Param.Latency("Burst duration (for DDR burst length / 2 cycles)")

    # CAS-to-CAS delay for bursts to the same bank group
    # only utilized with bank group architectures; set to 0 for default case
    # tBURST is equivalent to tCCD_S; no explicit parameter required
    # for CAS-to-CAS delay for bursts to different bank groups
    tCCD_L = Param.Latency("0ns", "Same bank group CAS to CAS delay")

    # time taken to complete one refresh cycle (N rows in all banks)
    tRFC = Param.Latency("Refresh cycle time")

    # refresh command interval, how often a "ref" command needs
    # to be sent. It is 7.8 us for a 64ms refresh requirement
    tREFI = Param.Latency("Refresh command interval")

    # write-to-read, same rank turnaround penalty
    tWTR = Param.Latency("Write to read, same rank switching time")

    # read-to-write, same rank turnaround penalty
    tRTW = Param.Latency("Read to write, same rank switching time")

    # rank-to-rank bus delay penalty
    # this does not correlate to a memory timing parameter and encompasses:
    # 1) RD-to-RD, 2) WR-to-WR, 3) RD-to-WR, and 4) WR-to-RD
    # different rank bus delay
    tCS = Param.Latency("Rank to rank switching time")

    # minimum row activate to row activate delay time
    tRRD = Param.Latency("ACT to ACT delay")

    # only utilized with bank group architectures; set to 0 for default case
    tRRD_L = Param.Latency("0ns", "Same bank group ACT to ACT delay")

    # time window in which a maximum number of activates are allowed
    # to take place, set to 0 to disable
    tXAW = Param.Latency("X activation window")
    activation_limit = Param.Unsigned("Max number of activates in window")

    # time to exit power-down mode
    # Exit power-down to next valid command delay
    tXP = Param.Latency("0ns", "Power-up Delay")

    # Exit Powerdown to commands requiring a locked DLL
    tXPDLL = Param.Latency("0ns", "Power-up Delay with locked DLL")

    # time to exit self-refresh mode
    tXS = Param.Latency("0ns", "Self-refresh exit latency")

    # time to exit self-refresh mode with locked DLL
    tXSDLL = Param.Latency("0ns", "Self-refresh exit latency DLL")

    # Currently rolled into other params
    ######################################################################
    # tRC - assumed to be tRAS + tRP

    # Power Behaviour and Constraints
    # DRAMs like LPDDR and WideIO have 2 external voltage domains. These are
    # defined as VDD and VDD2. Each current is defined for each voltage domain
    # separately. For example, current IDD0 is active-precharge current for
    # voltage domain VDD and current IDD02 is active-precharge current for
    # voltage domain VDD2.
    # By default all currents are set to 0mA. Users who are only interested in
    # the performance of DRAMs can leave them at 0.

    # Operating 1 Bank Active-Precharge current
    IDD0 = Param.Current("0mA", "Active precharge current")

    # Operating 1 Bank Active-Precharge current multiple voltage Range
    IDD02 = Param.Current("0mA", "Active precharge current VDD2")

    # Precharge Power-down Current: Slow exit
    IDD2P0 = Param.Current("0mA", "Precharge Powerdown slow")

    # Precharge Power-down Current: Slow exit multiple voltage Range
    IDD2P02 = Param.Current("0mA", "Precharge Powerdown slow VDD2")

    # Precharge Power-down Current: Fast exit
    IDD2P1 = Param.Current("0mA", "Precharge Powerdown fast")

    # Precharge Power-down Current: Fast exit multiple voltage Range
    IDD2P12 = Param.Current("0mA", "Precharge Powerdown fast VDD2")

    # Precharge Standby current
    IDD2N = Param.Current("0mA", "Precharge Standby current")

    # Precharge Standby current multiple voltage range
    IDD2N2 = Param.Current("0mA", "Precharge Standby current VDD2")

    # Active Power-down current: slow exit
    IDD3P0 = Param.Current("0mA", "Active Powerdown slow")

    # Active Power-down current: slow exit multiple voltage range
    IDD3P02 = Param.Current("0mA", "Active Powerdown slow VDD2")

    # Active Power-down current : fast exit
    IDD3P1 = Param.Current("0mA", "Active Powerdown fast")

    # Active Power-down current : fast exit multiple voltage range
    IDD3P12 = Param.Current("0mA", "Active Powerdown fast VDD2")

    # Active Standby current
    IDD3N = Param.Current("0mA", "Active Standby current")

    # Active Standby current multiple voltage range
    IDD3N2 = Param.Current("0mA", "Active Standby current VDD2")

    # Burst Read Operating Current
    IDD4R = Param.Current("0mA", "READ current")

    # Burst Read Operating Current multiple voltage range
    IDD4R2 = Param.Current("0mA", "READ current VDD2")

    # Burst Write Operating Current
    IDD4W = Param.Current("0mA", "WRITE current")

    # Burst Write Operating Current multiple voltage range
    IDD4W2 = Param.Current("0mA", "WRITE current VDD2")

    # Refresh Current
    IDD5 = Param.Current("0mA", "Refresh current")

    # Refresh Current multiple voltage range
    IDD52 = Param.Current("0mA", "Refresh current VDD2")

    # Self-Refresh Current
    IDD6 = Param.Current("0mA", "Self-refresh Current")

    # Self-Refresh Current multiple voltage range
    IDD62 = Param.Current("0mA", "Self-refresh Current VDD2")

    # Main voltage range of the DRAM
    VDD = Param.Voltage("0V", "Main Voltage Range")

    # Second voltage range defined by some DRAMs
    VDD2 = Param.Voltage("0V", "2nd Voltage Range")
# A single DDR3-1600 x64 channel (one command and address bus), with
# timings based on a DDR3-1600 4 Gbit datasheet (Micron MT41J512M8) in
# an 8x8 configuration.
class DDR3_1600_x64(DRAMCtrl):
    """Single DDR3-1600 x64 channel (8x8 devices), timings from a
    DDR3-1600 4 Gbit datasheet (Micron MT41J512M8)."""
    # size of device in bytes
    device_size = '512MB'

    # 8x8 configuration, 8 devices each with an 8-bit interface
    device_bus_width = 8

    # DDR3 is a BL8 device
    burst_length = 8

    # Each device has a page (row buffer) size of 1 Kbyte (1K columns x8)
    device_rowbuffer_size = '1kB'

    # 8x8 configuration, so 8 devices
    devices_per_rank = 8

    # Use two ranks
    ranks_per_channel = 2

    # DDR3 has 8 banks in all configurations
    banks_per_rank = 8

    # 800 MHz
    tCK = '1.25ns'

    # 8 beats across an x64 interface translates to 4 clocks @ 800 MHz
    tBURST = '5ns'

    # DDR3-1600 11-11-11
    tRCD = '13.75ns'
    tCL = '13.75ns'
    tRP = '13.75ns'
    tRAS = '35ns'
    tRRD = '6ns'
    tXAW = '30ns'
    activation_limit = 4
    tRFC = '260ns'

    tWR = '15ns'

    # Greater of 4 CK or 7.5 ns
    tWTR = '7.5ns'

    # Greater of 4 CK or 7.5 ns
    tRTP = '7.5ns'

    # Default same rank rd-to-wr bus turnaround to 2 CK, @800 MHz = 2.5 ns
    tRTW = '2.5ns'

    # Default different rank bus delay to 2 CK, @800 MHz = 2.5 ns
    tCS = '2.5ns'

    # <=85C, half for >85C
    tREFI = '7.8us'

    # Current values from datasheet
    IDD0 = '75mA'
    IDD2N = '50mA'
    IDD3N = '57mA'
    IDD4W = '165mA'
    IDD4R = '187mA'
    IDD5 = '220mA'
    VDD = '1.5V'
# A single HMC-2500 x32 model based on:
# [1] DRAMSpec: a high-level DRAM bank modelling tool
# developed at the University of Kaiserslautern. This high level tool
# uses RC (resistance-capacitance) and CV (capacitance-voltage) models to
# estimate the DRAM bank latency and power numbers.
# [2] A Logic-base Interconnect for Supporting Near Memory Computation in the
# Hybrid Memory Cube (E. Azarkhish et. al)
# Assumed for the HMC model is a 30 nm technology node.
# The modelled HMC consists of 4 Gbit layers which sum up to 2GB of memory (4
# layers).
# Each layer has 16 vaults and each vault consists of 2 banks per layer.
# In order to be able to use the same controller used for 2D DRAM generations
# for HMC, the following analogy is done:
# Channel (DDR) => Vault (HMC)
# device_size (DDR) => size of a single layer in a vault
# ranks per channel (DDR) => number of layers
# banks per rank (DDR) => banks per layer
# devices per rank (DDR) => devices per layer ( 1 for HMC).
# The parameters for which no input is available are inherited from the DDR3
# configuration.
# This configuration includes the latencies from the DRAM to the logic layer of
# the HMC
class HMC_2500_x32(DDR3_1600_x64):
    """Single HMC-2500 x32 vault model; one DDR 'channel' maps to one HMC
    vault, ranks to layers. Unlisted parameters inherit from DDR3-1600."""
    # size of device
    # two banks per device with each bank 4MB [2]
    device_size = '8MB'

    # 1x32 configuration, 1 device with 32 TSVs [2]
    device_bus_width = 32

    # HMC is a BL8 device [2]
    burst_length = 8

    # Each device has a page (row buffer) size of 256 bytes [2]
    device_rowbuffer_size = '256B'

    # 1x32 configuration, so 1 device [2]
    devices_per_rank = 1

    # 4 layers so 4 ranks [2]
    ranks_per_channel = 4

    # HMC has 2 banks per layer [2]
    # Each layer represents a rank. With 4 layers and 8 banks in total, each
    # layer has 2 banks; thus 2 banks per rank.
    banks_per_rank = 2

    # 1250 MHz [2]
    tCK = '0.8ns'

    # 8 beats across an x32 interface translates to 4 clocks @ 1250 MHz
    tBURST = '3.2ns'

    # Values using DRAMSpec HMC model [1]
    tRCD = '10.2ns'
    tCL = '9.9ns'
    tRP = '7.7ns'
    tRAS = '21.6ns'

    # tRRD depends on the power supply network for each vendor.
    # We assume a tRRD of a double bank approach to be equal to 4 clock
    # cycles (Assumption)
    tRRD = '3.2ns'

    # activation limit is set to 0 since there are only 2 banks per vault layer.
    activation_limit = 0

    # Values using DRAMSpec HMC model [1]
    tRFC = '59ns'
    tWR = '8ns'
    tRTP = '4.9ns'

    # Default different rank bus delay assumed to 1 CK for TSVs, @1250 MHz = 0.8
    # ns (Assumption)
    tCS = '0.8ns'

    # Value using DRAMSpec HMC model [1]
    tREFI = '3.9us'

    # Set default controller parameters
    page_policy = 'close'
    write_buffer_size = 8
    read_buffer_size = 8
    addr_mapping = 'RoCoRaBaCh'
    min_writes_per_switch = 8
# A single DDR3-2133 x64 channel refining a selected subset of the
# options for the DDR-1600 configuration, based on the same DDR3-1600
# 4 Gbit datasheet (Micron MT41J512M8). Most parameters are kept
# consistent across the two configurations.
class DDR3_2133_x64(DDR3_1600_x64):
    """Single DDR3-2133 x64 channel; refines only clocking/timing/current
    parameters of DDR3-1600, same Micron MT41J512M8 datasheet."""
    # 1066 MHz
    tCK = '0.938ns'

    # 8 beats across an x64 interface translates to 4 clocks @ 1066 MHz
    tBURST = '3.752ns'

    # DDR3-2133 14-14-14
    tRCD = '13.09ns'
    tCL = '13.09ns'
    tRP = '13.09ns'
    tRAS = '33ns'
    tRRD = '5ns'
    tXAW = '25ns'

    # Current values from datasheet
    IDD0 = '70mA'
    IDD2N = '37mA'
    IDD3N = '44mA'
    IDD4W = '157mA'
    IDD4R = '191mA'
    IDD5 = '250mA'
    VDD = '1.5V'
# A single DDR4-2400 x64 channel (one command and address bus), with
# timings based on a DDR4-2400 4 Gbit datasheet (Micron MT40A512M8)
# in an 8x8 configuration.
class DDR4_2400_x64(DRAMCtrl):
    """A single DDR4-2400 x64 channel (one command and address bus), with
    timings based on a DDR4-2400 4 Gbit datasheet (Micron MT40A512M8)
    in an 8x8 configuration.

    Fix: removed the stray trailing semicolons on tCCD_L and tRRD_L
    (non-idiomatic Python; values are unchanged).
    """
    # size of device
    device_size = '512MB'
    # 8x8 configuration, 8 devices each with an 8-bit interface
    device_bus_width = 8
    # DDR4 is a BL8 device
    burst_length = 8
    # Each device has a page (row buffer) size of 1 Kbyte (1K columns x8)
    device_rowbuffer_size = '1kB'
    # 8x8 configuration, so 8 devices
    devices_per_rank = 8
    # Match our DDR3 configurations which is dual rank
    ranks_per_channel = 2
    # DDR4 has 2 (x16) or 4 (x4 and x8) bank groups
    # Set to 4 for x4, x8 case
    bank_groups_per_rank = 4
    # DDR4 has 16 banks (4 bank groups) in all
    # configurations. Currently we do not capture the additional
    # constraints incurred by the bank groups
    banks_per_rank = 16
    # 1200 MHz
    tCK = '0.833ns'
    # 8 beats across an x64 interface translates to 4 clocks @ 1200 MHz
    # tBURST is equivalent to the CAS-to-CAS delay (tCCD)
    # With bank group architectures, tBURST represents the CAS-to-CAS
    # delay for bursts to different bank groups (tCCD_S)
    tBURST = '3.333ns'
    # @2400 data rate, tCCD_L is 6 CK
    # CAS-to-CAS delay for bursts to the same bank group
    # tBURST is equivalent to tCCD_S; no explicit parameter required
    # for CAS-to-CAS delay for bursts to different bank groups
    tCCD_L = '5ns'
    # DDR4-2400 17-17-17
    tRCD = '14.16ns'
    tCL = '14.16ns'
    tRP = '14.16ns'
    tRAS = '32ns'
    # RRD_S (different bank group) for 1K page is MAX(4 CK, 3.3ns)
    tRRD = '3.3ns'
    # RRD_L (same bank group) for 1K page is MAX(4 CK, 4.9ns)
    tRRD_L = '4.9ns'
    tXAW = '21ns'
    activation_limit = 4
    tRFC = '350ns'
    tWR = '15ns'
    # Here using the average of WTR_S and WTR_L
    tWTR = '5ns'
    # Greater of 4 CK or 7.5 ns
    tRTP = '7.5ns'
    # Default same rank rd-to-wr bus turnaround to 2 CK, @1200 MHz = 1.666 ns
    tRTW = '1.666ns'
    # Default different rank bus delay to 2 CK, @1200 MHz = 1.666 ns
    tCS = '1.666ns'
    # <=85C, half for >85C
    tREFI = '7.8us'
    # Current values from datasheet
    IDD0 = '64mA'
    IDD02 = '4mA'
    IDD2N = '50mA'
    IDD3N = '67mA'
    IDD3N2 = '3mA'
    IDD4W = '180mA'
    IDD4R = '160mA'
    IDD5 = '192mA'
    VDD = '1.2V'
    VDD2 = '2.5V'
# A single LPDDR2-S4 x32 interface (one command/address bus), with
# default timings based on a LPDDR2-1066 4 Gbit part (Micron MT42L128M32D1)
# in a 1x32 configuration.
class LPDDR2_S4_1066_x32(DRAMCtrl):
    """A single LPDDR2-S4 x32 interface (one command/address bus), with
    default timings based on a LPDDR2-1066 4 Gbit part
    (Micron MT42L128M32D1) in a 1x32 configuration.
    """
    # No DLL in LPDDR2
    dll = False
    # size of device
    device_size = '512MB'
    # 1x32 configuration, 1 device with a 32-bit interface
    device_bus_width = 32
    # LPDDR2_S4 is a BL4 and BL8 device
    burst_length = 8
    # Each device has a page (row buffer) size of 1KB
    # (this depends on the memory density)
    device_rowbuffer_size = '1kB'
    # 1x32 configuration, so 1 device
    devices_per_rank = 1
    # Use a single rank
    ranks_per_channel = 1
    # LPDDR2-S4 has 8 banks in all configurations
    banks_per_rank = 8
    # 533 MHz
    tCK = '1.876ns'
    # Fixed at 15 ns
    tRCD = '15ns'
    # 8 CK read latency, 4 CK write latency @ 533 MHz, 1.876 ns cycle time
    tCL = '15ns'
    # Pre-charge one bank 15 ns (all banks 18 ns)
    tRP = '15ns'
    tRAS = '42ns'
    tWR = '15ns'
    tRTP = '7.5ns'
    # 8 beats across an x32 DDR interface translates to 4 clocks @ 533 MHz.
    # Note this is a BL8 DDR device.
    # Requests larger than 32 bytes are broken down into multiple requests
    # in the controller
    tBURST = '7.5ns'
    # LPDDR2-S4, 4 Gbit
    tRFC = '130ns'
    tREFI = '3.9us'
    # Irrespective of speed grade, tWTR is 7.5 ns
    tWTR = '7.5ns'
    # Default same rank rd-to-wr bus turnaround to 2 CK, @533 MHz = 3.75 ns
    tRTW = '3.75ns'
    # Default different rank bus delay to 2 CK, @533 MHz = 3.75 ns
    tCS = '3.75ns'
    # Activate to activate irrespective of density and speed grade
    tRRD = '10.0ns'
    # Irrespective of density, tFAW is 50 ns
    tXAW = '50ns'
    activation_limit = 4
    # Current values from datasheet
    IDD0 = '15mA'
    IDD02 = '70mA'
    IDD2N = '2mA'
    IDD2N2 = '30mA'
    IDD3N = '2.5mA'
    IDD3N2 = '30mA'
    IDD4W = '10mA'
    IDD4W2 = '190mA'
    IDD4R = '3mA'
    IDD4R2 = '220mA'
    IDD5 = '40mA'
    IDD52 = '150mA'
    VDD = '1.8V'
    VDD2 = '1.2V'
# A single WideIO x128 interface (one command and address bus), with
# default timings based on an estimated WIO-200 8 Gbit part.
class WideIO_200_x128(DRAMCtrl):
    """A single WideIO x128 interface (one command and address bus), with
    default timings based on an estimated WIO-200 8 Gbit part.
    """
    # No DLL for WideIO
    dll = False
    # size of device
    device_size = '1024MB'
    # 1x128 configuration, 1 device with a 128-bit interface
    device_bus_width = 128
    # This is a BL4 device
    burst_length = 4
    # Each device has a page (row buffer) size of 4KB
    # (this depends on the memory density)
    device_rowbuffer_size = '4kB'
    # 1x128 configuration, so 1 device
    devices_per_rank = 1
    # Use one rank for a one-high die stack
    ranks_per_channel = 1
    # WideIO has 4 banks in all configurations
    banks_per_rank = 4
    # 200 MHz
    tCK = '5ns'
    # WIO-200
    tRCD = '18ns'
    tCL = '18ns'
    tRP = '18ns'
    tRAS = '42ns'
    tWR = '15ns'
    # Read to precharge is same as the burst
    tRTP = '20ns'
    # 4 beats across an x128 SDR interface translates to 4 clocks @ 200 MHz.
    # Note this is a BL4 SDR device.
    tBURST = '20ns'
    # WIO 8 Gb
    tRFC = '210ns'
    # WIO 8 Gb, <=85C, half for >85C
    tREFI = '3.9us'
    # Greater of 2 CK or 15 ns, 2 CK @ 200 MHz = 10 ns
    tWTR = '15ns'
    # Default same rank rd-to-wr bus turnaround to 2 CK, @200 MHz = 10 ns
    tRTW = '10ns'
    # Default different rank bus delay to 2 CK, @200 MHz = 10 ns
    tCS = '10ns'
    # Activate to activate irrespective of density and speed grade
    tRRD = '10.0ns'
    # Two instead of four activation window
    tXAW = '50ns'
    activation_limit = 2
    # The WideIO specification does not provide current information
# The WideIO specification does not provide current information
# A single LPDDR3 x32 interface (one command/address bus), with
# default timings based on a LPDDR3-1600 4 Gbit part (Micron
# EDF8132A1MC) in a 1x32 configuration.
class LPDDR3_1600_x32(DRAMCtrl):
    """A single LPDDR3 x32 interface (one command/address bus), with
    default timings based on a LPDDR3-1600 4 Gbit part
    (Micron EDF8132A1MC) in a 1x32 configuration.
    """
    # No DLL for LPDDR3
    dll = False
    # size of device
    device_size = '512MB'
    # 1x32 configuration, 1 device with a 32-bit interface
    device_bus_width = 32
    # LPDDR3 is a BL8 device
    burst_length = 8
    # Each device has a page (row buffer) size of 4KB
    device_rowbuffer_size = '4kB'
    # 1x32 configuration, so 1 device
    devices_per_rank = 1
    # Technically the datasheet is a dual-rank package, but for
    # comparison with the LPDDR2 config we stick to a single rank
    ranks_per_channel = 1
    # LPDDR3 has 8 banks in all configurations
    banks_per_rank = 8
    # 800 MHz
    tCK = '1.25ns'
    tRCD = '18ns'
    # 12 CK read latency, 6 CK write latency @ 800 MHz, 1.25 ns cycle time
    tCL = '15ns'
    tRAS = '42ns'
    tWR = '15ns'
    # Greater of 4 CK or 7.5 ns, 4 CK @ 800 MHz = 5 ns
    tRTP = '7.5ns'
    # Pre-charge one bank 18 ns (all banks 21 ns)
    tRP = '18ns'
    # 8 beats across a x32 DDR interface translates to 4 clocks @ 800 MHz.
    # Note this is a BL8 DDR device.
    # Requests larger than 32 bytes are broken down into multiple requests
    # in the controller
    tBURST = '5ns'
    # LPDDR3, 4 Gb
    tRFC = '130ns'
    tREFI = '3.9us'
    # Irrespective of speed grade, tWTR is 7.5 ns
    tWTR = '7.5ns'
    # Default same rank rd-to-wr bus turnaround to 2 CK, @800 MHz = 2.5 ns
    tRTW = '2.5ns'
    # Default different rank bus delay to 2 CK, @800 MHz = 2.5 ns
    tCS = '2.5ns'
    # Activate to activate irrespective of density and speed grade
    tRRD = '10.0ns'
    # Irrespective of size, tFAW is 50 ns
    tXAW = '50ns'
    activation_limit = 4
    # Current values from datasheet
    IDD0 = '8mA'
    IDD02 = '60mA'
    IDD2N = '0.8mA'
    IDD2N2 = '26mA'
    IDD3N = '2mA'
    IDD3N2 = '34mA'
    IDD4W = '2mA'
    IDD4W2 = '190mA'
    IDD4R = '2mA'
    IDD4R2 = '230mA'
    IDD5 = '28mA'
    IDD52 = '150mA'
    VDD = '1.8V'
    VDD2 = '1.2V'
# A single GDDR5 x64 interface, with
# default timings based on a GDDR5-4000 1 Gbit part (SK Hynix
# H5GQ1H24AFR) in a 2x32 configuration.
class GDDR5_4000_x64(DRAMCtrl):
    """A single GDDR5 x64 interface, with default timings based on a
    GDDR5-4000 1 Gbit part (SK Hynix H5GQ1H24AFR) in a 2x32
    configuration.

    Fix: removed the stray trailing semicolon on tCCD_L (non-idiomatic
    Python; value is unchanged).
    """
    # size of device
    device_size = '128MB'
    # 2x32 configuration, 1 device with a 32-bit interface
    device_bus_width = 32
    # GDDR5 is a BL8 device
    burst_length = 8
    # Each device has a page (row buffer) size of 2Kbits (256Bytes)
    device_rowbuffer_size = '256B'
    # 2x32 configuration, so 2 devices
    devices_per_rank = 2
    # assume single rank
    ranks_per_channel = 1
    # GDDR5 has 4 bank groups
    bank_groups_per_rank = 4
    # GDDR5 has 16 banks with 4 bank groups
    banks_per_rank = 16
    # 1000 MHz
    tCK = '1ns'
    # 8 beats across an x64 interface translates to 2 clocks @ 1000 MHz
    # Data bus runs @2000 Mhz => DDR ( data runs at 4000 MHz )
    # 8 beats at 4000 MHz = 2 beats at 1000 MHz
    # tBURST is equivalent to the CAS-to-CAS delay (tCCD)
    # With bank group architectures, tBURST represents the CAS-to-CAS
    # delay for bursts to different bank groups (tCCD_S)
    tBURST = '2ns'
    # @1000MHz data rate, tCCD_L is 3 CK
    # CAS-to-CAS delay for bursts to the same bank group
    # tBURST is equivalent to tCCD_S; no explicit parameter required
    # for CAS-to-CAS delay for bursts to different bank groups
    tCCD_L = '3ns'
    tRCD = '12ns'
    # tCL is not directly found in datasheet and assumed equal tRCD
    tCL = '12ns'
    tRP = '12ns'
    tRAS = '28ns'
    # RRD_S (different bank group)
    # RRD_S is 5.5 ns in datasheet.
    # rounded to the next multiple of tCK
    tRRD = '6ns'
    # RRD_L (same bank group)
    # RRD_L is 5.5 ns in datasheet.
    # rounded to the next multiple of tCK
    tRRD_L = '6ns'
    tXAW = '23ns'
    # tXAW < 4 x tRRD.
    # Therefore, activation limit is set to 0
    activation_limit = 0
    tRFC = '65ns'
    tWR = '12ns'
    # Here using the average of WTR_S and WTR_L
    tWTR = '5ns'
    # Read-to-Precharge 2 CK
    tRTP = '2ns'
    # Assume 2 cycles
    tRTW = '2ns'
    # Default different rank bus delay to 2 CK, @1000 MHz = 2 ns
    tCS = '2ns'
    tREFI = '3.9us'
# Based on Micron MT41K128M16HA-15E at 1066Mbps (16 Meg x 16 x 8 banks).
# This is used in the Zedboard.
class MicronDDR3_1066_x32(DRAMCtrl):
    """Based on Micron MT41K128M16HA-15E at 1066Mbps
    (16 Meg x 16 x 8 banks). This is used in the Zedboard.
    """
    device_size = "256MB"
    device_bus_width = 32
    burst_length = 8
    devices_per_rank = 16  # 16 x 8 banks, so 16 devices.
    ranks_per_channel = 1  # Single rank.
    banks_per_rank = 8  # Per the name.
    device_rowbuffer_size = '4kB'
    tCL = '13.1ns'
    tRCD = '13.1ns'
    tRP = '13.1ns'
    tRAS = '37.5ns'
    tBURST = '7.5ns'  # 8 bursts / 2 per clk = 4 clocks at 533MHz.
    tRRD = '11.25ns'  # 6 cycles.
    tRFC = '161.25ns'  # 86 cycles.
    tXAW = '50ns'  # 27 cycles.
    tREFI = '7.8us'
    tWTR = '7.5ns'  # Greater of 4 CLKs or 7.5ns, and 4 CLKs = 7.5ns.
    activation_limit = 4  # Guessing.
| |
#!/usr/bin/python
"""
Author: Jeremy M. Stober
Program: TILES.PY
Date: Monday, March 31 2008
Description: A simple CMAC implementation.
"""
import os, sys, getopt, pdb
from numpy import *
from numpy.random import *
import pylab
#import matplotlib.axes3d as axes3d
from mpl_toolkits.mplot3d import Axes3D
import pickle
import cmac.fast
pylab.ioff()
class CMAC(object):
    """A simple CMAC (Cerebellar Model Articulation Controller) function
    approximator: overlapping tilings whose tile weights live in a dict.

    Fixes:
      * save(): pickle.HIGHEST_PROTOCAL was a typo (AttributeError at
        runtime); use pickle.HIGHEST_PROTOCOL and close the file handle
        with a context manager.
      * quantize(): replaced the Python-2-only ``raise ValueError, msg``
        statement with the call form, valid in both Python 2 and 3.
    """

    def __init__(self, nlevels, quantization, beta):
        """nlevels: number of tilings; quantization: cell resolution;
        beta: learning rate."""
        self.nlevels = nlevels
        self.quantization = quantization
        self.weights = {}
        self.beta = beta

    def save(self, filename):
        """Pickle this CMAC to *filename*."""
        with open(filename, 'wb') as f:
            pickle.dump(self, f, pickle.HIGHEST_PROTOCOL)

    def quantize_alt(self, vector):
        """Alternative tiling that handles negative inputs explicitly."""
        quantized = []
        for x in vector:
            if x >= 0:
                quantized.append(int(x / self.quantization))
            else:
                quantized.append(int((x - self.quantization + 1) / self.quantization))
        points = []
        for i in range(self.nlevels):
            index = []
            for x in quantized:
                if x >= i:
                    index.append(x - (x - i) % self.nlevels)
                else:
                    index.append(x + 1 + (i - (x + 1)) % self.nlevels - self.nlevels)
            points.append(index)
        return points

    def quantize_fast(self, vector):
        """C-accelerated quantization (requires the cmac.fast extension)."""
        return cmac.fast.quantize(array(vector), self.nlevels, self.quantization)

    def quantize(self, vector):
        """
        Generate receptive field coordinates for each level of the CMAC.
        """
        # Error checking to make sure that the input size doesn't change
        # between calls; the first call fixes the expected dimensionality.
        if hasattr(self, 'input_size') and len(vector) != self.input_size:
            raise ValueError("Different input size in call to quantize!")
        elif not hasattr(self, 'input_size'):
            self.input_size = len(vector)
        quantized = (array(vector) / self.quantization).astype(int)
        coords = []
        for i in range(self.nlevels):
            # Note that the tile size is nlevels * quantization!
            # Coordinates for this tile.
            point = list(quantized - (quantized - i) % self.nlevels)
            # Label the ith tile so that it gets hashed uniquely.
            point.append(i)
            coords.append(tuple(point))
        return coords

    def response(self, vector, response, quantized=False):
        """
        Train the CMAC on one (input, target) pair; returns the
        prediction made *before* the weight update.
        """
        # Coordinates for each level tiling.
        if quantized == False:
            coords = self.quantize(vector)
        else:
            coords = vector
        # Use Python's own hashing for storing feature weights. If you
        # roll your own you'll have to learn about Universal Hashing.
        prediction = sum([self.weights.setdefault(pt, 0.0) for pt in coords]) / len(coords)
        error = self.beta * (response - prediction)
        for pt in coords:
            self.weights[pt] += error
        return prediction

    def __len__(self):
        # Number of tiles that have been touched so far.
        return len(self.weights)

    def eval(self, vector, quantized=False):
        """
        Eval the CMAC at *vector* (average of the active tile weights).
        """
        # Coordinates for each level tiling.
        if quantized == False:
            coords = self.quantize(vector)
        else:
            coords = vector
        return sum([self.weights.setdefault(pt, 0.0) for pt in coords]) / len(coords)
class TraceCMAC(CMAC):
    """
    CMAC that can be easily plugged into TD learning with eligibility traces.
    """
    init = 1.0

    def __init__(self, nlevels, quantization, beta, decay, inc=1.0, replace=True, init=1.0):
        """decay: per-step trace decay; inc: trace increment; replace:
        replacing vs. accumulating traces; init: initial tile weight."""
        CMAC.__init__(self, nlevels, quantization, beta)
        self.traces = {}       # eligibility traces
        self.decay = decay     # decay parameter
        self.inc = inc
        self.replace = replace
        self.init = init

    def train(self, vector, delta):
        """Decay traces, activate the current tiles, apply the TD update."""
        active = self.quantize(vector)
        # Decay every trace, dropping any that have become negligible.
        self.traces = {
            key: self.decay * val
            for key, val in self.traces.items()
            if self.decay * val >= 0.00000001
        }
        # Activate the current tiles (replacing or accumulating traces).
        if self.replace:
            for pt in active:
                self.traces[pt] = self.inc
        else:
            for pt in active:
                self.traces[pt] = self.traces.setdefault(pt, 0.0) + self.inc
        # Weight update proportional to each trace.
        step = self.beta * delta
        for key, val in self.traces.items():
            self.weights[key] = self.weights.setdefault(key, self.init) + step * val

    def eval(self, vector, quantized=False):
        """
        Eval the CMAC (unseen tiles default to the init weight).
        """
        pts = vector if not quantized else vector
        if not quantized:
            pts = self.quantize(vector)
        return sum([self.weights.setdefault(p, self.init) for p in pts]) / len(pts)

    def __len__(self):
        return len(self.weights) + len(self.traces)

    def reset(self):
        """Clear all eligibility traces (e.g. at episode boundaries)."""
        self.traces = {}
def test(name):
    """Train a CMAC on a toy 1-D ('sin') or 2-D ('wave') regression
    problem and plot predictions and training errors with pylab.

    Fix: the 'sin' branch zipped against the undefined name 'response'
    instead of the 'responses' array, raising NameError.
    """
    if name == 'sin':
        cmac = CMAC(32, .01, 0.1)
        points = uniform(low=0, high=2 * pi, size=1000)
        responses = sin(points)
        errors = []
        for (point, response) in zip(points, responses):
            predicted = cmac.response(array([point]), response)
            errors.append(abs(response - predicted))
        # Evaluate on fresh points and plot the learned function.
        points = uniform(low=0, high=2 * pi, size=100)
        actual = []
        for point in points:
            actual.append(cmac.eval(array([point])))
        pylab.figure(1)
        pylab.plot(points, actual, '.')
        pylab.figure(2)
        pylab.plot(errors)
        pylab.show()
    elif name == 'wave':
        cmac = CMAC(32, .1, 0.01)
        points = uniform(low=0, high=2 * pi, size=(10000, 2))
        responses = sin(points[:, 0]) + cos(points[:, 1])
        errors = []
        for (point, response) in zip(points, responses):
            predicted = cmac.response(point, response)
            errors.append(abs(response - predicted))
        fig1 = pylab.figure(1)
        ax1 = Axes3D(fig1)
        ax1.scatter3D(points[:, 0], points[:, 1], responses)
        # Evaluate on fresh points and plot the learned surface.
        points = uniform(low=0, high=2 * pi, size=(10000, 2))
        predictions = []
        for point in points:
            predictions.append(cmac.eval(point))
        fig2 = pylab.figure(2)
        ax2 = Axes3D(fig2)
        ax2.scatter3D(points[:, 0], points[:, 1], predictions)
        pylab.show()
def main():
    """Command-line entry point: parse -h/--help and -d/--debug flags,
    then run the 'wave' demo.

    Fixes: usage message was missing the separator space after the
    program name, and the bare Python-2 print statement is replaced with
    the call form (prints identically under Python 2 and 3 for a single
    argument).
    """
    def usage():
        print(sys.argv[0] + " [-h] [-d]")

    try:
        (options, args) = getopt.getopt(sys.argv[1:], 'dh', ['help', 'debug'])
    except getopt.GetoptError:
        # Print help information and exit with a non-zero status.
        usage()
        sys.exit(2)
    for o, a in options:
        if o in ('-h', '--help'):
            usage()
            sys.exit()
        elif o in ('-d', '--debug'):
            pdb.set_trace()
    test('wave')


if __name__ == "__main__":
    main()
| |
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from transformers import RemBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ..test_configuration_common import ConfigTester
from ..test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertModel,
)
class TFRemBertModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_input_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
input_embedding_size=18,
output_embedding_size=43,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_labels=3,
num_choices=4,
scope=None,
):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.is_training = True
self.use_input_mask = True
self.use_token_type_ids = True
self.use_labels = True
self.vocab_size = 99
self.hidden_size = 32
self.input_embedding_size = input_embedding_size
self.output_embedding_size = output_embedding_size
self.num_hidden_layers = 5
self.num_attention_heads = 4
self.intermediate_size = 37
self.hidden_act = "gelu"
self.hidden_dropout_prob = 0.1
self.attention_probs_dropout_prob = 0.1
self.max_position_embeddings = 512
self.type_vocab_size = 16
self.type_sequence_label_size = 2
self.initializer_range = 0.02
self.num_labels = 3
self.num_choices = 4
self.scope = None
def prepare_config_and_inputs(self):
    """Build a RemBertConfig plus random input ids, masks and labels for
    one batch, honoring the use_* flags."""
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

    input_mask = None
    if self.use_input_mask:
        # Random 0/1 attention mask.
        input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)

    config = RemBertConfig(
        vocab_size=self.vocab_size,
        hidden_size=self.hidden_size,
        input_embedding_size=self.input_embedding_size,
        output_embedding_size=self.output_embedding_size,
        num_hidden_layers=self.num_hidden_layers,
        num_attention_heads=self.num_attention_heads,
        intermediate_size=self.intermediate_size,
        hidden_act=self.hidden_act,
        hidden_dropout_prob=self.hidden_dropout_prob,
        attention_probs_dropout_prob=self.attention_probs_dropout_prob,
        max_position_embeddings=self.max_position_embeddings,
        type_vocab_size=self.type_vocab_size,
        initializer_range=self.initializer_range,
        return_dict=True,
    )
    return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def prepare_config_and_inputs_for_decoder(self):
    """Like prepare_config_and_inputs, but with is_decoder=True and
    random encoder hidden states / attention mask appended."""
    (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
    ) = self.prepare_config_and_inputs()

    config.is_decoder = True
    encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
    encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

    return (
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    )
def create_and_check_model(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the base model on dict, list and tensor-only inputs.

    BUG FIX: the original built the keyword-style inputs dict but then
    immediately overwrote it without ever calling the model, so the dict
    calling convention was never exercised (sibling checks such as
    create_and_check_causal_lm_base_model do call it). The dict call is
    now performed.
    """
    model = TFRemBertModel(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)

    inputs = [input_ids, input_mask]
    result = model(inputs)

    result = model(input_ids)

    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_causal_lm_base_model(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the base model with is_decoder=True (no cross-attention,
    no encoder outputs) on dict, list and tensor-only inputs."""
    config.is_decoder = True

    model = TFRemBertModel(config=config)
    inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
    result = model(inputs)

    inputs = [input_ids, input_mask]
    result = model(inputs)

    result = model(input_ids)

    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_model_as_decoder(
    self,
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
    encoder_hidden_states,
    encoder_attention_mask,
):
    """Check the base model as the decoder half of an encoder-decoder
    setup (cross-attention enabled, encoder outputs passed in)."""
    config.add_cross_attention = True

    model = TFRemBertModel(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
        "encoder_hidden_states": encoder_hidden_states,
        "encoder_attention_mask": encoder_attention_mask,
    }
    result = model(inputs)

    inputs = [input_ids, input_mask]
    result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states)

    # Also check the case where encoder outputs are not passed
    result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)

    self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def create_and_check_causal_lm_model(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the causal LM head: logits must be (batch, seq, vocab)."""
    config.is_decoder = True

    model = TFRemBertForCausalLM(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    prediction_scores = model(inputs)["logits"]
    self.parent.assertListEqual(
        list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
    )
def create_and_check_causal_lm_model_as_decoder(
    self,
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
    encoder_hidden_states,
    encoder_attention_mask,
):
    """Check the causal LM head with cross-attention and encoder outputs."""
    config.add_cross_attention = True

    model = TFRemBertForCausalLM(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
        "encoder_hidden_states": encoder_hidden_states,
        "encoder_attention_mask": encoder_attention_mask,
    }
    result = model(inputs)

    inputs = [input_ids, input_mask]
    result = model(inputs, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states)

    prediction_scores = result["logits"]
    self.parent.assertListEqual(
        list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
    )
def create_and_check_causal_lm_model_past(
    self,
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
):
    """Check that decoding with cached past_key_values matches a full
    no-cache forward pass on the last position."""
    config.is_decoder = True

    model = TFRemBertForCausalLM(config=config)

    # first forward pass
    outputs = model(input_ids, use_cache=True)
    outputs_use_cache_conf = model(input_ids)
    outputs_no_past = model(input_ids, use_cache=False)

    # Caching defaults on for decoders; disabling it removes one output.
    self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
    self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

    past_key_values = outputs.past_key_values

    # create hypothetical next token and extent to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

    # append to next input_ids and attn_mask
    next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)

    output_from_no_past = model(next_input_ids, output_hidden_states=True).hidden_states[0]
    output_from_past = model(
        next_tokens, past_key_values=past_key_values, output_hidden_states=True
    ).hidden_states[0]

    # select random slice
    random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
    output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
    output_from_past_slice = output_from_past[:, 0, random_slice_idx]

    # test that outputs are equal for slice
    tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6)
def create_and_check_causal_lm_model_past_with_attn_mask(
    self,
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
):
    """Check cached decoding when part of the sequence is masked out:
    masked positions may be scrambled without changing the outputs."""
    config.is_decoder = True

    model = TFRemBertForCausalLM(config=config)

    # create attention mask: first half attended, second half masked
    half_seq_length = self.seq_length // 2
    attn_mask_begin = tf.ones((self.batch_size, half_seq_length), dtype=tf.int32)
    attn_mask_end = tf.zeros((self.batch_size, self.seq_length - half_seq_length), dtype=tf.int32)
    attn_mask = tf.concat([attn_mask_begin, attn_mask_end], axis=1)

    # first forward pass
    outputs = model(input_ids, attention_mask=attn_mask, use_cache=True)

    # create hypothetical next token and extent to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

    past_key_values = outputs.past_key_values

    # change a random masked slice from input_ids — it must not affect
    # the outputs because those positions are masked
    random_seq_idx_to_change = ids_tensor((1,), half_seq_length).numpy() + 1
    random_other_next_tokens = ids_tensor((self.batch_size, self.seq_length), config.vocab_size)
    vector_condition = tf.range(self.seq_length) == (self.seq_length - random_seq_idx_to_change)
    condition = tf.transpose(
        tf.broadcast_to(tf.expand_dims(vector_condition, -1), (self.seq_length, self.batch_size))
    )
    input_ids = tf.where(condition, random_other_next_tokens, input_ids)

    # append to next input_ids and
    next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
    attn_mask = tf.concat(
        [attn_mask, tf.ones((attn_mask.shape[0], 1), dtype=tf.int32)],
        axis=1,
    )

    output_from_no_past = model(
        next_input_ids,
        attention_mask=attn_mask,
        output_hidden_states=True,
    ).hidden_states[0]
    output_from_past = model(
        next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, output_hidden_states=True
    ).hidden_states[0]

    # select random slice
    random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
    output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx]
    output_from_past_slice = output_from_past[:, 0, random_slice_idx]

    # test that outputs are equal for slice
    tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-6)
def create_and_check_causal_lm_model_past_large_inputs(
    self,
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
):
    """Check cached decoding with a multi-token (3-token) continuation
    on a batch of size 1."""
    config.is_decoder = True

    model = TFRemBertForCausalLM(config=config)

    # Shrink the batch to a single example.
    input_ids = input_ids[:1, :]
    input_mask = input_mask[:1, :]
    self.batch_size = 1

    # first forward pass
    outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
    past_key_values = outputs.past_key_values

    # create hypothetical next token and extent to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
    next_attn_mask = ids_tensor((self.batch_size, 3), 2)

    # append to next input_ids and
    next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
    next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1)

    output_from_no_past = model(
        next_input_ids,
        attention_mask=next_attention_mask,
        output_hidden_states=True,
    ).hidden_states[0]
    output_from_past = model(
        next_tokens,
        attention_mask=next_attention_mask,
        past_key_values=past_key_values,
        output_hidden_states=True,
    ).hidden_states[0]

    self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

    # select random slice
    random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
    output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
    output_from_past_slice = output_from_past[:, :, random_slice_idx]

    # test that outputs are equal for slice
    tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def create_and_check_decoder_model_past_large_inputs(
    self,
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
    encoder_hidden_states,
    encoder_attention_mask,
):
    """Check cached decoding with cross-attention and a 3-token
    continuation on a batch of size 1."""
    config.add_cross_attention = True

    model = TFRemBertForCausalLM(config=config)

    # Shrink the batch to a single example.
    input_ids = input_ids[:1, :]
    input_mask = input_mask[:1, :]
    encoder_hidden_states = encoder_hidden_states[:1, :, :]
    encoder_attention_mask = encoder_attention_mask[:1, :]
    self.batch_size = 1

    # first forward pass
    outputs = model(
        input_ids,
        attention_mask=input_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        use_cache=True,
    )
    past_key_values = outputs.past_key_values

    # create hypothetical next token and extent to next_input_ids
    next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
    next_attn_mask = ids_tensor((self.batch_size, 3), 2)

    # append to next input_ids and
    next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
    next_attention_mask = tf.concat([input_mask, next_attn_mask], axis=-1)

    output_from_no_past = model(
        next_input_ids,
        attention_mask=next_attention_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        output_hidden_states=True,
    ).hidden_states[0]
    output_from_past = model(
        next_tokens,
        attention_mask=next_attention_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_attention_mask,
        past_key_values=past_key_values,
        output_hidden_states=True,
    ).hidden_states[0]

    self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

    # select random slice
    random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
    output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
    output_from_past_slice = output_from_past[:, :, random_slice_idx]

    # test that outputs are equal for slice
    tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def create_and_check_for_masked_lm(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the masked-LM head: logits must be (batch, seq, vocab)."""
    model = TFRemBertForMaskedLM(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def create_and_check_for_sequence_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the sequence-classification head: logits (batch, num_labels)."""
    config.num_labels = self.num_labels
    model = TFRemBertForSequenceClassification(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }

    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
def create_and_check_for_multiple_choice(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the multiple-choice head: inputs are tiled per choice and
    logits must be (batch, num_choices)."""
    config.num_choices = self.num_choices
    model = TFRemBertForMultipleChoice(config=config)
    multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
    multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
    multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
    inputs = {
        "input_ids": multiple_choice_inputs_ids,
        "attention_mask": multiple_choice_input_mask,
        "token_type_ids": multiple_choice_token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def create_and_check_for_token_classification(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the token-classification head: logits (batch, seq, num_labels)."""
    config.num_labels = self.num_labels
    model = TFRemBertForTokenClassification(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }
    result = model(inputs)
    self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def create_and_check_for_question_answering(
    self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
):
    """Check the QA head: start/end logits must each be (batch, seq)."""
    model = TFRemBertForQuestionAnswering(config=config)
    inputs = {
        "input_ids": input_ids,
        "attention_mask": input_mask,
        "token_type_ids": token_type_ids,
    }

    result = model(inputs)
    self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
    self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFRemBertModelTest(TFModelTesterMixin, unittest.TestCase):
    """Standard model-tester suite for the TF RemBert family."""

    all_model_classes = (
        (
            TFRemBertModel,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFRemBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RemBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        """Test the base model"""
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*inputs)

    def test_causal_lm_base_model(self):
        """Test the base model of the causal LM model

        is_decoder=True, no cross_attention, no encoder outputs
        """
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_causal_lm_base_model(*inputs)

    def test_model_as_decoder(self):
        """Test the base model as a decoder (of an encoder-decoder architecture)

        is_decoder=True + cross_attention + pass encoder outputs
        """
        inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*inputs)

    def test_for_masked_lm(self):
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*inputs)

    def test_for_causal_lm(self):
        """Test the causal LM model"""
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_causal_lm_model(*inputs)

    def test_causal_lm_model_as_decoder(self):
        """Test the causal LM model as a decoder"""
        inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_causal_lm_model_as_decoder(*inputs)

    def test_causal_lm_model_past(self):
        """Test causal LM model with `past_key_values`"""
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_causal_lm_model_past(*inputs)

    def test_causal_lm_model_past_with_attn_mask(self):
        """Test the causal LM model with `past_key_values` and `attention_mask`"""
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_causal_lm_model_past_with_attn_mask(*inputs)

    def test_causal_lm_model_past_with_large_inputs(self):
        """Test the causal LM model with `past_key_values` and a longer decoder sequence length"""
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_causal_lm_model_past_large_inputs(*inputs)

    def test_decoder_model_past_with_large_inputs(self):
        """Similar to `test_causal_lm_model_past_with_large_inputs` but with cross-attention"""
        inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*inputs)

    def test_for_multiple_choice(self):
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*inputs)

    def test_for_question_answering(self):
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*inputs)

    def test_for_sequence_classification(self):
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*inputs)

    def test_for_token_classification(self):
        inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRemBertModel.from_pretrained("google/rembert")
        self.assertIsNotNone(model)
@require_tf
class TFRemBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_model(self):
        """Smoke-test a forward pass against hard-coded reference activations."""
        model = TFRemBertModel.from_pretrained("google/rembert")

        input_ids = tf.constant([[312, 56498, 313, 2125, 313]])
        segment_ids = tf.constant([[0, 0, 0, 1, 1]])
        outputs = model(input_ids, token_type_ids=segment_ids, output_hidden_states=True)

        hidden_size = 1152
        self.assertEqual(outputs["last_hidden_state"].shape, [1, 5, hidden_size])

        # Reference values produced by this implementation (first 3 hidden dims).
        expected_slice = tf.constant(
            [
                [
                    [0.0754, -0.2022, 0.1904],
                    [-0.3354, -0.3692, -0.4791],
                    [-0.2314, -0.6729, -0.0749],
                    [-0.0396, -0.3105, -0.4234],
                    [-0.1571, -0.0525, 0.5353],
                ]
            ]
        )
        tf.debugging.assert_near(outputs["last_hidden_state"][:, :, :3], expected_slice, atol=1e-4)

        # Running on the original tf implementation gives slightly different results here.
        # Not clear why this variations is present
        # TODO: Find reason for discrepancy
        # expected_original_implementation = [[
        #     [0.07630594074726105, -0.20146065950393677, 0.19107051193714142],
        #     [-0.3405614495277405, -0.36971670389175415, -0.4808273911476135],
        #     [-0.22587086260318756, -0.6656315922737122, -0.07844287157058716],
        #     [-0.04145475849509239, -0.3077218234539032, -0.42316967248916626],
        #     [-0.15887849032878876, -0.054529931396245956, 0.5356100797653198]
        # ]]
| |
from itertools import chain
from operator import attrgetter

from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import Http404, HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from django.utils import timezone
from django.views.decorators.http import require_http_methods

from us_ignite.actionclusters.models import (ActionCluster, ActionClusterMembership,
                                             Domain as ACDomain)
from us_ignite.apps.forms import (ApplicationForm, ApplicationLinkFormSet,
                                  MembershipForm, ApplicationMediaFormSet,
                                  ApplicationMembershipFormSet)
from us_ignite.apps.models import (Application, ApplicationMembership,
                                   ApplicationVersion, Domain, Page)
from us_ignite.awards.models import ApplicationAward
from us_ignite.common import pagination, forms
from us_ignite.hubs.forms import HubAppMembershipForm
from us_ignite.hubs.models import HubAppMembership
# (form value, human label) pairs for the app-list ordering dropdown.
# An empty value means "no explicit ordering"; a leading '-' on a field
# name requests descending order (Django ``order_by`` convention).
APPS_SORTING_CHOICES = (
    ('', 'Select ordering'),
    ('created', 'Created (Oldest first)'),
    ('-created', 'Created (Recent first)'),
    ('stage', 'Stage (Ideas first)'),
    ('-stage', 'Stage (Completed first)'),
)
def get_stage_or_404(stage):
    """Return the ``(pk, name)`` stage choice matching ``stage``.

    Raises ``Http404`` when ``stage`` does not match any of
    ``Application.STAGE_CHOICES`` -- including when it is not numeric
    at all (previously a non-numeric value raised ``ValueError`` and
    produced a 500 instead of a 404).
    """
    try:
        stage_pk = int(stage)
    except (TypeError, ValueError):
        raise Http404('Invalid stage.')
    for pk, name in Application.STAGE_CHOICES:
        if pk == stage_pk:
            return (pk, name)
    raise Http404('Invalid stage.')
def app_list(request, domain=None, stage=None, filter_name=''):
    """Lists the published ``Applications`` and ``ActionClusters``.

    Args:
        request: the ``HttpRequest``.
        domain: optional ``Domain`` slug used to filter the listing.
        stage: optional stage id used to filter the listing.
        filter_name: human-readable name of the active filter.
    """
    extra_qs = {}
    if domain:
        # Validate domain is valid if provided:
        extra_qs['domain'] = get_object_or_404(Domain, slug=domain)
        filter_name = extra_qs['domain'].name
    if stage:
        # Validate stage is valid if provided:
        pk, name = get_stage_or_404(stage)
        extra_qs['stage'] = pk
        filter_name = name
    page_no = pagination.get_page_no(request.GET)
    order_form = forms.OrderForm(
        request.GET, order_choices=APPS_SORTING_CHOICES)
    order_value = order_form.cleaned_data['order'] if order_form.is_valid() else ''
    object_list_app = Application.objects.select_related('domain').filter(
        status=Application.PUBLISHED, **extra_qs)
    object_list_ac = ActionCluster.objects.select_related('domain').filter(
        status=ActionCluster.PUBLISHED, **extra_qs)
    object_list = list(chain(object_list_app, object_list_ac))
    if order_value:
        # BUG FIX: ``object_list`` is a plain list (two querysets chained
        # together), so calling ``QuerySet.order_by`` on it raised
        # AttributeError whenever an ordering was selected. Sort in Python
        # instead, honouring the '-' prefix for descending order. The sort
        # keys ('created'/'stage') are fields of both models per
        # APPS_SORTING_CHOICES.
        object_list.sort(
            key=attrgetter(order_value.lstrip('-')),
            reverse=order_value.startswith('-'))
    featured_list_app = Application.objects.select_related('domain').filter(
        status=Application.PUBLISHED, is_featured=True, **extra_qs)[:3]
    featured_list_ac = ActionCluster.objects.select_related('domain').filter(
        status=ActionCluster.PUBLISHED, is_featured=True, **extra_qs)[:3]
    # At most three featured entries, applications taking precedence:
    featured_list = list(chain(featured_list_app, featured_list_ac))[:3]
    page = pagination.get_page(object_list, page_no)
    context = {
        'featured_list': featured_list,
        'page': page,
        'order': order_value,
        'order_form': order_form,
        'domain_list': Domain.objects.all(),
        'stage_list': Application.STAGE_CHOICES,
        'filter_name': filter_name,
        'current_domain': domain,
        'current_stage': int(stage) if stage else None,
    }
    return TemplateResponse(request, 'apps/object_list.html', context)
def get_app_for_user(slug, user):
    """Return the active app with ``slug`` if ``user`` may see it, else 404."""
    app = get_object_or_404(Application.active, slug__exact=slug)
    # Published apps are visible to anyone; otherwise visibility is
    # decided by the model itself.
    if not app.is_visible_by(user):
        raise Http404
    return app
def get_award_list(app):
    """Return the ``Award`` objects granted to ``app``."""
    memberships = (ApplicationAward.objects
                   .select_related('award').filter(application=app))
    return [membership.award for membership in memberships]
def get_hub_list(app):
    """Return the ``Hub`` objects that ``app`` is a member of."""
    memberships = app.hubappmembership_set.select_related('hub').all()
    return [membership.hub for membership in memberships]
def app_detail(request, slug):
    """Render the detail page for a single app visible to the requester."""
    app = get_app_for_user(slug, request.user)
    # Up to three random apps from the same domain, shown as suggestions:
    related_apps = (Application.active.filter(domain=app.domain)
                    .order_by('?')[:3])
    context = {
        'object': app,
        'domain': app.domain,
        'url_list': app.applicationurl_set.all(),
        'media_list': app.applicationmedia_set.all(),
        'feature_list': app.features.all(),
        'member_list': app.members.select_related('profile').all(),
        'hub_list': get_hub_list(app),
        'related_list': related_apps,
        'award_list': get_award_list(app),
        'can_edit': app.is_editable_by(request.user),
        'is_owner': app.is_owned_by(request.user),
    }
    return TemplateResponse(request, 'apps/object_detail.html', context)
@login_required
def app_add(request):
    """Handle creation of a new ``Application`` owned by the requester."""
    if request.method == 'POST':
        form = ApplicationForm(request.POST, request.FILES)
        if form.is_valid():
            application = form.save(commit=False)
            application.owner = request.user
            application.save()
            # ``commit=False`` skips the m2m data, so persist it explicitly:
            form.save_m2m()
            messages.success(
                request, 'The application "%s" has been added.' % application.name)
            return redirect(application.get_absolute_url())
    else:
        form = ApplicationForm()
    return TemplateResponse(request, 'apps/object_add.html', {'form': form})
@login_required
def app_edit(request, slug):
    """Let an authorised user update an app plus its links and media."""
    app = get_object_or_404(Application.active, slug__exact=slug)
    if not app.is_editable_by(request.user):
        raise Http404
    if request.method == 'POST':
        form = ApplicationForm(request.POST, request.FILES, instance=app)
        link_formset = ApplicationLinkFormSet(request.POST, instance=app)
        image_formset = ApplicationMediaFormSet(
            request.POST, request.FILES, instance=app)
        # All three must validate before anything is written:
        if (form.is_valid() and link_formset.is_valid()
            and image_formset.is_valid()):
            application = form.save()
            link_formset.save()
            image_formset.save()
            messages.success(
                request, 'The application "%s" has been updated.' % application.name)
            return redirect(application.get_absolute_url())
    else:
        form = ApplicationForm(instance=app)
        link_formset = ApplicationLinkFormSet(instance=app)
        image_formset = ApplicationMediaFormSet(instance=app)
    return TemplateResponse(request, 'apps/object_edit.html', {
        'object': app,
        'form': form,
        'link_formset': link_formset,
        'image_formset': image_formset,
    })
@require_http_methods(["POST"])
@login_required
def app_version_add(request, slug):
    """Snapshot the current app content as a new ``ApplicationVersion``."""
    app = get_object_or_404(Application.active, slug__exact=slug)
    if not app.is_editable_by(request.user):
        raise Http404
    latest = ApplicationVersion.objects.get_latest_version(app)
    current_signature = app.get_signature()
    latest_signature = latest.get_signature() if latest else None
    # Only create a new version when the content actually changed:
    if latest_signature == current_signature:
        messages.success(request, 'Latest changes have been versioned already.')
    else:
        ApplicationVersion.objects.create_version(app)
        messages.success(request, 'Application has been versioned.')
    return redirect(app.get_absolute_url())
def app_version_detail(request, slug, version_slug):
    """Show one archived version of an app alongside its sibling versions."""
    app = get_app_for_user(slug, request.user)
    # Split the versions into the requested one and all the others:
    version = None
    other_versions = []
    for candidate in app.applicationversion_set.all():
        if candidate.slug == version_slug:
            version = candidate
        else:
            other_versions.append(candidate)
    if not version:
        raise Http404
    return TemplateResponse(request, 'apps/object_version_detail.html', {
        'object': version,
        'version_list': other_versions,
        'app': app,
    })
def create_member(app, user):
    """Create the app membership for ``user`` when missing.

    Returns the new membership, or ``None`` when it already existed.
    """
    membership, created = (ApplicationMembership.objects
                           .get_or_create(application=app, user=user))
    if created:
        return membership
    return None
@login_required
def app_membership(request, slug):
    """Let the app owner add and manage collaborators."""
    app = get_object_or_404(
        Application.active, slug__exact=slug)
    # Only the owner may manage membership:
    if not app.is_owned_by(request.user):
        raise Http404
    if request.method == 'POST':
        form = MembershipForm(request.POST)
        formset = ApplicationMembershipFormSet(request.POST, instance=app)
        if form.is_valid() and formset.is_valid():
            # Add any newly listed collaborators, then apply formset changes:
            for collaborator in form.cleaned_data['collaborators']:
                create_member(app, collaborator)
            formset.save()
            messages.success(request, 'Membership successfully updated.')
            return redirect(app.get_membership_url())
    else:
        form = MembershipForm()
        formset = ApplicationMembershipFormSet(instance=app)
    return TemplateResponse(request, 'apps/object_membership.html', {
        'object': app,
        'form': form,
        'formset': formset,
    })
def apps_featured(request):
    """Shows the currently featured applications page."""
    page = get_object_or_404(Page, status=Page.FEATURED)
    context = {
        'object': page,
        'application_list': [entry.application
                             for entry in page.pageapplication_set.all()],
    }
    return TemplateResponse(request, 'apps/featured.html', context)
def apps_featured_archive(request, slug):
    """Shows a previously published featured-applications page."""
    page = get_object_or_404(Page, status=Page.PUBLISHED, slug__exact=slug)
    context = {
        'object': page,
        'application_list': [entry.application
                             for entry in page.pageapplication_set.all()],
    }
    return TemplateResponse(request, 'apps/featured.html', context)
@login_required
def app_export(request, slug):
    """Return a plain-text export of the app, for its members only."""
    app = get_object_or_404(Application.active, slug__exact=slug)
    if not app.has_member(request.user):
        raise Http404
    body = render_to_string('apps/export.txt', {
        'object': app,
        'url_list': app.applicationurl_set.all(),
        'image_list': app.applicationmedia_set.all(),
        'feature_list': app.features.all(),
        'member_list': app.members.select_related('profile').all(),
    })
    response = HttpResponse(body, content_type='text/plain')
    # Timestamped attachment name, e.g. ``my-app-export-20140101-120000.txt``:
    filename = '%s-export-%s' % (
        app.slug, timezone.now().strftime("%Y%m%d-%H%M%S"))
    response['Content-Disposition'] = (
        'attachment; filename="%s.txt"' % filename)
    response['Content-Length'] = len(response.content)
    return response
def _get_membership_form(membership_list):
    """Build a ``HubAppMembershipForm`` pre-filled with the current hub ids."""
    hub_ids = [membership.hub.id for membership in membership_list]
    if hub_ids:
        return HubAppMembershipForm({'hubs': hub_ids})
    # No existing membership: return an unbound form.
    return HubAppMembershipForm()
def _update_membership(app, hub_list, membership_list):
    """Sync ``app``'s hub memberships to exactly ``hub_list``.

    Deletes memberships whose hub is no longer selected, then ensures a
    membership exists for every hub in ``hub_list``.

    Returns:
        The list of ``HubAppMembership`` instances for ``hub_list``.
    """
    # Remove any non selected hub membership:
    for membership in membership_list:
        if membership.hub not in hub_list:
            membership.delete()
    # Add any new Hub membership (existing ones are returned as-is).
    # The unused ``new_membership_list = []`` local has been removed.
    return [_add_hub_membership(hub, app) for hub in hub_list]
def _add_hub_membership(hub, app):
    """Generates the hub membership, recording activity when it is new."""
    membership, created = HubAppMembership.objects.get_or_create(
        hub=hub, application=app)
    if created:
        # Record the activity for this membership.
        message = ('App %s has been registered as part of this '
                   'community.' % app.name)
        hub.record_activity(message, extra_data={
            'url': app.get_absolute_url(),
            'user': app.owner,
        })
    return membership
@login_required
def app_hub_membership(request, slug):
    """View to manage the membership of an app to a hub."""
    app = get_object_or_404(Application.active, slug__exact=slug)
    if not app.is_editable_by(request.user):
        raise Http404
    # Existing memberships drive the form's initial selection:
    current_memberships = app.hubappmembership_set.select_related('hub').all()
    if request.method == 'POST':
        form = HubAppMembershipForm(request.POST)
        if form.is_valid():
            selected_hubs = form.cleaned_data['hubs']
            _update_membership(app, selected_hubs, current_memberships)
            messages.success(request, 'Hub membership updated.')
            return redirect(app.get_absolute_url())
    else:
        form = _get_membership_form(current_memberships)
    context = {
        'object': app,
        'form': form,
    }
    return TemplateResponse(
        request, 'apps/object_hub_membership.html', context)
| |
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs all benchmarks in PerfKitBenchmarker.
All benchmarks in PerfKitBenchmarker export the following interface:
GetInfo: this returns, the name of the benchmark, the number of machines
required to run one instance of the benchmark, a detailed description
of the benchmark, and if the benchmark requires a scratch disk.
Prepare: this function takes a list of VMs as an input parameter. The benchmark
will then get all binaries required to run the benchmark and, if
required, create data files.
Run: this function takes a list of VMs as an input parameter. The benchmark will
     then run the benchmark upon the machines specified. The function will
     return a dictionary containing the results of the benchmark.
Cleanup: this function takes a list of VMs as an input parameter. The benchmark
will then return the machine to the state it was at before Prepare
was called.
PerfKitBenchmarker has following run stages: prepare, run, cleanup and all.
prepare: PerfKitBenchmarker will read command-line flags, decide which
benchmarks to run, and create the necessary resources for each benchmark,
including networks, VMs, disks and keys. It then executes the Prepare
function of each benchmark to install necessary software, upload data
files, etc., and generates a run_uri, which can be used to run the
benchmark multiple times.
run: PerfKitBenchmarker executes the Run function of each benchmark and
collects the samples generated. The publisher may publish these samples
according to settings. The run stage can be called multiple times with the
run_uri generated by the prepare stage.
cleanup: PerfKitBenchmarker will run the Cleanup function of each benchmark to
uninstall software and delete data files. Then it will delete the VMs, key
files, networks and disks generated in the prepare stage.
all: PerfKitBenchmarker will run all above stages (prepare, run, cleanup). Any
resources
generated in prepare will be automatically deleted at last.
PerfKitBenchmarker won't
be able to rerun with exactly same VMs, networks, disks with the same
run_uri.
"""
import getpass
import logging
import sys
import uuid
from perfkitbenchmarker import benchmarks
from perfkitbenchmarker import benchmark_sets
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import disk
from perfkitbenchmarker import events
from perfkitbenchmarker import flags
from perfkitbenchmarker import log_util
from perfkitbenchmarker import static_virtual_machine
from perfkitbenchmarker import timing_util
from perfkitbenchmarker import traces
from perfkitbenchmarker import version
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import windows_benchmarks
from perfkitbenchmarker.publisher import SampleCollector
# Identifiers for the --run_stage flag (see its DEFINE_enum below).
STAGE_ALL = 'all'
STAGE_PREPARE = 'prepare'
STAGE_RUN = 'run'
STAGE_CLEANUP = 'cleanup'
# Name of the log file written inside the run's temporary directory.
LOG_FILE_NAME = 'pkb.log'
# Keys that every benchmark's GetInfo() dict must provide
# (checked by ValidateBenchmarkInfo below).
REQUIRED_INFO = ['scratch_disk', 'num_machines']
# Executables that must be on PATH before any benchmark can run.
REQUIRED_EXECUTABLES = frozenset(['ssh', 'ssh-keygen', 'scp', 'openssl'])
FLAGS = flags.FLAGS

flags.DEFINE_list('ssh_options', [], 'Additional options to pass to ssh.')
flags.DEFINE_integer('parallelism', 1,
                     'The number of benchmarks to run in parallel.')
flags.DEFINE_list('benchmarks', [benchmark_sets.STANDARD_SET],
                  'Benchmarks and/or benchmark sets that should be run. The '
                  'default is the standard set. For more information about '
                  'benchmarks and benchmark sets, see the README and '
                  'benchmark_sets.py.')
flags.DEFINE_string('project', None, 'GCP project ID under which '
                    'to create the virtual machines')
flags.DEFINE_list(
    'zones', [None],
    'A list of zones within which to run PerfKitBenchmarker.'
    ' This is specific to the cloud provider you are running on. '
    'If multiple zones are given, PerfKitBenchmarker will create 1 VM in '
    'zone, until enough VMs are created as specified in each '
    'benchmark.')
# TODO(user): note that this is currently very GCE specific. Need to create a
# module which can translate from some generic types to provider specific
# nomenclature.
flags.DEFINE_string('machine_type', None, 'Machine '
                    'types that will be created for benchmarks that don\'t '
                    'require a particular type.')
flags.DEFINE_integer('num_vms', 1, 'For benchmarks which can make use of a '
                     'variable number of machines, the number of VMs to use.')
flags.DEFINE_string('image', None, 'Default image that will be '
                    'linked to the VM')
flags.DEFINE_integer('scratch_disk_size', 500, 'Size, in gb, for all scratch '
                     'disks, default is 500')
# NOTE(review): this help text says "10 characters" but the limit enforced
# in RunBenchmarks is MAX_RUN_URI_LENGTH (8) -- confirm the intended limit.
flags.DEFINE_string('run_uri', None, 'Name of the Run. If provided, this '
                    'should be alphanumeric and less than or equal to 10 '
                    'characters in length.')
flags.DEFINE_string('owner', getpass.getuser(), 'Owner name. '
                    'Used to tag created resources and performance records.')
flags.DEFINE_enum(
    'log_level', log_util.INFO,
    [log_util.DEBUG, log_util.INFO],
    'The log level to run at.')
flags.DEFINE_enum(
    'run_stage', STAGE_ALL,
    [STAGE_ALL, STAGE_PREPARE, STAGE_RUN, STAGE_CLEANUP],
    'The stage of perfkitbenchmarker to run. By default it runs all stages.')
flags.DEFINE_list('benchmark_config_pair', None,
                  'Benchmark and its config file pair, separated by :.')
flags.DEFINE_integer('duration_in_seconds', None,
                     'duration of benchmarks. '
                     '(only valid for mesh_benchmark)')
flags.DEFINE_string('static_vm_file', None,
                    'The file path for the Static Machine file. See '
                    'static_virtual_machine.py for a description of this file.')
flags.DEFINE_boolean('version', False, 'Display the version and exit.')
flags.DEFINE_boolean('v', False, 'Display the version and exit.')
flags.DEFINE_enum(
    'scratch_disk_type', disk.STANDARD,
    [disk.STANDARD, disk.REMOTE_SSD, disk.PIOPS, disk.LOCAL],
    'Type for all scratch disks. The default is standard')
flags.DEFINE_integer('scratch_disk_iops', 1500,
                     'IOPS for Provisioned IOPS (SSD) volumes in AWS.')
flags.DEFINE_integer('num_striped_disks', 1,
                     'The number of disks to stripe together to form one '
                     '"logical" scratch disk. This defaults to 1 '
                     '(except with local disks), which means no striping. '
                     'When using local disks, they default to striping '
                     'all disks together.',
                     lower_bound=1)
flags.DEFINE_bool('install_packages', True,
                  'Override for determining whether packages should be '
                  'installed. If this is false, no packages will be installed '
                  'on any VMs. This option should probably only ever be used '
                  'if you have already created an image with all relevant '
                  'packages installed.')
# Support for using a proxy in the cloud environment.
flags.DEFINE_string('http_proxy', '',
                    'Specify a proxy for HTTP in the form '
                    '[user:passwd@]proxy.server:port.')
flags.DEFINE_string('https_proxy', '',
                    'Specify a proxy for HTTPS in the form '
                    '[user:passwd@]proxy.server:port.')
flags.DEFINE_string('ftp_proxy', '',
                    'Specify a proxy for FTP in the form '
                    '[user:passwd@]proxy.server:port.')

# Hard limit on --run_uri length, enforced in RunBenchmarks.
MAX_RUN_URI_LENGTH = 8

# Register trace collectors once framework initialization completes.
events.initialization_complete.connect(traces.RegisterAll)
# TODO(user): Consider moving to benchmark_spec.
def ValidateBenchmarkInfo(benchmark_info):
  """Checks that a benchmark's GetInfo() dict contains all required keys.

  Args:
    benchmark_info: The dict returned by a benchmark module's GetInfo().

  Returns:
    True when every key in REQUIRED_INFO is present; False otherwise.
  """
  for required_key in REQUIRED_INFO:
    if required_key not in benchmark_info:
      # The original message concatenated adjacent string literals without
      # separating spaces ("containthe key", "benchmarkinfo"); fixed here.
      logging.error('Benchmark information %s is corrupt. It does not contain '
                    'the key %s. Please add the specified key to the benchmark '
                    'info. Skipping benchmark.', benchmark_info, required_key)
      # TODO(user): Raise error with info about the validation failure
      return False
  return True
def DoPreparePhase(benchmark, name, spec, timer):
  """Performs the Prepare phase of benchmark execution.

  Provisions the resources described by ``spec`` and then runs the
  benchmark module's own Prepare function. (The original docstring
  claimed a return value; this function returns None.)

  Args:
    benchmark: The benchmark module.
    name: A string containing the benchmark name.
    spec: The BenchmarkSpec created for the benchmark.
    timer: An IntervalTimer that measures the start and stop times of resource
      provisioning and the benchmark module's Prepare function.
  """
  logging.info('Preparing benchmark %s', name)
  # Pickle the spec before we try to create anything so we can clean
  # everything up on a second run if something goes wrong.
  spec.PickleSpec()
  try:
    with timer.Measure('Resource Provisioning'):
      spec.Prepare()
  finally:
    # Also pickle the spec after the resources are created so that
    # we have a record of things like AWS ids. Otherwise we won't
    # be able to clean them up on a subsequent run.
    spec.PickleSpec()
  with timer.Measure('Benchmark Prepare'):
    benchmark.Prepare(spec)
def DoRunPhase(benchmark, name, spec, collector, timer):
  """Performs the Run phase of benchmark execution.

  Args:
    benchmark: The benchmark module.
    name: A string containing the benchmark name.
    spec: The BenchmarkSpec created for the benchmark.
    collector: The SampleCollector object to add samples to.
    timer: An IntervalTimer that measures the start and stop times of the
      benchmark module's Run function.
  """
  logging.info('Running benchmark %s', name)
  # Notify listeners (e.g. registered traces) that the run phase starts.
  events.before_phase.send(events.RUN_PHASE, benchmark_spec=spec)
  try:
    with timer.Measure('Benchmark Run'):
      samples = benchmark.Run(spec)
  finally:
    # The after-phase event fires even when Run raises.
    events.after_phase.send(events.RUN_PHASE, benchmark_spec=spec)
  # Only reached when Run succeeded, so ``samples`` is always bound here.
  collector.AddSamples(samples, name, spec)
def DoCleanupPhase(benchmark, name, spec, timer):
  """Performs the Cleanup phase of benchmark execution.

  Args:
    benchmark: The benchmark module.
    name: A string containing the benchmark name.
    spec: The BenchmarkSpec created for the benchmark.
    timer: An IntervalTimer that measures the start and stop times of the
      benchmark module's Cleanup function and resource teardown.
  """
  logging.info('Cleaning up benchmark %s', name)
  # Benchmark.Cleanup only runs when the spec demands it or a static VM is
  # involved -- presumably because dynamically provisioned VMs are deleted
  # below anyway (TODO confirm). Use a generator with any() instead of
  # materializing a throwaway list.
  if spec.always_call_cleanup or any(vm.is_static for vm in spec.vms):
    with timer.Measure('Benchmark Cleanup'):
      benchmark.Cleanup(spec)
  with timer.Measure('Resource Teardown'):
    spec.Delete()
def RunBenchmark(benchmark, collector, sequence_number, total_benchmarks):
  """Runs a single benchmark and adds the results to the collector.

  Args:
    benchmark: The benchmark module to be run.
    collector: The SampleCollector object to add samples to.
    sequence_number: The sequence number of when the benchmark was started
      relative to the other benchmarks in the suite.
    total_benchmarks: The total number of benchmarks in the suite.
  """
  benchmark_info = benchmark.GetInfo()
  if not ValidateBenchmarkInfo(benchmark_info):
    return
  benchmark_name = benchmark_info['name']
  # Modify the logger prompt for messages logged within this function.
  label_extension = '{}({}/{})'.format(
      benchmark_name, sequence_number, total_benchmarks)
  log_context = log_util.GetThreadLogContext()
  with log_context.ExtendLabel(label_extension):
    # Optional prerequisite checking.
    check_prereqs = getattr(benchmark, 'CheckPrerequisites', None)
    if check_prereqs:
      try:
        check_prereqs()
      # NOTE(review): bare "except" also catches KeyboardInterrupt/SystemExit;
      # tolerable here only because the exception is re-raised after logging.
      except:
        logging.exception('Prerequisite check failed for %s', benchmark_name)
        raise
    end_to_end_timer = timing_util.IntervalTimer()
    detailed_timer = timing_util.IntervalTimer()
    spec = None
    try:
      with end_to_end_timer.Measure('End to End'):
        if FLAGS.run_stage in [STAGE_ALL, STAGE_PREPARE]:
          # It is important to create the spec outside of DoPreparePhase
          # because if DoPreparePhase raises an exception, we still need
          # a reference to the spec in order to delete it in the "finally"
          # section below.
          spec = benchmark_spec.BenchmarkSpec(benchmark_info)
          DoPreparePhase(benchmark, benchmark_name, spec, detailed_timer)
        else:
          # No prepare stage: reload the spec pickled by an earlier run.
          spec = benchmark_spec.BenchmarkSpec.GetSpecFromFile(benchmark_name)
        if FLAGS.run_stage in [STAGE_ALL, STAGE_RUN]:
          DoRunPhase(benchmark, benchmark_name, spec, collector, detailed_timer)
        if FLAGS.run_stage in [STAGE_ALL, STAGE_CLEANUP]:
          DoCleanupPhase(benchmark, benchmark_name, spec, detailed_timer)
      # Add samples for any timed interval that was measured.
      include_end_to_end = timing_util.EndToEndRuntimeMeasurementEnabled()
      include_runtimes = timing_util.RuntimeMeasurementsEnabled()
      include_timestamps = timing_util.TimestampMeasurementsEnabled()
      if FLAGS.run_stage == STAGE_ALL:
        collector.AddSamples(
            end_to_end_timer.GenerateSamples(
                include_runtime=include_end_to_end or include_runtimes,
                include_timestamps=include_timestamps),
            benchmark_name, spec)
      collector.AddSamples(
          detailed_timer.GenerateSamples(include_runtimes, include_timestamps),
          benchmark_name, spec)
    except Exception:
      # Resource cleanup (below) can take a long time. Log the error to give
      # immediate feedback, then re-throw.
      logging.exception('Error during benchmark %s', benchmark_name)
      # If the particular benchmark requests us to always call cleanup, do it
      # here.
      if (FLAGS.run_stage in [STAGE_ALL, STAGE_CLEANUP] and spec and
          spec.always_call_cleanup):
        DoCleanupPhase(benchmark, benchmark_name, spec, detailed_timer)
      raise
    finally:
      if spec:
        if FLAGS.run_stage in [STAGE_ALL, STAGE_CLEANUP]:
          spec.Delete()
        # Pickle spec to save final resource state.
        spec.PickleSpec()
def _LogCommandLineFlags():
  """Logs every explicitly-set command line flag in serialized form."""
  serialized = [flag.Serialize() for flag in FLAGS.FlagDict().values()
                if flag.present]
  logging.info('Flag values:\n%s', '\n'.join(serialized))
def RunBenchmarks(publish=True):
  """Runs all benchmarks in PerfKitBenchmarker.

  Args:
    publish: A boolean indicating whether results should be published.

  Returns:
    Exit status for the process (1 on error, None on success).
  """
  if FLAGS.version or FLAGS.v:
    # Parenthesized single-argument form prints identically on Py2 and Py3.
    print(version.VERSION)
    return

  for executable in REQUIRED_EXECUTABLES:
    if not vm_util.ExecutableOnPath(executable):
      # Lazy %-style logging args instead of eager interpolation.
      logging.error('Could not find required executable "%s".', executable)
      return 1

  if FLAGS.run_uri is None:
    if FLAGS.run_stage not in [STAGE_ALL, STAGE_PREPARE]:
      # Attempt to get the last modified run directory.
      run_uri = vm_util.GetLastRunUri()
      if run_uri:
        FLAGS.run_uri = run_uri
        logging.warning(
            'No run_uri specified. Attempting to run "%s" with --run_uri=%s.',
            FLAGS.run_stage, FLAGS.run_uri)
      else:
        logging.error(
            'No run_uri specified. Could not run "%s".', FLAGS.run_stage)
        return 1
    else:
      FLAGS.run_uri = str(uuid.uuid4())[-8:]
  elif not FLAGS.run_uri.isalnum() or len(FLAGS.run_uri) > MAX_RUN_URI_LENGTH:
    # BUG FIX: the message previously claimed a 10-character limit while the
    # enforced maximum is MAX_RUN_URI_LENGTH (8); derive it from the constant.
    logging.error('run_uri must be alphanumeric and less than or equal '
                  'to %d characters in length.', MAX_RUN_URI_LENGTH)
    return 1

  vm_util.GenTempDir()
  log_util.ConfigureLogging(
      stderr_log_level=log_util.LOG_LEVELS[FLAGS.log_level],
      log_path=vm_util.PrependTempDir(LOG_FILE_NAME),
      run_uri=FLAGS.run_uri)
  logging.info('PerfKitBenchmarker version: %s', version.VERSION)
  _LogCommandLineFlags()

  if FLAGS.os_type == benchmark_spec.WINDOWS and not vm_util.RunningOnWindows():
    logging.error('In order to run benchmarks on Windows VMs, you must be '
                  'running on Windows.')
    return 1

  vm_util.SSHKeyGen()
  collector = SampleCollector()
  events.initialization_complete.send(parsed_flags=FLAGS)

  if FLAGS.static_vm_file:
    with open(FLAGS.static_vm_file) as fp:
      static_virtual_machine.StaticVirtualMachine.ReadStaticVirtualMachineFile(
          fp)

  if FLAGS.benchmark_config_pair:
    # Convert benchmark_config_pair into a {benchmark_name: file_name}
    # dictionary.
    tmp_dict = {}
    for config_pair in FLAGS.benchmark_config_pair:
      pair = config_pair.split(':')
      tmp_dict[pair[0]] = pair[1]
    FLAGS.benchmark_config_pair = tmp_dict

  try:
    benchmark_list = benchmark_sets.GetBenchmarksFromFlags()
    total_benchmarks = len(benchmark_list)
    if FLAGS.parallelism > 1:
      # Parallel runs count sequence numbers down so log labels stay unique.
      sequence_range = range(total_benchmarks, 0, -1)
      args = [((benchmark, collector, sequence_counter, total_benchmarks), {})
              for benchmark, sequence_counter
              in zip(benchmark_list, sequence_range)]
      vm_util.RunThreaded(
          RunBenchmark, args, max_concurrent_threads=FLAGS.parallelism)
    else:
      sequence_range = range(1, total_benchmarks + 1)
      for benchmark, sequence_counter in zip(benchmark_list, sequence_range):
        RunBenchmark(benchmark, collector, sequence_counter, total_benchmarks)
  finally:
    # Publish whatever was collected, even on failure.
    if collector.samples:
      collector.PublishSamples()
    logging.info('Complete logs can be found at: %s',
                 vm_util.PrependTempDir(LOG_FILE_NAME))

  if FLAGS.run_stage not in [STAGE_ALL, STAGE_CLEANUP]:
    logging.info(
        'To run again with this setup, please use --run_uri=%s', FLAGS.run_uri)
def _GenerateBenchmarkDocumentation():
  """Generates benchmark documentation to show in --help.

  Returns:
    A tab-joined string with one line per benchmark of the form
    'name: description (N VMs[ with scratch volume])'; Windows benchmarks
    get a ' (Windows)' suffix on their name.
  """
  doc_lines = []
  all_modules = benchmarks.BENCHMARKS + windows_benchmarks.BENCHMARKS
  for module in all_modules:
    info = module.BENCHMARK_INFO
    # A benchmark without a fixed machine count is documented as 'variable'.
    machine_count = info.get('num_machines') or 'variable'
    disk_note = ' with scratch volume' if info.get('scratch_disk') else ''
    title = info['name']
    if module in windows_benchmarks.BENCHMARKS:
      title += ' (Windows)'
    doc_lines.append('%s: %s (%s VMs%s)' % (title,
                                            info['description'],
                                            machine_count,
                                            disk_note))
  return '\n\t'.join(doc_lines)
def Main(argv=sys.argv):
  """Program entry point: extend --help text, parse flags, run benchmarks.

  The main module's docstring is what the flags library prints for --help,
  so benchmark and benchmark-set descriptions are appended to it before the
  flags are parsed.

  Returns:
    The integer exit status from RunBenchmarks().
  """
  logging.basicConfig(level=logging.INFO)
  # TODO: Verify if there is other way of appending additional help
  # message.
  set_lines = [
      '%s: %s' %
      (set_name, benchmark_sets.BENCHMARK_SETS[set_name]['message'])
      for set_name in benchmark_sets.BENCHMARK_SETS]
  help_text = (
      'PerfKitBenchmarker version: {version}\n\n{doc}\n'
      'Benchmarks (default requirements):\n'
      '\t{benchmark_doc}').format(
          version=version.VERSION,
          doc=__doc__,
          benchmark_doc=_GenerateBenchmarkDocumentation())
  help_text += ('\n\nBenchmark Sets:\n\t%s'
                % '\n\t'.join(set_lines))
  sys.modules['__main__'].__doc__ = help_text
  try:
    argv = FLAGS(argv)  # parse flags
  except flags.FlagsError as err:
    logging.error(
        '%s\nUsage: %s ARGS\n%s', err, sys.argv[0], FLAGS)
    sys.exit(1)
  return RunBenchmarks()
| |
from tcga_encoder.models.vae.batcher_ABC import *
class DnaBatcher( TCGABatcherABC ):
  """Batcher that trains/evaluates a DNA-prediction head on a fitted latent space.

  Z means (and variances) are read from a previously written "fill" HDF
  store (self.fill_source_store) and injected as the "Z_input" layer; the
  network's "dna_predictions" layer is then evaluated against binarized
  DNA mutation data and the results (expectations, log-likelihoods, AUCs)
  are written to DNA-specific HDF stores and visualized each epoch.
  Python 2 code (print statements, has_key, xrange).
  """

  def PostInitInit(self):
    # Optionally restrict the DNA data to an explicit gene list supplied in
    # data_dict; this also fixes the DNA output dimensionality in dims_dict.
    if self.data_dict.has_key("dna_genes"):
      self.dna_genes = self.data_dict["dna_genes"]
      self.dna_store = self.dna_store[self.dna_genes]
      self.dna_dim = len(self.dna_genes)
      self.dims_dict[DNA] = self.dna_dim

  def CallBack( self, function_name, sess, cb_info ):
    # Trainer hook: "everything" runs the full fill / save / log / visualize
    # cycle for the current epoch.
    if function_name == "everything":
      self.FillDna( sess, cb_info )
      #self.TestFillZ( sess, cb_info )
      #self.TrainFillZ( sess, cb_info )
      self.SaveModel( sess, cb_info )
      self.BatchEpoch( sess, cb_info )
      self.TestEpoch( sess, cb_info )
      self.ValEpoch( sess, cb_info )
      self.VizEpochs( sess, cb_info )
      self.VizModel( sess, cb_info )

  def FillDna( self, sess, info_dict ):
    """Run DNA prediction/evaluation on VAL, the current BATCH, and all TRAIN data."""
    epoch = info_dict[EPOCH]
    # feed_dict = info_dict[TEST_FEED_DICT]
    # impute_dict = info_dict[TEST_FEED_IMPUTATION]
    #
    # self.RunFillZ( epoch, sess, feed_dict, impute_dict, mode="TEST" )
    feed_dict = info_dict[VAL_FEED_DICT]
    impute_dict = info_dict[VAL_FEED_IMPUTATION]
    self.RunFillDna( epoch, sess, feed_dict, impute_dict, mode="VAL" )
    feed_dict = info_dict[BATCH_FEED_DICT]
    impute_dict = info_dict[BATCH_FEED_IMPUTATION]
    self.batch_ids = info_dict["batch_ids"]
    self.RunFillDna( epoch, sess, feed_dict, impute_dict, mode="BATCH" )
    # Sweep the whole training set in chunks of 5000 barcodes so the TRAIN
    # fill does not need one giant feed.
    for batch_ids in chunks( np.arange(len(self.train_barcodes)), 5000 ):
      barcodes = self.train_barcodes[batch_ids]
      impute_dict = self.FillBatch( barcodes, mode = "TRAIN" ) #self.NextBatch(batch_ids)
      #impute_dict[BARCODES] = barcodes
      self.batch_ids = batch_ids
      train_feed_dict={}
      self.network.FillFeedDict( train_feed_dict, impute_dict )
      #batch = self.FillBatch( impute_dict[BARCODES], mode )
      self.RunFillDna( epoch, sess, train_feed_dict, impute_dict, mode="TRAIN" )

  def GetAlgoDictStuff(self):
    # No extra algorithm configuration needed for this batcher.
    pass

  def SummarizeData(self):
    """Compute per-gene DNA mean/std over training data and per-tissue statistics."""
    print "Running: SummarizeData()"
    self.dna_mean = self.data_store[self.DNA_keys[0]].loc[self.train_barcodes].mean(0)
    self.dna_std = self.data_store[self.DNA_keys[0]].loc[self.train_barcodes].std(0)
    self.dna_order = np.argsort( self.dna_mean.values )
    self.tissue_statistics = {}
    #pdb.set_trace()
    tissue_names = self.train_tissue.columns
    # NOTE(review): `stats` is allocated but never written below.
    stats = np.zeros( (5,len(tissue_names)))
    for t_idx, tissue in zip( range(len(tissue_names)),tissue_names ):
      bcs = self.train_tissue.loc[self.train_tissue[tissue]==1].index.values
      #pdb.set_trace()
      #mirna=self.data_store[self.miRNA_key].loc[ bcs ]
      self.tissue_statistics[ tissue ] = {}
      self.tissue_statistics[ tissue ][ DNA ] = {}
      # Default to the global (all-barcode) statistics; overwritten below by
      # tissue-specific statistics when the tissue's barcodes can be looked up.
      self.tissue_statistics[ tissue ][ DNA ][ "mean"] = self.data_store[self.DNA_keys[0]].mean(0).fillna(0)
      self.tissue_statistics[ tissue ][ DNA ][ "var"] = self.data_store[self.DNA_keys[0]].var(0).fillna(0)
      try:
        dna=self.data_store[self.DNA_keys[0]].loc[ bcs ]
        self.tissue_statistics[ tissue ][ DNA ][ "mean"] = dna.mean(0).fillna(0)
        self.tissue_statistics[ tissue ][ DNA ][ "var"] = dna.var(0).fillna(0)
      except:
        # Best-effort: keep the global statistics if this tissue has no DNA rows.
        print "No DNA for %s"%(tissue)

  # (Removed a large block of commented-out legacy MakeBarcodes() code that
  # intersected train/val barcodes with DNA-observed barcodes.)

  def InitFillStore(self):
    # Fill stores are opened lazily in StoreNames()/WriteAucs(); nothing to do here.
    pass

  def InitializeAnythingYouWant(self, sess, network ):
    """Initialize the dna_predictions layer weights from data statistics.

    beta_0 is the logit of the global per-gene mutation mean; beta holds the
    per-tissue logits. alpha (latent-to-gene weights) is initialized to zero.
    """
    print "Running : InitializeAnythingYouWant"
    self.selected_aucs = {}
    input_sources = ["DNA"]
    layers = ["dna_predictions"]
    n_tissues = len(self.data_store[self.TISSUE_key].columns)
    #self.data_store[self.TISSUE_key].loc[ batch_barcodes ]
    # 1e-5 keeps the logit finite when a gene's mean mutation rate is 0.
    m = self.dna_mean.values + 1e-5
    beta_0 = np.log( m ) - np.log( 1.0 - m )
    if np.any(np.isnan(beta_0)) or np.any(np.isinf(beta_0)):
      pdb.set_trace()
    # get log_alpha and log_beta values
    for layer_name, input_name in zip( layers, input_sources ):
      n_dims = self.dims_dict[ input_name ]
      alpha = np.zeros( (self.n_z, n_dims ), dtype = float )
      beta = np.zeros( (n_tissues, n_dims ), dtype = float )
      for t_idx, tissue in zip( range( n_tissues), self.data_store[self.TISSUE_key].columns):
        n_samples = self.train_tissue[ tissue ].sum()
        m = self.tissue_statistics[ tissue ][ DNA ][ "mean"].values
        beta[t_idx,:] = np.log( m + 1e-3 ) - np.log( 1.0 - m + 1e-3)
        if np.any(np.isnan(beta[t_idx,:])) or np.any(np.isinf(beta[t_idx,:])):
          pdb.set_trace()
      #log_alpha = np.log( alpha + 0.001 ).astype(np.float32)
      #log_beta = np.log( beta + 0.001).astype(np.float32)
      #layer = network.GetLayer( layer_name )
      #sess.run( tf.assign(layer.weights[0][0], log_alpha) )
      #sess.run( tf.assign(layer.weights[1][0], log_beta) )
      # NOTE(review): `if 1:` permanently selects the SetWeights branch; the
      # else (SetBiases) branch below is dead code as written.
      if 1:
        if len(network.GetLayer( layer_name ).weights) == 2:
          #
          print "initialize as if log reg and tissue specific biases"
          #pdb.set_trace()
          try:
            network.GetLayer( layer_name ).SetWeights( sess, [alpha, beta ])
          except:
            print "could not init bias weights"
      else:
        if network.GetLayer( layer_name ).biases is not None:
          print "initialize with tissue specific biases"
          try:
            network.GetLayer( layer_name ).SetBiases( sess, [beta_0])
          except:
            print "could not init bias biases"

  def StoreNames(self):
    """Open (and immediately close) the DNA-specific HDF stores used by this run."""
    #self.latent_store_name = self.network_name + "_" + LATENT
    #self.latent_store = OpenHdfStore(self.savedir, self.latent_store_name, mode=self.default_store_mode )
    self.model_store_name = self.network_name + "_DNA_" + MODEL
    #self.survival_store_name = self.network_name + "_" + SURVIVAL
    # open in "a" mode
    self.model_store = OpenHdfStore(self.savedir, self.model_store_name, mode="a" )
    self.epoch_store_name = self.network_name + "_DNA_" + EPOCH
    self.epoch_store = OpenHdfStore(self.savedir, self.epoch_store_name, mode=self.default_store_mode )
    self.fill_store_name = self.network_name + "_DNA_" + FILL
    self.fill_store = OpenHdfStore(self.savedir, self.fill_store_name, mode="a")
    # The source fill store (written by the base VAE run) is read-only here.
    self.fill_source_store_name = self.network_name + "_" + FILL
    self.fill_source_store = OpenHdfStore(self.savedir, self.fill_source_store_name, mode="r")
    # Stores are re-opened on demand by the methods that use them.
    self.fill_source_store.close()
    self.fill_store.close()
    #self.latent_store.close()
    self.model_store.close()
    self.epoch_store.close()

  def CloseAll(self):
    # Close every store this batcher may hold open.
    self.data_store.close()
    self.fill_source_store.close()
    self.model_store.close()
    self.epoch_store.close()

  def MakeVizFilenames(self):
    # Output image paths (under savedir) for the per-epoch visualizations.
    self.viz_filename_dna_batch_target = os.path.join( self.savedir, "dna_batch_target" )
    self.viz_filename_dna_batch_predict = os.path.join( self.savedir, "dna_batch_predict" )
    self.viz_filename_dna_aucs = os.path.join( self.savedir, "dna_aucs" )
    self.viz_filename_lower_bound = os.path.join( self.savedir, "dna_lower_bound.png" )
    self.viz_filename_error_sources_per_gene_fill = os.path.join( self.savedir, "dna_errors_fill.png" )
    #self.viz_filename_weights = os.path.join( self.savedir, "weights_" )
    self.viz_dna_weights = os.path.join( self.savedir, "dna_weights" )

  def PlotLogPdf(self):
    """Plot log p(x) vs epoch for Batch (and Test/Val when present) to a PNG."""
    f = pp.figure()
    #pdb.set_trace()
    pp.plot( self.epoch_store["Batch"]["Epoch"].values, self.epoch_store["Batch"]["log p(x)"], 'bo-', lw=2 , label="Batch")
    if self.n_test > 0:
      pp.plot( self.epoch_store["Test"]["Epoch"].values, self.epoch_store["Test"]["log p(x)"], 'ro-', lw=2, label="Test" )
    if self.n_val > 0:
      pp.plot( self.epoch_store["Val"]["Epoch"].values, self.epoch_store["Val"]["log p(x)"], 'ro-', lw=2, label="Val" )
    pp.legend( loc="lower right")
    pp.xlabel("Epoch")
    pp.ylabel("log p(x)")
    pp.grid('on')
    pp.savefig( self.viz_filename_lower_bound, dpi = 300, fmt="png", bbox_inches = "tight")
    pp.close(f)

  def FillDerivedPlaceholder( self, batch, layer_name, mode ):
    """Populate batch["Z_input"] from the source fill store's Z means.

    TRAIN/BATCH modes read /Z/TRAIN/Z/mu; other modes read /Z/VAL/Z/mu.
    NaN entries (barcodes missing from the source store) are zeroed.
    """
    if layer_name == "Z_input":
      self.fill_source_store.open()
      if mode == "BATCH" or mode == "TRAIN":
        #pdb.set_trace()
        batch_data_mu = self.fill_source_store["/Z/TRAIN/Z/mu"].loc[ batch["barcodes"] ]
        batch_data_var = self.fill_source_store["/Z/TRAIN/Z/var"].loc[ batch["barcodes"] ]
        n,d = batch_data_mu.values.shape
        # Deterministic: the sampling term is disabled (commented out).
        batch_data_values = batch_data_mu.values #+ np.sqrt(batch_data_var.values)*np.random.randn(n,d)
        #batch_data = self.fill_source_store["/Z/TRAIN/Z/mu"].loc[ batch["barcodes"] ]
        batch_data = pd.DataFrame(batch_data_values, index=batch_data_mu.index, columns=batch_data_mu.columns)
      else:
        #pdb.set_trace()
        batch_data = self.fill_source_store["/Z/VAL/Z/mu"].loc[ batch["barcodes"] ]
      nans = np.isnan( batch_data.values )
      batch_data_values = batch_data.values
      # if mode == "BATCH":
      #   batch_data_values = self.AddmiRnaNoise( batch_data.values, rate = 0.1 )
      #
      #   batch[ layer_name ] = self.NormalizemiRnaInput( batch_data_values )
      batch[ layer_name ] = batch_data
      batch[ layer_name ][nans] = 0
      self.fill_source_store.close()

  # (Removed two large blocks of commented-out legacy plotting code:
  # a multi-source PlotLogPdf(main_sources, prior_sources) variant and a
  # PlotFillLogPdf(main_sources, prior_sources) variant.)

  def PlotFillError(self,main_sources):
    """Plot per-source fill Error vs epoch for Test/Val sets to a PNG."""
    f = pp.figure(figsize=(12,10))
    legends = []
    n_sources = len(main_sources)
    for idx,target_source in zip( range(n_sources),main_sources):
      s = f.add_subplot(1,n_sources,idx+1)
      inputs = "RNA+DNA+METH"
      # query1 = self.epoch_store[BATCH_FILL_ERROR]["Target"] == target_source
      # query = query1#&query2
      # df = self.epoch_store[BATCH_FILL_ERROR][query]
      # epochs = df["Epoch"].values
      # loglik = df["Error"].values
      # if len(loglik) == 0:
      #   continue
      # pp.plot( epochs, loglik, 'o-', \
      #     color=self.source2lightcolor[target_source],\
      #     mec=self.source2mediumcolor[target_source], mew=1, \
      #     mfc=self.source2lightcolor[target_source], lw=2, \
      #     ms = 8, \
      #     label="Batch (%0.6f)"%(loglik[-1]) )
      if self.n_test > 0:
        query1 = self.epoch_store[TEST_FILL_ERROR]["Target"] == target_source
        query = query1#&query2
        df = self.epoch_store[TEST_FILL_ERROR][query]
        epochs = df["Epoch"].values
        loglik = df["Error"].values
        if len(loglik) == 0:
          continue
        pp.plot( epochs, loglik, 'o-', \
            color=self.source2darkcolor[target_source],\
            mec=self.source2darkcolor[target_source], mew=1, \
            mfc=self.source2lightcolor[target_source], lw=2, \
            ms = 8, \
            label="Test (%0.6f)"%(loglik[-1]) )
      if self.n_val > 0:
        query1 = self.epoch_store[VAL_FILL_ERROR]["Target"] == target_source
        query = query1#&query2
        df = self.epoch_store[VAL_FILL_ERROR][query]
        epochs = df["Epoch"].values
        loglik = df["Error"].values
        if len(loglik) == 0:
          continue
        pp.plot( epochs, loglik, 'v-', \
            color=self.source2mediumcolor[target_source],\
            mec=self.source2darkcolor[target_source], mew=1, \
            mfc=self.source2lightcolor[target_source], lw=2, \
            ms = 8, \
            label="Val (%0.6f)"%(loglik[-1]) )
      if idx==0:
        pp.ylabel("Error") #%(target_source))
      pp.legend(loc="upper right")
      pp.title( "%s"%(target_source))
      pp.xlabel("Epoch")
      pp.grid('on')
    #pdb.set_trace()
    pp.savefig( self.viz_filename_error_sources_per_gene_fill, dpi = 300, fmt="png", bbox_inches = "tight")
    pp.close(f)

  def VizEpochs(self, sess, info_dict ):
    """Render all per-epoch plots (log-pdf, fill errors, AUCs)."""
    print "** VIZ Epochs"
    main_sources = [DNA]
    #prior_sources = [miRNA+"_b", RNA+"_b", METH+"_b"]
    self.epoch_store.open()
    self.PlotLogPdf()
    #self.PlotLogPdf(main_sources,prior_sources)
    #self.PlotFillLogPdf(main_sources,prior_sources)
    self.PlotFillError(main_sources)
    self.PlotAucs("VAL")
    self.PlotAucs("TRAIN")
    self.epoch_store.close()
    pp.close('all')

  def RunFillDna( self, epoch, sess, feed_dict, impute_dict, mode ):
    """Evaluate dna_predictions for one set of barcodes and persist results.

    Builds the binarized DNA target matrix (union over all DNA channels,
    clipped to 1), runs the expectation and per-element log-likelihood
    tensors, then writes expectations, log-likelihoods, and AUCs.
    """
    print "COMPUTE Z-SPACE"
    # NOTE(review): use_* flags below are unused in this method body.
    use_dna = False
    use_rna = True
    use_meth = True
    use_mirna = True
    barcodes = impute_dict[BARCODES]
    batch = self.FillBatch( impute_dict[BARCODES], mode )
    #not_observed = np.setdiff1d( self.input_sources, inputs2use )
    #pdb.set_trace()
    dna_expectation_tensor = self.network.GetLayer( "dna_predictions" ).expectation
    dna_data = np.zeros( (len(barcodes),self.dna_dim) )
    # Sum mutation indicators across all DNA channels, then binarize.
    for idx,DNA_key in zip(range(len(self.DNA_keys)),self.DNA_keys):
      batch_data = self.data_store[DNA_key].loc[ barcodes ].fillna( 0 ).values
      dna_data += batch_data
    dna_data = np.minimum(1.0,dna_data)
    loglikes_data_as_matrix = self.network.loglikes_data_as_matrix
    tensors = [dna_expectation_tensor]
    tensor_names = ["dna_predictions"]
    assert len(tensor_names)==len(tensors), "should be same number"
    self.network.FillFeedDict( feed_dict, impute_dict )
    #pdb.set_trace()
    # rna_observed_query = batch[ INPUT_OBSERVATIONS ][:,self.observed_batch_order[RNA]] == 1
    # meth_observed_query = batch[ INPUT_OBSERVATIONS ][:,self.observed_batch_order[METH]] == 1
    # mirna_observed_query = batch[ INPUT_OBSERVATIONS ][:,self.observed_batch_order[miRNA]] == 1
    dna_observed_query = batch[ INPUT_OBSERVATIONS ][:,self.observed_batch_order[DNA]] == 1
    tensor2fill = []
    tensor2fill.extend( [dna_expectation_tensor, loglikes_data_as_matrix["dna_predictions"] ] )
    z_ids = [0,1]
    # ---------
    # RUN SESS
    # ---------
    self.network.FillFeedDict( feed_dict, batch )
    tensor2fill_eval = sess.run( tensor2fill, feed_dict = feed_dict )
    # ------
    # FILL EVALUATION
    # -----
    dna_expectation = tensor2fill_eval[0]
    dna_loglikelihood = tensor2fill_eval[1]
    #pdb.set_trace()
    self.WriteRunFillExpectation( epoch, DNA, barcodes, self.dna_genes, dna_observed_query, dna_expectation, dna_data, mode )
    self.WriteRunFillLoglikelihood( epoch, DNA, barcodes[dna_observed_query], self.dna_genes, dna_loglikelihood, mode )
    self.WriteAucs( epoch, DNA, barcodes, self.dna_genes, dna_observed_query, dna_expectation, dna_data, mode )

  def WriteAucs( self, epoch, target, barcodes, columns, obs_query, X, Y, mode ):
    """Compute per-gene ROC AUCs (prediction X vs target Y) and store them.

    Only genes with more than 2 positive targets among observed barcodes are
    scored; the surviving gene indices are remembered in self.selected_aucs
    so PlotAucs() can map back to global gene order.
    """
    #inputs = inputs2use[0]
    #for s in inputs2use[1:]:
    #  inputs += "+%s"%(s)
    #print "Running: WriteAucs"
    self.fill_store.open()
    if target == DNA:
      #for channel in range(self.n_dna_channels):
      s = "/AUC/%s/%s/"%(mode,target )
      #self.fill_store[ s ] = pd.DataFrame( X, index = barcodes, columns = columns )
      x_obs = X[obs_query,:] #.flatten()
      y_obs = Y[obs_query,:] # .flatten()
      auc = np.zeros( x_obs.shape[1] )
      ok = np.zeros( x_obs.shape[1] )
      for d_idx in xrange( x_obs.shape[1] ):
        if y_obs[:,d_idx].sum()>2:
          auc[d_idx] = roc_auc_score(y_obs[:,d_idx],x_obs[:,d_idx])
          ok[d_idx] = 1
        # else:
        #   auc[d_idx] = 1.0
      #errors = 1.0-auc
      #pdb.set_trace()
      self.selected_aucs[s] = pp.find(ok)
      #ok = pp.find(ok)
      auc = auc[ self.selected_aucs[s] ]
      columns = columns[ self.selected_aucs[s] ]
      self.fill_store[ s ] = pd.DataFrame( auc.reshape((1,len(auc))), columns = columns )
      #pdb.set_trace()
    self.fill_store.close()

  def PlotAucs( self, mode ):
    """Plot sorted per-gene AUCs for the given mode against mutation means."""
    self.fill_store.open()
    #pdb.set_trace()
    s = "/AUC/%s/%s/"%(mode,DNA )
    f = pp.figure(figsize=(14,4))
    ax=f.add_subplot(111)
    df = self.fill_store[s]
    I_local = np.argsort( np.squeeze(df.values))
    #print s
    #print "len(I_local) = ", len(I_local)
    #pdb.set_trace()
    # Map local (scored-gene) order back to global gene indices.
    I_global = self.selected_aucs[s][ I_local ]
    #I = self.dna_order
    mean = self.tissue_statistics[ self.validation_tissues[0] ][ DNA ][ "mean"]
    sorted_mean = pd.DataFrame( np.squeeze(mean.values)[I_global].reshape((1,len(I_global))), columns = np.array(self.dna_mean.index.values)[I_global] )
    sorted_all_mean = pd.DataFrame( np.squeeze(self.dna_mean.values)[I_global].reshape((1,len(I_global))), columns = np.array(self.dna_mean.index.values)[I_global] )
    # NOTE(review): local name `sorted` shadows the builtin.
    sorted = pd.DataFrame( np.squeeze(df.values)[I_local].reshape((1,len(I_local))), columns = np.array(df.columns)[I_local] )
    #pdb.set_trace()
    sorted_mean.T.plot(kind='bar',ax=ax, sharex=True)
    sorted.T.plot(ax=ax)
    sorted_all_mean.T.plot(kind='bar',ax=ax, fontsize=6, sharex=True)
    sorted_mean.T.plot(kind='bar',ax=ax, fontsize=6, sharex=True)
    pp.title( "mean = %0.3f median = %0.3f"%(df.values.mean(), np.median(df.values)))
    pp.savefig( self.viz_filename_dna_aucs + "_%s.png"%(mode), fmt="png", bbox_inches = "tight", dpi=600)
    self.fill_store.close()

  def Epoch( self, epoch_key, sess, info_dict, epoch, feed_dict, impute_dict, mode ):
    """Evaluate the network's batch log tensors and append them to the epoch store."""
    barcodes = impute_dict[BARCODES]
    batch_tensor_evals = sess.run( self.network.batch_log_tensors, feed_dict = feed_dict )
    # batch_counts = self.CountSourcesInDict( impute_dict )
    #
    # n_batch = []
    # for source in self.arch_dict[TARGET_SOURCES]:
    #   n_batch.append( batch_counts[source] )
    # n_batch = np.array(n_batch).astype(float)
    #
    n_batch_size = len(impute_dict[BARCODES])
    #
    # log_p_z = batch_tensor_evals[2]/float(n_batch_size)
    # log_q_z = batch_tensor_evals[3]/float(n_batch_size)
    #
    # # normalize by nbr observed for each source
    # log_p_source_z_values = batch_tensor_evals[4:]/n_batch
    #
    # #print np.sort(info_dict[BATCH_IDS])
    # new_log_p_x_given_z = log_p_source_z_values.sum()
    # lower_bound = log_p_z-log_q_z + new_log_p_x_given_z
    new_values = [epoch]
    new_values.extend( batch_tensor_evals )
    # Normalize the first logged tensor value by the number of barcodes.
    new_values[1]/=n_batch_size
    self.AddSeries( self.epoch_store, epoch_key, values = new_values, columns = self.network.batch_log_columns )
    epoch_values = [epoch]
    epoch_values.extend( batch_tensor_evals )
    #epoch_columns = ['Epoch']
    epoch_columns = self.network.batch_log_columns
    #pdb.set_trace()
    if mode == "BATCH":
      self.AddSeries( self.epoch_store, BATCH_SOURCE_LOGPDF, values = epoch_values, columns = epoch_columns )
      self.PrintRow( self.epoch_store, epoch_key )
    elif mode == "TEST" and self.n_test>0:
      self.AddSeries( self.epoch_store, TEST_SOURCE_LOGPDF, values = epoch_values, columns = epoch_columns )
      self.PrintRow( self.epoch_store, epoch_key )
    elif mode == "VAL" and self.n_val>0:
      self.AddSeries( self.epoch_store, VAL_SOURCE_LOGPDF, values = epoch_values, columns = epoch_columns )
      self.PrintRow( self.epoch_store, epoch_key )

  def VizWeightsGeneric( self, sess, info_dict ):
    """Plot histogram + sorted curve of every 'W' entry in the model store.

    Iterates model-store keys (layer/W-or-b/id), grouping consecutive keys of
    the same layer into one figure; 'b' (bias) entries are skipped.
    """
    print " -> Generic Viz"
    self.model_store.open()
    keys = self.model_store.keys()
    old_layer = ""
    needs_closing=False
    for k in keys:
      dum,layer_name, W_or_b, W_or_b_id = k.split("/")
      if W_or_b == "b":
        continue
      #print "processing %s"%(k)
      if old_layer != layer_name:
        # New layer encountered: flush the previous layer's figure first.
        if needs_closing is True:
          #print "  closing figure, ",old_layer
          pp.legend()
          pp.suptitle(old_layer)
          pp.savefig( self.viz_dna_weights + "%s.png"%old_layer, fmt="png", bbox_inches = "tight")
          pp.close(fig_)
          needs_closing = False
        if W_or_b == "W":
          #print "  new figure"
          fig_ = pp.figure()
          ax1_ = fig_.add_subplot(121)
          ax2_ = fig_.add_subplot(122)
          needs_closing = True
      if W_or_b == "W":
        #print "  adding weights, ",layer_name
        W = np.squeeze( self.model_store[k].values ).flatten()
        # NOTE(review): `normed=` is the legacy matplotlib histogram kwarg.
        ax1_.hist( W, 20, normed=True, alpha=0.5, label = "%s/%s"%(layer_name,W_or_b_id) )
        pp.grid('on')
        ax2_.plot( np.sort(W), lw=2, alpha=0.85, label = "%s/%s"%(layer_name,W_or_b_id) )
        pp.grid('on')
        needs_closing = True
      #pdb.set_trace()
      old_layer = layer_name
    # Flush the final layer's figure, if any.
    if needs_closing:
      #print "  closing figure, ",old_layer
      pp.legend()
      pp.suptitle(old_layer)
      pp.savefig( self.viz_dna_weights + "%s.png"%old_layer, fmt="png", bbox_inches = "tight")
      pp.close(fig_)
      needs_closing = False
    # try:
    #   rec_rna_weights = self.model_store[ "/rec_hidden1/W/0" ].values.flatten()
    #   f = pp.figure()
    #   pp.hist( rec_rna_weights, 50, normed=True, alpha=0.5 )
    #   pp.grid('on')
    #   pp.savefig( self.viz_filename_weights_rec_rna, dpi = 300, fmt="png", bbox_inches = "tight")
    #   pp.close(f)
    # except:
    #   print "** could not viz any model"
    self.model_store.close()
    pp.close('all')

  def VizModel( self, sess, info_dict ):
    # Entry point for model visualization; delegates to the generic weight viz.
    print "** VIZ Model"
    self.VizWeightsGeneric(sess, info_dict )

  # (Removed a commented-out legacy InitializeAnythingYouWant() variant that
  # initialized METH/RNA/miRNA generative layers from tissue statistics.)
| |
"Base Cache class."
import time
import warnings
from django.core.exceptions import ImproperlyConfigured
from django.utils.module_loading import import_string
class InvalidCacheBackendError(ImproperlyConfigured):
    """The configured cache backend is invalid or cannot be loaded."""
    pass
class CacheKeyWarning(RuntimeWarning):
    """Warning category for potentially problematic cache keys (e.g. keys a
    backend such as memcached might reject)."""
    pass
class InvalidCacheKey(ValueError):
    """Raised for a cache key that is invalid for the backend in use."""
    pass
# Sentinel object: not passing a `timeout` argument selects the backend's
# default timeout (and is distinguishable from an explicit timeout=None).
DEFAULT_TIMEOUT = object()

# Memcached does not accept keys longer than this.
MEMCACHE_MAX_KEY_LENGTH = 250
def default_key_func(key, key_prefix, version):
    """
    Default function to generate keys.

    Construct the key used by all other methods. By default, prepend
    the `key_prefix`. KEY_FUNCTION can be used to specify an alternate
    function with custom key making behavior.
    """
    # Equivalent to '%s:%s:%s' % (key_prefix, version, key): each component
    # is converted with str() and joined with ':'.
    return ':'.join([str(key_prefix), str(version), str(key)])
def get_key_func(key_func):
    """
    Function to decide which key function to use.

    Default to ``default_key_func``.
    """
    # Guard-clause form: None -> default, callable -> as-is,
    # otherwise treat it as a dotted path and import it.
    if key_func is None:
        return default_key_func
    if callable(key_func):
        return key_func
    return import_string(key_func)
class BaseCache:
_missing_key = object()
def __init__(self, params):
timeout = params.get('timeout', params.get('TIMEOUT', 300))
if timeout is not None:
try:
timeout = int(timeout)
except (ValueError, TypeError):
timeout = 300
self.default_timeout = timeout
options = params.get('OPTIONS', {})
max_entries = params.get('max_entries', options.get('MAX_ENTRIES', 300))
try:
self._max_entries = int(max_entries)
except (ValueError, TypeError):
self._max_entries = 300
cull_frequency = params.get('cull_frequency', options.get('CULL_FREQUENCY', 3))
try:
self._cull_frequency = int(cull_frequency)
except (ValueError, TypeError):
self._cull_frequency = 3
self.key_prefix = params.get('KEY_PREFIX', '')
self.version = params.get('VERSION', 1)
self.key_func = get_key_func(params.get('KEY_FUNCTION'))
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
"""
Return the timeout value usable by this backend based upon the provided
timeout.
"""
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
elif timeout == 0:
# ticket 21147 - avoid time.time() related precision issues
timeout = -1
return None if timeout is None else time.time() + timeout
def make_key(self, key, version=None):
"""
Construct the key used by all other methods. By default, use the
key_func to generate a key (which, by default, prepends the
`key_prefix' and 'version'). A different key function can be provided
at the time of cache construction; alternatively, you can subclass the
cache backend to provide custom key making behavior.
"""
if version is None:
version = self.version
return self.key_func(key, self.key_prefix, version)
    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a value in the cache if the key does not already exist. If
        timeout is given, use that timeout for the key; otherwise use the
        default cache timeout.

        Return True if the value was stored, False otherwise.
        """
        # Abstract: concrete backends must override.
        raise NotImplementedError('subclasses of BaseCache must provide an add() method')
def get(self, key, default=None, version=None):
"""
Fetch a given key from the cache. If the key does not exist, return
default, which itself defaults to None.
"""
raise NotImplementedError('subclasses of BaseCache must provide a get() method')
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
"""
Set a value in the cache. If timeout is given, use that timeout for the
key; otherwise use the default cache timeout.
"""
raise NotImplementedError('subclasses of BaseCache must provide a set() method')
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
"""
Update the key's expiry time using timeout. Return True if successful
or False if the key does not exist.
"""
raise NotImplementedError('subclasses of BaseCache must provide a touch() method')
def delete(self, key, version=None):
"""
Delete a key from the cache and return whether it succeeded, failing
silently.
"""
raise NotImplementedError('subclasses of BaseCache must provide a delete() method')
def get_many(self, keys, version=None):
"""
Fetch a bunch of keys from the cache. For certain backends (memcached,
pgsql) this can be *much* faster when fetching multiple values.
Return a dict mapping each key in keys to its value. If the given
key is missing, it will be missing from the response dict.
"""
d = {}
for k in keys:
val = self.get(k, self._missing_key, version=version)
if val is not self._missing_key:
d[k] = val
return d
def get_or_set(self, key, default, timeout=DEFAULT_TIMEOUT, version=None):
"""
Fetch a given key from the cache. If the key does not exist,
add the key and set it to the default value. The default value can
also be any callable. If timeout is given, use that timeout for the
key; otherwise use the default cache timeout.
Return the value of the key stored or retrieved.
"""
val = self.get(key, self._missing_key, version=version)
if val is self._missing_key:
if callable(default):
default = default()
self.add(key, default, timeout=timeout, version=version)
# Fetch the value again to avoid a race condition if another caller
# added a value between the first get() and the add() above.
return self.get(key, default, version=version)
return val
def has_key(self, key, version=None):
"""
Return True if the key is in the cache and has not expired.
"""
return self.get(key, self._missing_key, version=version) is not self._missing_key
def incr(self, key, delta=1, version=None):
"""
Add delta to value in the cache. If the key does not exist, raise a
ValueError exception.
"""
value = self.get(key, self._missing_key, version=version)
if value is self._missing_key:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
self.set(key, new_value, version=version)
return new_value
def decr(self, key, delta=1, version=None):
"""
Subtract delta from value in the cache. If the key does not exist, raise
a ValueError exception.
"""
return self.incr(key, -delta, version=version)
def __contains__(self, key):
"""
Return True if the key is in the cache and has not expired.
"""
# This is a separate method, rather than just a copy of has_key(),
# so that it always has the same functionality as has_key(), even
# if a subclass overrides it.
return self.has_key(key)
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
"""
Set a bunch of values in the cache at once from a dict of key/value
pairs. For certain backends (memcached), this is much more efficient
than calling set() multiple times.
If timeout is given, use that timeout for the key; otherwise use the
default cache timeout.
On backends that support it, return a list of keys that failed
insertion, or an empty list if all keys were inserted successfully.
"""
for key, value in data.items():
self.set(key, value, timeout=timeout, version=version)
return []
def delete_many(self, keys, version=None):
"""
Delete a bunch of values in the cache at once. For certain backends
(memcached), this is much more efficient than calling delete() multiple
times.
"""
for key in keys:
self.delete(key, version=version)
def clear(self):
"""Remove *all* values from the cache at once."""
raise NotImplementedError('subclasses of BaseCache must provide a clear() method')
def validate_key(self, key):
"""
Warn about keys that would not be portable to the memcached
backend. This encourages (but does not force) writing backend-portable
cache code.
"""
for warning in memcache_key_warnings(key):
warnings.warn(warning, CacheKeyWarning)
def incr_version(self, key, delta=1, version=None):
"""
Add delta to the cache version for the supplied key. Return the new
version.
"""
if version is None:
version = self.version
value = self.get(key, self._missing_key, version=version)
if value is self._missing_key:
raise ValueError("Key '%s' not found" % key)
self.set(key, value, version=version + delta)
self.delete(key, version=version)
return version + delta
def decr_version(self, key, delta=1, version=None):
"""
Subtract delta from the cache version for the supplied key. Return the
new version.
"""
return self.incr_version(key, -delta, version)
def close(self, **kwargs):
"""Close the cache connection"""
pass
def memcache_key_warnings(key):
    """Yield warning strings for cache keys that memcached would reject.

    Flags keys longer than MEMCACHE_MAX_KEY_LENGTH and keys containing
    control characters or spaces (ord < 33) or DEL (127). At most one
    warning of each kind is produced.
    """
    if len(key) > MEMCACHE_MAX_KEY_LENGTH:
        yield (
            'Cache key will cause errors if used with memcached: %r '
            '(longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH)
        )
    if any(ord(char) < 33 or ord(char) == 127 for char in key):
        yield (
            'Cache key contains characters that will cause errors if '
            'used with memcached: %r' % key
        )
| |
import re
import json
import urlparse
import random
import humanize
import time
from datetime import datetime, timedelta
from holster.enum import Enum
from holster.emitter import Priority
from disco.bot import CommandLevels
from disco.types.base import UNSET, cached_property
from disco.util.sanitize import S
from disco.api.http import APIException
from rowboat.redis import rdb
from rowboat.util.stats import timed
from rowboat.util.leakybucket import LeakyBucket
from rowboat.util.zalgo import ZALGO_RE
from rowboat.plugins import RowboatPlugin as Plugin
from rowboat.types import SlottedModel, Field, ListField, DictField, ChannelField, snowflake, lower
from rowboat.types.plugin import PluginConfig
from rowboat.models.message import Message
from rowboat.models.user import Infraction
from rowboat.plugins.modlog import Actions
from rowboat.constants import INVITE_LINK_RE, URL_RE
# Enumeration of the reasons a message or member can be censored; used to
# tag Censorship exceptions and select the modlog wording in `details`.
CensorReason = Enum(
    'INVITE',
    'DOMAIN',
    'WORD',
    'ZALGO',
    'LENGTH',
    'RAID'
)
class AntiraidSubConfig(SlottedModel):
    """Antiraid settings; consumed by CensorPlugin.anti_raid."""
    # TTL (seconds) of a member's per-join redis marker ('ar:join:...').
    key_duration = Field(int, default=600)
    # TTL (seconds) of the per-join interval marker ('ar:int:...') used to
    # count recent joins.
    interval = Field(int, default=60)
    # Number of interval markers that triggers a lockdown.
    count = Field(int, default=5)
    # TTL (seconds) of the guild-wide lockdown flag ('ar:true:...').
    lockdown_duration = Field(int, default=600)
    # Role applied to suspected raiders during a lockdown.
    raidrole = Field(snowflake)
    # Role passed to the RAID modlog action as `notify`.
    notifyrole = Field(snowflake)
class CensorSubConfig(SlottedModel):
    """A single set of censor rules, applied per-channel or per-level."""
    # Zalgo (combining-character spam) filtering.
    filter_zalgo = Field(bool, default=True)
    zalgo_channel_whitelist = ListField(snowflake)
    # Invite-link filtering: white/blacklists by guild id or invite code,
    # plus channels exempt from the filter.
    filter_invites = Field(bool, default=True)
    invites_guild_whitelist = ListField(snowflake, default=[])
    invites_whitelist = ListField(lower, default=[])
    invites_blacklist = ListField(lower, default=[])
    invites_channel_whitelist = ListField(snowflake)
    # Domain (url netloc) filtering.
    filter_domains = Field(bool, default=True)
    domains_whitelist = ListField(lower, default=[])
    domains_blacklist = ListField(lower, default=[])
    domains_channel_whitelist = ListField(snowflake)
    # Word/token filtering: words match on word boundaries, tokens anywhere
    # (see blocked_re below).
    blocked_words = ListField(lower, default=[])
    blocked_tokens = ListField(lower, default=[])
    words_channel_whitelist = ListField(snowflake)
    # Nickname filtering.
    blocked_nicknames = ListField(lower, default=[])
    block_zalgo_nicknames = Field(bool, default=False)
    # Maximum message length in characters; 0 disables the check.
    message_char_limit = Field(int, default=0)
    char_limit_channel_whitelist = ListField(snowflake)
    # Escalation: warn on each censor and/or tempmute repeat violators
    # (count violations per interval, mute for duration seconds).
    warn_on_censor = Field(bool, default=False)
    mute_violations = Field(bool, default=False)
    mute_violations_interval = Field(int, default=10)
    mute_violations_count = Field(int, default=3)
    mute_violations_duration = Field(int, default=300)
    antiraid = Field(AntiraidSubConfig, default=None)
    # Per-violation-type LeakyBucket cache slots (see get_bucket).
    _cached_max_invite_bucket = Field(str, private=True)
    _cached_max_domain_bucket = Field(str, private=True)
    _cached_max_word_bucket = Field(str, private=True)
    _cached_max_zalgo_bucket = Field(str, private=True)
    _cached_max_length_bucket = Field(str, private=True)
    # Unicode support added, thanks Xenthys
    @cached_property
    def blocked_re(self):
        """Compiled case-insensitive, unicode-aware regex matching any
        blocked token (anywhere) or blocked word (on word boundaries)."""
        # py2: map() returns lists here, so `+` concatenates them.
        return re.compile(u'({})'.format(u'|'.join(
            map(re.escape, self.blocked_tokens) +
            map(lambda k: u'\\b{}\\b'.format(re.escape(k)), self.blocked_words)
        )), re.I + re.U)
    @cached_property
    def blockednick_re(self):
        """Compiled regex matching any blocked nickname substring."""
        return re.compile('{}'.format('|'.join(
            map(lambda k: '{}'.format(re.escape(k)), self.blocked_nicknames)
        )), re.I + re.U)
    def get_bucket(self, attr, guild_id):
        """Return the per-guild LeakyBucket for violation type *attr*
        (e.g. 'max_invite'), or None when mute_violations is disabled.
        Buckets are cached on the config instance."""
        if not self.mute_violations:
            return (None)
        bucket = getattr(self, '_cached_{}_bucket'.format(attr), None)
        if not bucket:
            bucket = LeakyBucket(rdb, 'censor:{}:{}:{}'.format(attr, guild_id, '{}'), self.mute_violations_count, self.mute_violations_interval * 1000)
            setattr(self, '_cached_{}_bucket'.format(attr), bucket)
        return bucket
class CensorConfig(PluginConfig):
    """Top-level censor configuration: sub-configs keyed by channel id and
    by maximum permission level."""
    levels = DictField(int, CensorSubConfig)
    channels = DictField(ChannelField, CensorSubConfig)

    def compute_relevant_rules(self, event, level):
        """Yield every sub-config applying to *event*'s channel and to a
        user at permission *level* (channel rules first)."""
        if self.channels:
            for channel_id, rule in self.channels.items():
                if channel_id == event.channel.id:
                    yield rule
        if self.levels:
            for max_level, rule in self.levels.items():
                if level <= max_level:
                    yield rule
# It's bad kids!
class Censorship(Exception):
    """Raised by the filter_* checks when a message violates a censor rule.

    `details` renders a human-readable description for the modlog and, as
    a side effect, may warn the offending member.
    """
    def __init__(self, reason, event, ctx):
        # reason: a CensorReason member; ctx: reason-specific context dict.
        self.reason = reason
        self.event = event
        self.ctx = ctx
        self.content = S(event.content, escape_codeblocks=True)
    @property
    def details(self):
        """Build the modlog message for this censorship.

        Side effect: when ctx['warn_on_censor'] is set, issues a warn
        Infraction once per message id (guarded by a 15s redis key).
        """
        msg = ''
        if self.reason is CensorReason.INVITE:
            if self.ctx['guild']:
                # 'gg/' -> 'gg\' breaks the invite link so it is dead in logs.
                msg = u'invite `{}` to {}'.format(
                    self.ctx['invite'].replace('gg/', 'gg\\'),
                    S(self.ctx['guild']['name'], escape_codeblocks=True)
                )
            else:
                msg = u'invite `{}`'.format(self.ctx['invite'].replace('gg/', 'gg\\'))
        elif self.reason is CensorReason.DOMAIN:
            if self.ctx['hit'] == 'whitelist':
                msg = u'Domain `{}` is not in whitelist'.format(S(self.ctx['domain'], escape_codeblocks=True))
            else:
                msg = u'Domain `{}` is in blacklist'.format(S(self.ctx['domain'], escape_codeblocks=True))
        elif self.reason is CensorReason.WORD:
            msg = u'Found blacklisted words `{}`'.format(
                u', '.join([S(i, escape_codeblocks=True) for i in self.ctx['words']]))
        elif self.reason is CensorReason.ZALGO:
            msg = u'Found zalgo at position `{}` in text'.format(
                self.ctx['position']
            )
        elif self.reason is CensorReason.LENGTH:
            msg = u'Message was {} characters, {} allowed'.format(self.ctx['length'], self.ctx['allowed'])
        if self.ctx['warn_on_censor'] and not rdb.exists('censorid:{}'.format(self.event.id)):
            # Store the event ID to get around weird race condition that seems to occur in busy guilds
            # (redis-py 2.x setex signature: name, value, ttl).
            rdb.setex('censorid:{}'.format(self.event.id), self.event.member.id, 15)
            # py2: str/unicode round-trip before embedding in the reason.
            Infraction.warn(self.ctx['s'], self.event, self.event.member, 'Censor: {}'.format(str(msg).decode('latin-1')), guild=self.event.member.guild)
        return msg
class Violation(Exception):
    """Raised when a member overflows a censor rate-limit bucket.

    Carries everything CensorPlugin.violate needs to log and punish.
    """

    def __init__(self, rule, event, member, label, msg, **info):
        # Plain attribute storage; all fields are read by the punishment path.
        self.info = info
        self.msg = msg
        self.label = label
        self.member = member
        self.event = event
        self.rule = rule
@Plugin.with_config(CensorConfig)
class CensorPlugin(Plugin):
    """Enforce censor rules (invites, domains, words, zalgo, length) on
    messages and nicknames, and apply antiraid lockdowns on join floods.

    Filters raise Censorship; the message handler deletes the message,
    logs the action, and optionally escalates repeat offenders through
    leaky-bucket Violations (warn/tempmute).
    """
    # NOTE(review): this module-level list is never read or written by the
    # plugin below -- looks like dead code; confirm before removing.
    global idlist
    idlist = []

    def compute_relevant_configs(self, event, author):
        """Yield each CensorSubConfig applying to this event's channel and
        the author's permission level."""
        if hasattr(event, 'channel_id'):
            if event.channel_id in event.config.channels:
                # Consistency fix: look up with the same key that was just
                # tested for membership (previously used event.channel.id).
                yield event.config.channels[event.channel_id]
        if event.config.levels:
            user_level = int(self.bot.plugins.get('CorePlugin').get_level(event.guild, author))
            for level, config in event.config.levels.items():
                if user_level <= level:
                    yield config

    def violate(self, violation):
        """Log a Violation and tempmute the member, debounced so repeat
        violations within 10 seconds are not punished twice."""
        key = 'cv:{e.member.guild_id}:{e.member.id}'.format(e=violation.event)
        last_violated = int(rdb.get(key) or 0)
        # redis-py 2.x setex signature: (name, value, ttl).
        rdb.setex(key, int(time.time()), 60)
        if not last_violated > time.time() - 10:
            self.call(
                'ModLogPlugin.log_action_ext',
                Actions.CENSOR_DEBUG,
                violation.event.guild.id,
                v=violation
            )
            # BUGFIX: the old fallback read `violation.check`, an attribute
            # Violation never defines, which raised AttributeError whenever
            # the rule's mute_violations_duration was falsy.
            punishment_duration = violation.rule.mute_violations_duration
            Infraction.tempmute(
                self,
                violation.event,
                violation.member,
                'Too many censor violations',
                datetime.utcnow() + timedelta(seconds=punishment_duration))
            self.call('InfractionsPlugin.queue_infractions')

    def get_invite_info(self, code):
        """Resolve an invite code to {id, name, icon} of its guild, caching
        the result in redis for 12 hours. Returns None when unresolvable."""
        cache_key = 'inv:{}'.format(code)
        if rdb.exists(cache_key):
            return json.loads(rdb.get(cache_key))
        try:
            obj = self.client.api.invites_get(code)
        except Exception:
            # Best-effort: invalid/expired invites resolve to "unknown".
            return
        obj = {
            'id': obj.guild.id,
            'name': obj.guild.name,
            'icon': obj.guild.icon
        }
        # Cache for 12 hours
        rdb.setex(cache_key, json.dumps(obj), 43200)
        return obj

    @Plugin.listen('MessageUpdate')
    def on_message_update(self, event):
        """Re-run censorship on message edits, using the stored original
        author (edits carry no reliable author in all cases)."""
        try:
            msg = Message.get(id=event.id)
        except Message.DoesNotExist:
            self.log.info('Not censoring MessageUpdate for id {e.channel_id}, {e.id}, no stored message'.format(e=event))
            return
        if not event.content:
            return
        return self.on_message_create(
            event,
            author=event.guild.get_member(msg.author_id))

    @Plugin.listen('MessageCreate')
    def on_message_create(self, event, author=None):
        """Run every applicable censor filter against a message; on a hit,
        delete it, log it, and tick the violation bucket."""
        author = author or event.author
        # Ignore our own messages and webhooks.
        if author.id == self.state.me.id:
            return
        if event.webhook_id:
            return
        configs = list(self.compute_relevant_configs(event, author))
        if not configs:
            return
        tags = {'guild_id': event.guild.id, 'channel_id': event.channel.id}
        with timed('rowboat.plugin.censor.duration', tags=tags):
            try:
                # TODO: perhaps imap here? how to raise exception then?
                for config in configs:
                    if config.filter_zalgo:
                        if event.channel.id not in config.zalgo_channel_whitelist:
                            self.filter_zalgo(event, config)
                    if config.filter_invites:
                        if event.channel.id not in config.invites_channel_whitelist:
                            self.filter_invites(event, config)
                    if config.filter_domains:
                        if event.channel.id not in config.domains_channel_whitelist:
                            self.filter_domains(event, config)
                    if config.blocked_words or config.blocked_tokens:
                        if event.channel.id not in config.words_channel_whitelist:
                            self.filter_blocked_words(event, config)
                    if config.message_char_limit:
                        if event.channel.id not in config.char_limit_channel_whitelist:
                            self.filter_message_len(event, config)
            except Censorship as c:
                self.call(
                    'ModLogPlugin.create_debounce',
                    event,
                    ['MessageDelete'],
                    message_id=event.message.id,
                )
                try:
                    # Evaluate the property once: it hits redis and may warn
                    # the member as a side effect on every call.
                    details = c.details
                    if 'invite' in details:
                        censor_type = 'max_invite'
                    elif 'domain' in details:
                        censor_type = 'max_domain'
                    elif 'word' in details:
                        censor_type = 'max_word'
                    elif 'zalgo' in details:
                        censor_type = 'max_zalgo'
                    elif 'length' in details:
                        censor_type = 'max_length'
                    else:
                        censor_type = None
                    event.delete()
                    self.call(
                        'ModLogPlugin.log_action_ext',
                        Actions.CENSORED,
                        event.guild.id,
                        e=event,
                        c=c)
                    if censor_type:
                        level = int(self.bot.plugins.get('CorePlugin').get_level(event.guild, event.author))
                        member = event.guild.get_member(event.author)
                        for rule in event.config.compute_relevant_rules(event, level):
                            self.check_message_simple(event, member, rule, censor_type)
                except Violation as v:
                    self.violate(v)
                except APIException:
                    self.log.exception('Failed to delete censored message: {}'.format(event.id))

    def check_message_simple(self, event, member, rule, censor_type):
        """Tick the author's leaky bucket for *censor_type*; raise Violation
        when the bucket overflows."""
        def check_bucket(name, base_text, func):
            bucket = rule.get_bucket(name, event.guild.id)
            if not bucket:
                return
            if not bucket.check(event.author.id, func(event) if callable(func) else func):
                raise Violation(rule, event, member,
                    name.upper(),
                    base_text + ' ({} / {}s)'.format(bucket.count(event.author.id), bucket.size(event.author.id)))
        check_bucket(censor_type, 'Too many censor violations', 1)

    def filter_zalgo(self, event, config):
        """Censor messages containing zalgo (combining-character spam)."""
        s = ZALGO_RE.search(event.content)
        if s:
            raise Censorship(CensorReason.ZALGO, event, ctx={
                'position': s.start(),
                'warn_on_censor': config.warn_on_censor,
                's': self,
            })

    def filter_invites(self, event, config):
        """Censor invite links per the guild/invite white- and blacklists."""
        invites = INVITE_LINK_RE.findall(event.content)
        for _, invite in invites:
            invite_info = self.get_invite_info(invite)
            # Whitelist mode applies when any whitelist is configured, or
            # when no blacklist is configured (default-deny).
            need_whitelist = (
                config.invites_guild_whitelist or
                (config.invites_whitelist or not config.invites_blacklist)
            )
            whitelisted = False
            if invite_info and invite_info.get('id') in config.invites_guild_whitelist:
                whitelisted = True
            if invite.lower() in config.invites_whitelist:
                whitelisted = True
            if need_whitelist and not whitelisted:
                raise Censorship(CensorReason.INVITE, event, ctx={
                    # typo fix: was 'whietlist', inconsistent with the
                    # 'whitelist' value checked elsewhere.
                    'hit': 'whitelist',
                    'invite': invite,
                    'guild': invite_info,
                    'warn_on_censor': config.warn_on_censor,
                    's': self,
                })
            elif config.invites_blacklist and invite.lower() in config.invites_blacklist:
                raise Censorship(CensorReason.INVITE, event, ctx={
                    'hit': 'blacklist',
                    'invite': invite,
                    'guild': invite_info,
                    'warn_on_censor': config.warn_on_censor,
                    's': self,
                })

    def filter_domains(self, event, config):
        """Censor urls per the domain white- and blacklists (invite links
        are stripped first so they are handled only by filter_invites)."""
        urls = URL_RE.findall(INVITE_LINK_RE.sub('', event.content))
        for url in urls:
            try:
                parsed = urlparse.urlparse(url)
            except Exception:
                continue
            if (config.domains_whitelist or not config.domains_blacklist)\
                    and parsed.netloc.lower() not in config.domains_whitelist:
                raise Censorship(CensorReason.DOMAIN, event, ctx={
                    'hit': 'whitelist',
                    'url': url,
                    'domain': parsed.netloc,
                    'warn_on_censor': config.warn_on_censor,
                    's': self,
                })
            elif config.domains_blacklist and parsed.netloc.lower() in config.domains_blacklist:
                raise Censorship(CensorReason.DOMAIN, event, ctx={
                    'hit': 'blacklist',
                    'url': url,
                    'domain': parsed.netloc,
                    'warn_on_censor': config.warn_on_censor,
                    's': self,
                })

    def filter_blocked_words(self, event, config):
        """Censor messages matching the blocked words/tokens regex."""
        blocked_words = config.blocked_re.findall(event.content)
        if blocked_words:
            raise Censorship(CensorReason.WORD, event, ctx={
                'words': blocked_words,
                'warn_on_censor': config.warn_on_censor,
                's': self,
            })

    def filter_message_len(self, event, config):
        """Censor messages longer than the configured character limit."""
        if config.message_char_limit and config.message_char_limit > 0 and len(event.content) > config.message_char_limit:
            raise Censorship(CensorReason.LENGTH, event, ctx={
                'length': len(event.content),
                'allowed': config.message_char_limit,
                'warn_on_censor': config.warn_on_censor,
                's': self,
            })

    @Plugin.listen('GuildMemberUpdate', priority=Priority.BEFORE)
    def on_guild_member_update(self, event):
        """Re-check nicknames whenever a member's nick changes."""
        pre_member = event.guild.members.get(event.id)
        if not pre_member:
            return
        if (pre_member.nick or event.nick) and pre_member.nick != event.nick:
            configs = list(self.compute_relevant_configs(event, event))
            if not configs:
                return
            for config in configs:
                if config.blocked_nicknames:
                    self.filter_blocked_nicknames(event, config, pre_member)
                if config.block_zalgo_nicknames:
                    self.filter_zalgo_nicknames(event, config, pre_member)

    @Plugin.listen('PresenceUpdate', priority=Priority.BEFORE)
    def on_guild_member_presence_update(self, event):
        """Re-check nicknames when a username change matches the member's
        current nick (a censored name resurfacing via username)."""
        if not event.user:
            return
        pre_member = event.guild.members.get(event.user.id)
        pre_user = self.state.users.get(event.user.id)
        if not pre_user:
            return
        if event.user.username is not UNSET and event.user.username != pre_user.username:
            if pre_member.nick and event.user.username == pre_member.nick:
                configs = list(self.compute_relevant_configs(event, event))
                if not configs:
                    return
                for config in configs:
                    if config.blocked_nicknames:
                        self.filter_blocked_nicknames(event, config, pre_member)
                    if config.block_zalgo_nicknames:
                        self.filter_zalgo_nicknames(event, config, pre_member)

    @Plugin.listen('GuildMemberAdd')
    def on_guild_member_add(self, event):
        """Run antiraid and nickname checks for every new member."""
        configs = list(self.compute_relevant_configs(event, event))
        if not configs:
            return
        for config in configs:
            if config.antiraid:
                self.anti_raid(event, config)
            if config.blocked_nicknames:
                self.filter_blocked_nicknames(event, config)
            if config.block_zalgo_nicknames:
                self.filter_zalgo_nicknames(event, config)

    def anti_raid(self, event, config):
        """Track joins in redis; once joins within `interval` reach `count`,
        enable lockdown and apply the raid role to recent joiners."""
        now = datetime.utcnow()
        antiraidjoin_key = 'ar:join:{}:{}'.format(event.guild.id, event.member.user.id)
        antiraidinterval_key = 'ar:int:{}:{}'.format(event.guild.id, event.member.user.id)
        antiraidenabled_key = 'ar:true:{}'.format(event.guild.id)
        ttl_a = rdb.ttl(antiraidjoin_key)
        ttl_b = rdb.ttl(antiraidinterval_key)
        ttl_c = rdb.ttl(antiraidenabled_key)
        # Mark this member's join (long-lived) and their interval marker
        # (short-lived; these are what get counted).
        if not ttl_a:
            rdb.set(antiraidjoin_key, datetime.strftime(now, '%Y-%m-%d %H:%M:%S.%f'), ex=config.antiraid.key_duration)
        if not ttl_b:
            rdb.set(antiraidinterval_key, datetime.strftime(now, '%Y-%m-%d %H:%M:%S.%f'), ex=config.antiraid.interval)
        if ttl_c:
            # we are in the middle of a potential raid so just apply anti raidrole
            if config.antiraid.raidrole in event.member.roles:
                return
            self.call(
                'ModLogPlugin.create_debounce',
                event,
                ['GuildMemberUpdate'],
                user_id=event.member.user.id,
                role_id=config.antiraid.raidrole,
            )
            event.member.add_role(config.antiraid.raidrole, reason='ANTIRAID')
            self.call(
                'ModLogPlugin.log_action_ext',
                Actions.RAID_MEMBER,
                event.guild.id,
                user=event.member.user,
            )
            return
        # NOTE(review): KEYS scans the whole redis keyspace (O(n)); a
        # per-guild counter/set would scale better.
        k = rdb.keys('ar:int:{}:*'.format(event.guild.id))
        if len(k) >= config.antiraid.count:
            # enable antiraid
            rdb.set(antiraidenabled_key, datetime.strftime(now, '%Y-%m-%d %H:%M:%S.%f'), ex=config.antiraid.lockdown_duration)
            # Retro-actively apply the raid role to everyone who joined
            # within key_duration; member id is the key suffix.
            striplen = len('ar:join:{}:'.format(event.guild.id))
            ids = rdb.keys('ar:join:{}:*'.format(event.guild.id))
            for join_key in ids:
                m = event.guild.get_member(join_key[striplen:])
                if not m:
                    continue
                if config.antiraid.raidrole in m.roles:
                    continue
                self.call(
                    'ModLogPlugin.create_debounce',
                    event,
                    ['GuildMemberUpdate'],
                    user_id=m.user.id,
                    role_id=config.antiraid.raidrole,
                )
                m.add_role(config.antiraid.raidrole, reason='ANTIRAID')
                self.call(
                    'ModLogPlugin.log_action_ext',
                    Actions.RAID_MEMBER,
                    event.guild.id,
                    user=m.user,
                )
            self.call(
                'ModLogPlugin.log_action_ext',
                Actions.RAID,
                event.guild.id,
                e=event,
                notify=config.antiraid.notifyrole
            )

    @Plugin.command('antiraid', '[cmd:str...]', aliases=['raid'], level=CommandLevels.MOD)
    def antiraid_cmd(self, event, cmd=None):
        """Without arguments, report antiraid status; `antiraid disable`
        lifts a lockdown and strips the raid role from all members."""
        antiraidenabled_key = 'ar:true:{}'.format(event.guild.id)
        if cmd is None:
            since, ttl = rdb.get(antiraidenabled_key), rdb.ttl(antiraidenabled_key)
            now = datetime.utcnow()
            since = datetime.strptime(since, '%Y-%m-%d %H:%M:%S.%f') if since else now
            diff = now - since
            if not ttl:
                return event.msg.reply('No guild antiraid is currently in place.').after(5).delete()
            return event.msg.reply('Guild antiraid has been active for {}, and will expire in {} second{}.'.format(
                humanize.naturaldelta(diff), ttl, 's' if ttl != 1 else ''
            )).after(5).delete()
        if cmd.lower() == 'disable':
            rdb.delete(antiraidenabled_key)
            # find all users in the role and remove it
            i = 0
            for member in event.guild.members:
                for level, config in event.config.levels.items():
                    if not config.antiraid:
                        continue
                    if config.antiraid.raidrole in event.guild.get_member(member).roles:
                        self.call(
                            'ModLogPlugin.create_debounce',
                            event,
                            ['GuildMemberUpdate'],
                            role_id=config.antiraid.raidrole,
                        )
                        i += 1
                        event.guild.get_member(member).remove_role(config.antiraid.raidrole)
            return event.msg.reply('Antiraid has been disabled. `{}` users have been returned to normal'.format(str(i))).after(10).delete()

    def filter_blocked_nicknames(self, event, config, pre_member=None):
        """Rename (new members) or revert (existing members) anyone whose
        nick/username matches the blocked-nickname regex, and log it."""
        if not event.nick:
            # py2: fall back to the stringified username.
            nickname = unicode(event.user)
        else:
            nickname = event.nick
        blocked_nicknames = config.blockednick_re.findall(nickname)
        if blocked_nicknames:
            if not pre_member:
                # New member: assign a random placeholder nick.
                newnick = 'censored name {}'.format(random.randint(1, 5000))
                event.set_nickname(newnick)
            else:
                # Existing member: revert to their previous nick.
                pre_member.set_nickname(pre_member.nick)
            reason = 'Blacklisted usernames `{}`'.format(
                u', '.join([S(i, escape_codeblocks=True) for i in blocked_nicknames]))
            self.call(
                'ModLogPlugin.log_action_ext',
                Actions.CHANGE_NICK_BLOCKED,
                event.guild.id,
                e=event,
                before=pre_member.nick if hasattr(pre_member, 'nick') else '<NO_NICK>',
                after=event.nick if hasattr(event, 'nick') else '<NO_NICK>',
                reason=reason
            )
            if config.warn_on_censor:
                # NOTE(review): argument order differs from the
                # Infraction.warn call in Censorship.details -- confirm.
                Infraction.warn(self, self.state.me, event, reason, guild=event.member.guild)

    def filter_zalgo_nicknames(self, event, config, pre_member=None):
        """Rename/revert members whose nick/username contains zalgo."""
        if not event.nick:
            # py2: fall back to the stringified username.
            nickname = unicode(event.user)
        else:
            nickname = event.nick
        zalgo_nicknames = ZALGO_RE.search(nickname)
        if zalgo_nicknames:
            if not pre_member:
                # New member: assign a random placeholder nick.
                newnick = 'censored name {}'.format(random.randint(1, 5000))
                event.set_nickname(newnick)
            else:
                # Existing member: revert to their previous nick.
                pre_member.set_nickname(pre_member.nick)
            reason = 'Blacklisted usernames, zalgo detected'
            self.call(
                'ModLogPlugin.log_action_ext',
                Actions.CHANGE_NICK_BLOCKED,
                event.guild.id,
                e=event,
                before=pre_member.nick if hasattr(pre_member, 'nick') else '<NO_NICK>',
                after=event.nick if hasattr(event, 'nick') else '<NO_NICK>',
                reason=reason
            )
            if config.warn_on_censor:
                # NOTE(review): argument order differs from the
                # Infraction.warn call in Censorship.details -- confirm.
                Infraction.warn(self, self.state.me, event, reason, guild=event.member.guild)
| |
# Copyright 2015 Planet Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import boto3
import flask
from flask import jsonify, Response, url_for
from flask import current_app as app
import os
import simplejson as json
from querier import ArchiveQuerier, Cursor, InvalidCursor, \
DEFAULT_LOOKBACK_DAYS
from fetcher import ArchiveFileFetcher
from datalake.common.errors import NoSuchDatalakeFile
from datalake.common.metadata import Metadata, InvalidDatalakeMetadata
# Version-0 API blueprint; every route below is served under /v0.
v0 = flask.Blueprint('v0', __name__, url_prefix='/v0')
def _get_aws_kwargs():
    """Build kwargs for boto3 resource constructors from the app config.

    Credentials are included only when explicitly configured: boto3 must
    not receive them as None, or it will skip its normal credential chain.
    """
    kwargs = {'region_name': app.config.get('AWS_REGION')}
    for key in ('AWS_SECRET_ACCESS_KEY', 'AWS_ACCESS_KEY_ID'):
        value = app.config.get(key)
        if value is not None:
            kwargs[key.lower()] = value
    return kwargs
def get_dynamodb():
    """Return the app-wide DynamoDB resource, creating it on first use."""
    if not hasattr(app, 'dynamodb'):
        app.dynamodb = boto3.resource('dynamodb', **_get_aws_kwargs())
    return app.dynamodb
def get_archive_querier():
    """Return the app-wide ArchiveQuerier, creating it on first use."""
    if not hasattr(app, 'archive_querier'):
        app.archive_querier = ArchiveQuerier(
            app.config.get('DYNAMODB_TABLE'),
            dynamodb=get_dynamodb())
    return app.archive_querier
@v0.route('/archive/')
def archive_get():
    """Archive status
    Get the archive details.
    ---
    tags:
      - archive
    responses:
      200:
        description: success
        schema:
          id: DatalakeMetadataList
          required:
              - storage_url
          properties:
              storage_url:
                  type: string
                  description: base url where clients should push files.
    """
    # NOTE: the docstring above is a flasgger/swagger spec -- it is part of
    # the served API documentation, not just a comment.
    response = dict(
        storage_url=app.config.get('DATALAKE_STORAGE_URL')
    )
    return jsonify(response)
@v0.errorhandler(400)
@v0.errorhandler(404)
def handle_4xx_status(err):
    """Render aborts as the documented JSON error shape {message, code}."""
    # flask.abort(status, <code>, <message>) maps the extra positional args
    # onto HTTPException.description and .response respectively; unpack
    # them back out here.
    body = {'message': err.response, 'code': err.description}
    return jsonify(body), err.code
def _convert_param_to_ms(params, key):
    """Normalize params[key] to ms-since-epoch in place.

    Missing keys are ignored; unparseable values abort with a 400.
    """
    if key not in params:
        return
    try:
        params[key] = Metadata.normalize_date(params[key])
    except InvalidDatalakeMetadata:
        flask.abort(400, 'InvalidTime',
                    key + ' must be milliseconds since the epoch.')
def _validate_files_params(params):
    """Validate the query args for /archive/files/, aborting 400 on any
    problem. Return a mutable normalized copy: start/end converted to ms
    and the cursor deserialized.
    """
    if not params:
        flask.abort(400, 'NoArgs', 'Please provide minimal query arguments')
    if 'what' not in params:
        flask.abort(400, 'NoWhat', 'You must provide the `what` paramater')
    has_work_id = 'work_id' in params
    has_start = 'start' in params
    has_end = 'end' in params
    if not (has_work_id or has_start or has_end):
        flask.abort(400, 'NoWorkInterval',
                    'You must provide either work_id or start/end')
    if has_work_id and (has_start or has_end):
        flask.abort(400, 'InvalidWorkInterval',
                    'You must provide only work_id or start/end. Not both.')
    # start and end are a pair: exactly both or neither.
    if has_start != has_end:
        flask.abort(400, 'InvalidWorkInterval',
                    'start and end must always be provided together.')
    validated = _copy_immutable_dict(params)
    _convert_param_to_ms(validated, 'start')
    _convert_param_to_ms(validated, 'end')
    if has_start and has_end and validated['start'] > validated['end']:
        flask.abort(400, 'InvalidWorkInterval', 'start must be before end')
    _validate_cursor(validated)
    return validated
def _validate_cursor(params):
    """Deserialize params['cursor'] in place; abort 400 when malformed."""
    try:
        params['cursor'] = _get_cursor(params)
    except InvalidCursor as e:
        # py2-style exception .message attribute
        flask.abort(400, 'InvalidCursor', e.message)
def _get_cursor(params):
c = params.get('cursor')
if c is None:
return None
return Cursor.from_serialized(c)
def _copy_immutable_dict(d):
return {k: v for k, v in d.iteritems()}
@v0.route('/archive/files/')
def files_get():
    '''List files
    Retrieve metadata for files subject to query parameters.
    You must always specify the `what` parameter.
    You must either specify work_id or start/end interval of the files in which
    you are interested.
    If you specify start you must also specify end.
    Returns metadata for at most 100 files. If more files are available, the
    `next` property in the response will be a url that may be used to retrieve
    the next page of files.
    Note that no single page will contain duplicate files. However, under some
    circumstances, requests specifying a start and end time (as opposed to a
    work_id) may return duplicate records in subsequent pages. So applications
    that expect to retrieve multiple pages of results should tolerate
    duplicates. Alternatively, such applications could query for a narrower
    time interval.
    ---
    tags:
      - files
    parameters:
      - in: query
        name: what
        description:
            Only return files from here.
        type: string
        required: true
      - in: query
        name: where
        description:
            Only return files from here.
        type: string
      - in: query
        name: work_id
        description:
            Only return files with this work_id.
        type: string
      - in: query
        name: start
        description:
            Only return files with data after this start time in ms since
            the epoch.
        type: string
      - in: query
        name: end
        description:
            Only return files with data before this end time in ms since
            the epoch.
        type: string
    responses:
      200:
        description: success
        schema:
          id: DatalakeRecordList
          required:
              - records
              - next
          properties:
              records:
                  type: array
                  description: the list of metadata records matching the query.
                               May be an empty list
                  items:
                      schema:
                          id: DatalakeRecord
                          required:
                              - url
                              - metadata
                          properties:
                              url:
                                  type: string
                                  description: s3 url where the file may be retrieved
                              http_url:
                                  type: string
                                  description: http url where the file contents
                              create_time:
                                  type: integer
                                  description: the creation time of the file in the
                                               datalake (ms since the epoch)
                              size:
                                  type: integer
                                  description: the size of the file in bytes
                              metadata:
                                  schema:
                                      id: DatalakeMetadata
                                      required:
                                          - version
                                          - where
                                          - start
                                          - end
                                          - path
                                          - work_id
                                          - where
                                          - id
                                          - hash
                                      properties:
                                          version:
                                              type: integer
                                              description: the version of the metadata
                                                           record
                                          where:
                                              type: string
                                              description: where the file came from
                                          start:
                                              type: integer
                                              description: the start time of the file in ms
                                                           since the epoch
                                          end:
                                              type: integer
                                              description: the end time of the file in ms
                                                           since the epoch. This may be
                                                           null if the file is associated
                                                           with an instant
                                          path:
                                              type: string
                                              description: the path of the original file.
                                          work_id:
                                              type: string
                                              description: the work_id associated with the
                                                           file. This may be null.
                                          where:
                                              type: string
                                              description: the location or server that
                                                           generated the file
                                          what:
                                              type: string
                                              description: the process or program that
                                                           generated the file
                                          id:
                                              type: string
                                              description: the unique id of the file in the
                                                           datalake
                                          hash:
                                              type: string
                                              description: 16-byte blake2 hash of the file
                                                           content
              next:
                  type: string
                  description: url to get the next results. Will be null if
                               there are no more results
      400:
        description: bad request
        schema:
          id: DatalakeAPIError
          required:
              - code
              - message
          properties:
              code:
                  type: string
                  description: code associated with this error
              message:
                  type: string
                  description: human-readable message indicating why the
                               request failed
    '''
    params = flask.request.args
    # Aborts with a 400 on any invalid combination; returns a mutable copy
    # with start/end normalized to ms and the cursor deserialized.
    params = _validate_files_params(params)
    aq = get_archive_querier()
    response = {}
    work_id = params.get('work_id')
    if work_id is not None:
        results = aq.query_by_work_id(work_id,
                                      params.get('what'),
                                      where=params.get('where'),
                                      cursor=params.get('cursor'))
    else:
        # we are guaranteed by the validate routine that this is a start/end
        # time-based query.
        results = aq.query_by_time(params['start'],
                                   params['end'],
                                   params['what'],
                                   where=params.get('where'),
                                   cursor=params.get('cursor'))
    # Decorate each record with the canonical download url for its contents.
    [r.update(http_url=_get_canonical_http_url(r)) for r in results]
    response = {
        'records': results,
        'next': _get_next_url(flask.request, results),
    }
    return Response(json.dumps(response), content_type='application/json')
def _get_canonical_http_url(record):
    """Build the externally-visible download URL for a datalake record."""
    file_id = record['metadata']['id']
    return url_for('v0.file_get_contents', file_id=file_id, _external=True)
def _get_next_url(request, results):
if results.cursor is None:
return None
return _get_url_with_cursor(request, results.cursor)
def _get_url_with_cursor(request, cursor):
    """Rebuild the current request URL with its query args plus a cursor."""
    query_args = _copy_immutable_dict(request.args)
    query_args['cursor'] = cursor.serialized
    return url_for(request.endpoint, _external=True, **query_args)
def get_s3_bucket():
    """Return the archive's S3 bucket, creating and caching it on the app."""
    if hasattr(app, 's3_bucket'):
        return app.s3_bucket
    s3 = boto3.resource('s3', **_get_aws_kwargs())
    # The bucket name is the last path component of the storage URL.
    storage_url = app.config.get('DATALAKE_STORAGE_URL')
    bucket_name = storage_url.rstrip('/').split('/')[-1]
    app.s3_bucket = s3.Bucket(bucket_name)
    return app.s3_bucket
def get_archive_fetcher():
    """Return a lazily-created ArchiveFileFetcher cached on the app."""
    fetcher = getattr(app, 'archive_fetcher', None)
    if fetcher is None:
        fetcher = ArchiveFileFetcher(get_s3_bucket())
        app.archive_fetcher = fetcher
    return fetcher
def _get_file(file_id):
    """Fetch a file from the archive, aborting with a 404 if it is missing."""
    try:
        aff = get_archive_fetcher()
        return aff.get_file(file_id)
    except NoSuchDatalakeFile as e:
        # str(e) instead of e.message: BaseException.message does not exist
        # in Python 3, so the old code raised AttributeError while handling
        # a missing file instead of returning the intended 404.
        flask.abort(404, 'NoSuchFile', str(e))
def _get_headers_for_file(f):
headers = {}
if f.content_type is None:
headers['Content-Type'] = 'text/plain'
else:
headers['Content-Type'] = f.content_type
if f.content_encoding is not None:
headers['Content-Encoding'] = f.content_encoding
return headers
def _get_latest(what, where, lookback):
    """Return the newest matching file record, or abort with a 404."""
    querier = get_archive_querier()
    latest = querier.query_latest(what, where, lookback_days=lookback)
    if latest is None:
        msg = 'No "{}" files found in last {} days from "{}"'.format(
            what, lookback, where)
        flask.abort(404, 'NoSuchFile', msg)
    return latest
@v0.route('/archive/files/<file_id>/data')
def file_get_contents(file_id):
    '''Retrieve a file

    Retrieve a file's contents.
    ---
    tags:
      - file contents
    parameters:
      - in: path
        name: file_id
        description:
            The id of the file to retrieve
        type: string
        required: true
    responses:
      200:
          description: success
          schema:
              type: file
      404:
          description: no such file
          schema:
              id: DatalakeAPIError
    '''
    # _get_file aborts the request with a 404 for unknown ids, so no error
    # handling is needed here.
    f = _get_file(file_id)
    headers = _get_headers_for_file(f)
    return f.read(), 200, headers
@v0.route('/archive/files/<file_id>/metadata')
def file_get_metadata(file_id):
    '''Retrieve metadata for a file

    Retrieve a file's metadata.
    ---
    tags:
      - file contents
    parameters:
      - in: path
        name: file_id
        description:
            The id of the file whose metadata to retrieve
        type: string
        required: true
    responses:
      200:
          description: success
          schema:
              id: DatalakeMetadata
      404:
          description: no such file
          schema:
              id: DatalakeAPIError
    '''
    # _get_file aborts with a 404 when the id is unknown.
    f = _get_file(file_id)
    return Response(json.dumps(f.metadata), content_type='application/json')
def _validate_lookback(lookback):
try:
return int(lookback)
except ValueError:
msg = 'lookback must be an integer not {}'.format(type(lookback))
flask.abort(400, 'InvalidLookback', msg)
def _validate_latest_params(params):
    """Return a mutable copy of *params* with 'lookback' coerced to int."""
    validated = _copy_immutable_dict(params)
    if 'lookback' in validated:
        validated['lookback'] = _validate_lookback(validated['lookback'])
    return validated
@v0.route('/archive/latest/<what>/<where>')
def latest_get(what, where):
    '''Retrieve the latest file for a given what and where

    Retrieve latest file. Note that the current implementation of latest only
    examines the last 14 days of files by default. If you expect files older
    than this, you must retrieve them using the files endpoint or set the
    `lookback` parameter to something that works for you. Note that there may
    be a performance penalty for very large lookbacks.
    ---
    tags:
      - latest
    parameters:
      - in: path
        name: what
        description:
            The process or program of interest
        type: string
        required: true
      - in: path
        name: where
        description:
            The location of interest (e.g., server or location)
        type: string
        required: true
      - in: query
        name: lookback
        description:
            The number of days to lookback for the latest file. The default
            is 14.
        type: integer
    responses:
      200:
          description: success
          schema:
              id: DatalakeRecord
      404:
          description: no latest file found for the given what or where since the
              lookback.
          schema:
              id: DatalakeAPIError
    '''
    params = flask.request.args
    # Coerces lookback to an int (400 on bad input).
    params = _validate_latest_params(params)
    # DEFAULT_LOOKBACK_DAYS is defined elsewhere in this module.
    f = _get_latest(what, where, params.get('lookback', DEFAULT_LOOKBACK_DAYS))
    # Annotate the record with its canonical download URL before returning.
    f.update(http_url=_get_canonical_http_url(f))
    return Response(json.dumps(f), content_type='application/json')
@v0.route('/archive/latest/<what>/<where>/data')
def latest_get_contents(what, where):
    '''Retrieve the latest file data for a given what and where

    Note that the current implementation of latest only examines the last 14
    days of files by default. If you expect files older than this, you must
    retrieve them using the files endpoint or set the `lookback` parameter to
    something that works for you. Note that there may be a performance penalty
    for very large lookbacks.
    ---
    tags:
      - latest
    parameters:
      - in: path
        name: what
        description:
            The process or program of interest
        type: string
        required: true
      - in: path
        name: where
        description:
            The location of interest (e.g., server or location)
        type: string
        required: true
      - in: query
        name: lookback
        description:
            The number of days to lookback for the latest file. The default
            is 14.
        type: integer
    responses:
      200:
          description: success
          schema:
              type: file
      404:
          description: no latest file found for the given what or where in the
              last 14 days.
          schema:
              id: DatalakeAPIError
    '''
    params = flask.request.args
    params = _validate_latest_params(params)
    # First resolve the latest record, then fetch its actual contents by id.
    f = _get_latest(what, where, params.get('lookback', DEFAULT_LOOKBACK_DAYS))
    f = _get_file(f['metadata']['id'])
    headers = _get_headers_for_file(f)
    return f.read(), 200, headers
def get_build_version():
    """Return the build SHA baked into /version.txt, or 'UNKNOWN'."""
    version_file = '/version.txt'
    if not os.path.exists(version_file):
        return 'UNKNOWN'
    with open(version_file, 'r') as f:
        return f.read().strip()
@v0.route('/environment/')
def environment():
    '''
    Get information about the environment (eg. build version).
    ---
    tags:
      - environment
    responses:
      200:
        description: success
        schema:
          type: object
          properties:
            data:
              type: object
              properties:
                build:
                  type: object
                  properties:
                    version:
                      type: string
    '''
    # Reports the build SHA read from /version.txt ('UNKNOWN' when absent).
    return Response(json.dumps({
        'data': {
            'build': {
                'version': get_build_version()
            }
        }
    }), content_type='application/json')
| |
##########################################################################
#
# Copyright (c) 2009-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import sys
import IECore
import IECoreScene
class TestNParticleReader( unittest.TestCase ) :
    """Exercises IECoreScene.NParticleReader against the checked-in .mc caches.

    Uses assertTrue in place of the old TestCase.assert_ alias, which was
    deprecated since Python 2.7 and removed in Python 3.12.
    """

    def testConstruction( self ) :
        r = IECore.Reader.create( "test/IECore/data/iffFiles/nParticleFrame2.mc" )
        self.assertTrue( r.isInstanceOf( "ParticleReader" ) )
        self.assertEqual( type( r ), IECoreScene.NParticleReader )
        self.assertEqual( r["fileName"].getValue().value, "test/IECore/data/iffFiles/nParticleFrame2.mc" )

    def testReadWithPrimVarConversion( self ) :
        r = IECore.Reader.create( "test/IECore/data/iffFiles/nParticleFrame2.mc" )
        self.assertEqual( type( r ), IECoreScene.NParticleReader )
        r.parameters()["realType"].setValue( "native" )
        self.assertEqual( r.numParticles(), 4 )
        self.assertEqual( len(r.frameTimes()), 1 )
        attrNames = r.attributeNames()
        expectedAttrNamesAndTypes = {
            "nParticleShape1_id" : IECore.DoubleVectorData,
            "nParticleShape1_birthTime" : IECore.DoubleVectorData,
            "nParticleShape1_position" : IECore.V3dVectorData,
            "nParticleShape1_lifespanPP" : IECore.DoubleVectorData,
            "nParticleShape1_finalLifespanPP" : IECore.DoubleVectorData,
            "nParticleShape1_velocity" : IECore.V3dVectorData,
        }
        self.assertEqual( len( attrNames ), len( expectedAttrNamesAndTypes ) )
        for i in expectedAttrNamesAndTypes.keys() :
            self.assertTrue( i in attrNames )
        c = r.read()
        # With conversion enabled, the position attribute is renamed "P".
        expectedConvertedAttrNamesAndTypes = {
            "nParticleShape1_id" : IECore.DoubleVectorData,
            "nParticleShape1_birthTime" : IECore.DoubleVectorData,
            "P" : IECore.V3dVectorData,
            "nParticleShape1_lifespanPP" : IECore.DoubleVectorData,
            "nParticleShape1_finalLifespanPP" : IECore.DoubleVectorData,
            "nParticleShape1_velocity" : IECore.V3dVectorData,
        }
        self.assertEqual( type( c ), IECoreScene.PointsPrimitive )
        self.assertEqual( len( c ), len( expectedConvertedAttrNamesAndTypes ) )
        for i in expectedConvertedAttrNamesAndTypes.keys() :
            self.assertTrue( i in c )
            self.assertEqual( type(c[i].data), expectedConvertedAttrNamesAndTypes[i] )
            self.assertEqual( len(c[i].data), r.numParticles() )
        for p in c["P"].data :
            self.assertTrue( abs( p.x ) < 0.022 )
            self.assertTrue( abs( p.y ) < 0.017 )
            self.assertTrue( abs( p.z ) < 0.020 )
        self.assertEqual( c["nParticleShape1_id"].data, IECore.DoubleVectorData( range( 0, 4 ) ) )

    def testReadNoPrimVarConversion( self ) :
        r = IECore.Reader.create( "test/IECore/data/iffFiles/nParticleFrame2.mc" )
        self.assertEqual( type( r ), IECoreScene.NParticleReader )
        r["convertPrimVarNames"].setValue( IECore.BoolData( False ) )
        r["realType"].setValue( "native" )
        self.assertFalse( r.parameters()["convertPrimVarNames"].getTypedValue() )
        self.assertEqual( r.numParticles(), 4 )
        self.assertEqual( len(r.frameTimes()), 1 )
        attrNames = r.attributeNames()
        expectedAttrNamesAndTypes = {
            "nParticleShape1_id" : IECore.DoubleVectorData,
            "nParticleShape1_birthTime" : IECore.DoubleVectorData,
            "nParticleShape1_position" : IECore.V3dVectorData,
            "nParticleShape1_lifespanPP" : IECore.DoubleVectorData,
            "nParticleShape1_finalLifespanPP" : IECore.DoubleVectorData,
            "nParticleShape1_velocity" : IECore.V3dVectorData,
        }
        self.assertEqual( len( attrNames ), len( expectedAttrNamesAndTypes ) )
        for i in expectedAttrNamesAndTypes.keys() :
            self.assertTrue( i in attrNames )
        c = r.read()
        self.assertEqual( type( c ), IECoreScene.PointsPrimitive )
        self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
        for i in expectedAttrNamesAndTypes.keys() :
            self.assertTrue( i in c )
            self.assertEqual( type(c[i].data), expectedAttrNamesAndTypes[i] )
            self.assertEqual( len(c[i].data), r.numParticles() )
        # Position keeps its original name when conversion is disabled.
        for p in c["nParticleShape1_position"].data :
            self.assertTrue( abs( p.x ) < 0.022 )
            self.assertTrue( abs( p.y ) < 0.017 )
            self.assertTrue( abs( p.z ) < 0.020 )
        self.assertEqual( c["nParticleShape1_id"].data, IECore.DoubleVectorData( range( 0, 4 ) ) )

    def testMultiFrameFiles( self ) :
        r = IECore.Reader.create( "test/IECore/data/iffFiles/nParticleMultipleFrames.mc" )
        r.parameters()["realType"].setValue( "native" )
        self.assertTrue( r.parameters()["convertPrimVarNames"].getTypedValue() )
        self.assertEqual( len(r.frameTimes()), 10 )
        # Frame 0 of this cache contains no particles.
        self.assertEqual( r.numParticles(), 0 )
        attrNames = r.attributeNames()
        self.assertEqual( len( attrNames ), 0 )
        c = r.read()
        self.assertEqual( type( c ), IECoreScene.PointsPrimitive )
        self.assertEqual( len( c ), 0 )
        self.assertEqual( c.numPoints, 0 )
        r.parameters()['frameIndex'].setValue( 5 )
        self.assertEqual( r.numParticles(), 20 )
        attrNames = r.attributeNames()
        expectedAttrNamesAndTypes = {
            "testParticleShape_id" : IECore.DoubleVectorData,
            "testParticleShape_birthTime" : IECore.DoubleVectorData,
            "testParticleShape_position" : IECore.V3dVectorData,
            "testParticleShape_lifespanPP" : IECore.DoubleVectorData,
            "testParticleShape_finalLifespanPP" : IECore.DoubleVectorData,
            "testParticleShape_velocity" : IECore.V3dVectorData,
        }
        self.assertEqual( len( attrNames ), len( expectedAttrNamesAndTypes ) )
        for i in expectedAttrNamesAndTypes.keys() :
            self.assertTrue( i in attrNames )
        c = r.read()
        self.assertEqual( type( c ), IECoreScene.PointsPrimitive )
        self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
        expectedConvertedAttrNamesAndTypes = {
            "testParticleShape_id" : IECore.DoubleVectorData,
            "testParticleShape_birthTime" : IECore.DoubleVectorData,
            "P" : IECore.V3dVectorData,
            "testParticleShape_lifespanPP" : IECore.DoubleVectorData,
            "testParticleShape_finalLifespanPP" : IECore.DoubleVectorData,
            "testParticleShape_velocity" : IECore.V3dVectorData,
        }
        for i in expectedConvertedAttrNamesAndTypes.keys() :
            self.assertTrue( i in c )
            self.assertEqual( type(c[i].data), expectedConvertedAttrNamesAndTypes[i] )
            self.assertEqual( len(c[i].data), r.numParticles() )
        for p in c["P"].data :
            self.assertTrue( abs( p.x ) < 0.159 )
            self.assertTrue( abs( p.y ) < 0.145 )
            self.assertTrue( abs( p.z ) < 0.138 )

    def testFiltering( self ) :
        r = IECore.Reader.create( "test/IECore/data/iffFiles/nParticleMultipleFrames.mc" )
        r.parameters()['frameIndex'].setValue( 5 )
        attributesToLoad = [ "testParticleShape_birthTime", "testParticleShape_position" ]
        r.parameters()["percentage"].setValue( IECore.FloatData( 50 ) )
        r.parameters()["attributes"].setValue( IECore.StringVectorData( attributesToLoad ) )
        a = r.readAttribute( "testParticleShape_position" )
        # what the acceptable thresholds should be are somewhat debatable,
        # especially for such a small number of particles
        self.assertTrue( len( a ) < 13 )
        self.assertTrue( len( a ) > 7 )
        p = r.read()
        self.assertTrue( p.numPoints < 13 )
        self.assertTrue( p.numPoints > 7 )
        convertedAttributes = [ "testParticleShape_birthTime", "P" ]
        for attr in convertedAttributes :
            self.assertEqual( p.numPoints, p[attr].data.size() )

    def testConversion( self ) :
        r = IECore.Reader.create( "test/IECore/data/iffFiles/nParticleMultipleFrames.mc" )
        self.assertEqual( type( r ), IECoreScene.NParticleReader )
        # realType "float" converts the native double data to float types.
        r.parameters()["realType"].setValue( "float" )
        r.parameters()['frameIndex'].setValue( 5 )
        attrNames = r.attributeNames()
        expectedAttrNamesAndTypes = {
            "testParticleShape_id" : IECore.FloatVectorData,
            "testParticleShape_birthTime" : IECore.FloatVectorData,
            "testParticleShape_position" : IECore.V3fVectorData,
            "testParticleShape_lifespanPP" : IECore.FloatVectorData,
            "testParticleShape_finalLifespanPP" : IECore.FloatVectorData,
            "testParticleShape_velocity" : IECore.V3fVectorData,
        }
        c = r.read()
        expectedConvertedAttrNamesAndTypes = {
            "testParticleShape_id" : IECore.FloatVectorData,
            "testParticleShape_birthTime" : IECore.FloatVectorData,
            "P" : IECore.V3fVectorData,
            "testParticleShape_lifespanPP" : IECore.FloatVectorData,
            "testParticleShape_finalLifespanPP" : IECore.FloatVectorData,
            "testParticleShape_velocity" : IECore.V3fVectorData,
        }
        self.assertEqual( type( c ), IECoreScene.PointsPrimitive )
        self.assertEqual( len( c ), len( expectedConvertedAttrNamesAndTypes ) )
        for i in expectedConvertedAttrNamesAndTypes.keys() :
            self.assertTrue( i in c )
            self.assertEqual( type(c[i].data), expectedConvertedAttrNamesAndTypes[i] )
            self.assertEqual( len(c[i].data), r.numParticles() )
        for p in c["P"].data :
            self.assertTrue( abs( p.x ) < 0.159 )
            self.assertTrue( abs( p.y ) < 0.145 )
            self.assertTrue( abs( p.z ) < 0.138 )

    def testFileNameChange( self ) :
        """Now Readers are Ops, the filename can be changed and read() can be called
        again. So we need to check that that works."""
        r = IECore.Reader.create( "test/IECore/data/iffFiles/nParticleMultipleFrames.mc" )
        self.assertEqual( type( r ), IECoreScene.NParticleReader )
        r.parameters()["realType"].setValue( "float" )
        r.parameters()['frameIndex'].setValue( 5 )
        expectedAttrNamesAndTypes = {
            "testParticleShape_id" : IECore.FloatVectorData,
            "testParticleShape_birthTime" : IECore.FloatVectorData,
            "P" : IECore.V3fVectorData,
            "testParticleShape_lifespanPP" : IECore.FloatVectorData,
            "testParticleShape_finalLifespanPP" : IECore.FloatVectorData,
            "testParticleShape_velocity" : IECore.V3fVectorData,
        }
        c = r.read()
        self.assertEqual( type( c ), IECoreScene.PointsPrimitive )
        self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
        for i in expectedAttrNamesAndTypes.keys() :
            self.assertTrue( i in c )
            self.assertEqual( type(c[i].data), expectedAttrNamesAndTypes[i] )
            self.assertEqual( len(c[i].data), r.numParticles() )
        for p in c["P"].data :
            self.assertTrue( abs( p.x ) < 0.159 )
            self.assertTrue( abs( p.y ) < 0.145 )
            self.assertTrue( abs( p.z ) < 0.138 )
        # Retarget the same reader at a different cache and read again.
        r["fileName"].setValue( IECore.StringData( "test/IECore/data/iffFiles/nParticleFrame2.mc" ) )
        r.parameters()['frameIndex'].setValue( 0 )
        self.assertEqual( r.numParticles(), 4 )
        expectedAttrNamesAndTypes = {
            "nParticleShape1_id" : IECore.FloatVectorData,
            "nParticleShape1_birthTime" : IECore.FloatVectorData,
            "P" : IECore.V3fVectorData,
            "nParticleShape1_lifespanPP" : IECore.FloatVectorData,
            "nParticleShape1_finalLifespanPP" : IECore.FloatVectorData,
            "nParticleShape1_velocity" : IECore.V3fVectorData,
        }
        c = r.read()
        self.assertEqual( type( c ), IECoreScene.PointsPrimitive )
        self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
        for i in expectedAttrNamesAndTypes.keys() :
            self.assertTrue( i in c )
            self.assertEqual( type(c[i].data), expectedAttrNamesAndTypes[i] )
            self.assertEqual( len(c[i].data), 4 )
        for p in c["P"].data :
            self.assertTrue( abs( p.x ) < 0.022 )
            self.assertTrue( abs( p.y ) < 0.017 )
            self.assertTrue( abs( p.z ) < 0.020 )

    def testNClothAsParticles( self ) :
        r = IECore.Reader.create( "test/IECore/data/iffFiles/nClothFrame3.mc" )
        self.assertEqual( type( r ), IECoreScene.NParticleReader )
        r.parameters()["realType"].setValue( "native" )
        self.assertEqual( r.numParticles(), 349 )
        self.assertEqual( len(r.frameTimes()), 1 )
        attrNames = r.attributeNames()
        expectedAttrNamesAndTypes = {
            "nClothShape1" : IECore.V3dVectorData,
        }
        self.assertEqual( len( attrNames ), len( expectedAttrNamesAndTypes ) )
        for i in expectedAttrNamesAndTypes.keys() :
            self.assertTrue( i in attrNames )
        c = r.read()
        self.assertEqual( type( c ), IECoreScene.PointsPrimitive )
        self.assertEqual( len( c ), len( expectedAttrNamesAndTypes ) )
        for i in expectedAttrNamesAndTypes.keys() :
            self.assertTrue( i in c )
            self.assertEqual( type(c[i].data), expectedAttrNamesAndTypes[i] )
            self.assertEqual( len(c[i].data), r.numParticles() )
        for p in c["nClothShape1"].data :
            self.assertTrue( abs( p.x ) < 320.50 )
            self.assertTrue( abs( p.y ) < 119.41 )
            self.assertTrue( abs( p.z ) < 554.64 )

    def testParameterTypes( self ) :
        p = IECoreScene.NParticleReader()
        self.assertTrue( p.resultParameter().isInstanceOf( "ObjectParameter" ) )
        self.assertEqual( p.resultParameter().validTypes(), [ IECoreScene.TypeId.PointsPrimitive ] )
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| |
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
import time
import json
import base64
import md5
import threading
from threading import Thread
from request_db import request_db
from pydbgpd_helper import pydbgpd_helper
from debugger_exception import debugger_exception
class debugger:
    """Drives a pydbgpd-backed PHP debugging session.

    Holds the breakpoint table, a background state-machine thread, and the
    threading events used to gate user actions while the debuggee runs.
    """
    # NOTE(review): these are class-level attributes, shared by every
    # instance; fine if the server only ever creates one debugger — confirm.
    _breakpoint_list = {}
    _debugger_helper = None
    _status = -1 #-1 Uninit 0 Init 1 Listen 2 debug
    _debug_thread = None
    _accept_user_action_event = None
    _state_machine_stop_event = None
    _pre_variables = {}
    _cur_variables = {}
    # For each breakpoint type, the fields that identify it; used both to
    # build the dedup key and to filter client-supplied parameters.
    _breakpoint_type_keys = {
        "line" : ["filename","lineno","type"],
        "call" : ["function","type"],
        "return" : ["function","type"],
        "exception" : ["exception","type"],
        "conditional" : ["filename","lineno","expression","type"],
        "watch" : [],
    }
    _variable_watch = False
    _all_stack_parameters = False
    _ide_key = "PhpDebugServer"

    def __init__(self):
        # stop event: set to ask the state-machine thread to exit
        self._state_machine_stop_event = threading.Event()
        self._state_machine_stop_event.clear()
        # accept event: set while user actions will be accepted
        self._accept_user_action_event = threading.Event()
        self._accept_user_action_event.set()
        pass

    def __del__(self):
        pass

    def set_settings(self, settings):
        """Apply UI settings; expects 'variable_watch',
        'all_stack_parameters' and 'ide_key' keys."""
        self._variable_watch = settings['variable_watch']
        self._all_stack_parameters = settings['all_stack_parameters']
        self._ide_key = settings['ide_key']
    def do(self, action, param):
        """Dispatch a client action to its handler.

        Each table entry is [handler, response_format, needs_helper,
        needs_accept]: when needs_helper is True the call is rejected unless
        a debugger helper exists, and when needs_accept is True it is
        rejected while the accept-user-action event is not set.

        Returns a (result, format) tuple. Raises debugger_exception for an
        unknown action or a non-string param.
        """
        actions = {"run":[self.run, "json", True, True],
            "query":[self.query, "json", True, True],
            "step_in":[self.step_in, "json", True, True],
            "step_out":[self.step_out, "json", True, True],
            "step_over":[self.step_over, "json", True, True],
            "stack_get":[self.stack_get, "json", True, True],
            "save_request":[self.save_request, "json", True, True],
            "get_variables":[self.get_variables, "json", True, True],
            "modify_variable":[self.modify_variable, "json", True, True],
            "get_cur_stack_info":[self.get_cur_stack_info, "json", True, True],
            "get_variable_watch":[self.get_variable_watch, "json", True, True],
            "select_first_session":[self.select_first_session, "json", True, True],
            "stop_debug":[self.stop_debug, "json", False, False],
            "start_debug":[self.start_debug, "json", False, False],
            "breakpoint_list":[self.breakpoint_list, "json", False, False],
            "get_debugger_status":[self.get_debugger_status, "json", False, False],
            "get_file_breakpoint_lineno":[self.get_file_breakpoint_lineno, "json", False, False],
            "add_breakpoint":[self.add_breakpoint, "json", False, True],
            "remove_breakpoint":[self.remove_breakpoint, "json", False, True],
            }
        if action not in actions.keys():
            raise debugger_exception("action:" + action + " is invalid")
        # basestring: this module is Python 2 code (see the print statements
        # elsewhere in the class).
        if False == isinstance(param,basestring):
            raise debugger_exception("param is invalid")
        if actions[action][3] and False == self._accept_user_action_event.isSet():
            return ({"ret":0}, "json")
        if actions[action][2]:
            if not self._debugger_helper:
                return ({"ret":0}, "json")
        return (actions[action][0](param), actions[action][1])
def get_debugger_status(self,param):
if not self._debugger_helper:
return {"ret":0}
return self._debugger_helper.do("status","")
    def start_debug(self, param):
        """Start the listener and the background state-machine thread.

        Returns {"ret": 1} on success, {"ret": 0} when listening failed.
        """
        self._reset_all_breakpoint()
        # NOTE(review): the source indentation was ambiguous here; the
        # listen call is assumed to belong inside the helper-creation branch
        # (listen once, when the helper is first created) — confirm.
        if not self._debugger_helper:
            self._debugger_helper = pydbgpd_helper()
            self._debugger_helper.start_debugger()
            listen_ret = self._debugger_helper.do("start_listen", "")
            if listen_ret["ret"] == 0:
                return {"ret" :0}
        if not self._debug_thread:
            self._state_machine_stop_event.clear()
            self._debug_thread = Thread(target=self._debug_routine)
            self._debug_thread.daemon = True # thread dies with the program
            self._debug_thread.start()
        return {"ret" :1}
    def stop_debug(self, param):
        """Stop the state-machine thread and tear down the helper."""
        if self._debug_thread:
            # Keep signalling until the thread notices the stop event.
            while self._debug_thread.is_alive():
                self._state_machine_stop_event.set()
                time.sleep(0.5)
            self._debug_thread = None
        if self._debugger_helper:
            self._debugger_helper.stop_debugger()
            del self._debugger_helper
            self._debugger_helper = None
        self._reset_all_breakpoint()
        return {"ret" :1}
    def _reset_all_breakpoint(self):
        """Strip session-specific fields from every stored breakpoint.

        Drops breakpoints of unknown type, removes any key that is not part
        of the type's identifying key list, and marks each breakpoint
        disabled so it can be re-registered with the next session.
        """
        # NOTE(review): this deletes entries while iterating .keys(); safe in
        # Python 2 (keys() returns a list) but would raise in Python 3.
        for breakpoint_key in self._breakpoint_list.keys():
            if self._breakpoint_list[breakpoint_key]["type"] not in self._breakpoint_type_keys.keys():
                del self._breakpoint_list[breakpoint_key]
                continue
            for key in self._breakpoint_list[breakpoint_key].keys():
                if key not in self._breakpoint_type_keys[self._breakpoint_list[breakpoint_key]["type"]]:
                    del self._breakpoint_list[breakpoint_key][key]
            self._breakpoint_list[breakpoint_key]["state"] = "disable"
def _add_all_breakpoint(self):
update_keys = ["id", "state", "hit_value", "hit_condition", "hit_count"]
for (breakpoint_key,breakpoint_value) in self._breakpoint_list.items():
add_ret = self._debugger_helper.do("add_breakpoint", breakpoint_value)
if add_ret["ret"] == 1:
for item in update_keys:
if item in add_ret["breakpoint"].keys():
self._breakpoint_list[breakpoint_key][item] = add_ret["breakpoint"][item]
    def _debug_routine(self):
        """Background loop: poll for sessions and debug each one until the
        stop event is set, then quit the helper and reset breakpoints."""
        while False == self._state_machine_stop_event.isSet():
            time.sleep(0.3)
            sessions = self._debugger_helper.do("sessions", "")
            for session_id in sessions:
                self._debug_session(session_id)
                # Bail out mid-iteration if a stop was requested.
                if self._state_machine_stop_event.isSet():
                    break
        self._debugger_helper.do("quit","")
        self._reset_all_breakpoint()
    def _debug_session(self, session_id):
        """Drive one debug session's status state machine until it finishes.

        The numeric status codes are inferred from the handling below
        (1: starting, 2: paused/break, 3: running, 4: stopping) — confirm
        against pydbgpd_helper.
        """
        select_ret = self._debugger_helper.do("select", session_id)
        if select_ret["ret"] == 0:
            print "select error"
            return
        # Block user actions while (re)arming breakpoints for this session.
        self._accept_user_action_event.clear()
        self._reset_all_breakpoint()
        self._add_all_breakpoint()
        status_ret = self._debugger_helper.do("status","")
        while False == self._state_machine_stop_event.isSet():
            if status_ret["ret"] == 0:
                break
            if status_ret["status"] == 1:
                #self._accept_user_action_event.set()
                # With breakpoints set, run to the first one; otherwise step
                # so the session pauses immediately.
                if len(self._breakpoint_list):
                    self._debugger_helper.do("run","")
                else:
                    self._debugger_helper.do("step_over","")
            if status_ret["status"] == 2:
                # Paused: let the user drive, then re-poll.
                self._accept_user_action_event.set()
                time.sleep(5)
            if status_ret["status"] == 3:
                self._accept_user_action_event.clear()
                self._debugger_helper.do("run","")
            if status_ret["status"] == 4:
                self._accept_user_action_event.clear()
                break
            status_ret = self._debugger_helper.do("status","")
            time.sleep(1)
        if status_ret["status"] != 0:
            self._debugger_helper.do("exit","")
        # Clear per-session variable snapshots used by the watch feature.
        self._pre_variables = {}
        self._cur_variables = {}
        self._accept_user_action_event.set()
def query(self,param):
param_de = base64.b64decode(param)
param_json = json.loads(param_de)
if "cmd" not in param_json.keys():
raise debugger_exception("query cmd is needed");
return self._debugger_helper.query(base64.b64decode(param_json["cmd"]))
def _generate_breakpoint_key(self,breakpoint_info):
if "type" not in breakpoint_info.keys():
raise debugger_exception("_generate_breakpoint_key param error: no type");
if breakpoint_info["type"] not in self._breakpoint_type_keys.keys():
raise debugger_exception("_generate_breakpoint_key param error: type[" + breakpoint_info["type"] + "] is invalid");
breakpoint_info_key = ""
for item in self._breakpoint_type_keys[breakpoint_info["type"]]:
breakpoint_info_key = breakpoint_info_key + item + ":" + breakpoint_info[item] + " "
m1 = md5.new()
m1.update(breakpoint_info_key)
return m1.hexdigest()
def _get_breakpoint_info(self,param_json):
if "type" not in param_json.keys():
raise debugger_exception("_get_breakpoint_info param error: no type");
if param_json["type"] not in self._breakpoint_type_keys.keys():
raise debugger_exception("_get_breakpoint_info param error: type[" + param_json["type"] + "] is invalid");
breakpoint_info = {}
for item in self._breakpoint_type_keys[param_json["type"]]:
if (item == "filename"):
breakpoint_info[item] = base64.b64decode(param_json[item])
else:
breakpoint_info[item] = param_json[item]
breakpoint_info_key = self._generate_breakpoint_key(breakpoint_info)
return (breakpoint_info_key, breakpoint_info)
    def add_breakpoint(self, param):
        """Record a breakpoint and, if a session is live, register it now.

        param is a base64-encoded JSON object describing the breakpoint.
        Returns {"ret": 1} on success, {"ret": 0} if the live registration
        failed.
        """
        update_keys = ["id", "state", "hit_value", "hit_condition", "hit_count"]
        param_de = base64.b64decode(param)
        param_json = json.loads(param_de)
        (breakpoint_key, breakpoint_value) = self._get_breakpoint_info(param_json)
        #breakpoint_set_keys = ["type", "filename", "lineno", "function", "state", "exception", "expression", "temporary", "hit_count", "hit_value", "hit_condition"]
        if breakpoint_key not in self._breakpoint_list.keys():
            # Stored disabled; it is enabled when registered with a session.
            self._breakpoint_list[breakpoint_key] = breakpoint_value
            self._breakpoint_list[breakpoint_key]["state"] = "disable"
        if self._debugger_helper and self._debugger_helper.is_session():
            add_ret = self._debugger_helper.do("add_breakpoint", breakpoint_value)
            if add_ret["ret"] == 1:
                # Copy back the server-assigned fields (id, state, ...).
                for item in update_keys:
                    if item in add_ret["breakpoint"].keys():
                        self._breakpoint_list[breakpoint_key][item] = add_ret["breakpoint"][item]
                return {"ret":1}
            else:
                return {"ret":0}
        return {"ret":1}
def get_breakpoint_info_by_param(self,param_json):
breakpoint_key = ""
breakpoint_value = {}
if "itemid" in param_json.keys():
breakpoint_key = param_json["itemid"]
if breakpoint_key in self._breakpoint_list.keys():
breakpoint_value = self._breakpoint_list[breakpoint_key]
else:
(breakpoint_key, breakpoint_value_t) = self._get_breakpoint_info(param_json)
if breakpoint_key in self._breakpoint_list.keys():
breakpoint_value = self._breakpoint_list[breakpoint_key]
return (breakpoint_key,breakpoint_value)
    def remove_breakpoint(self, param):
        """Remove a breakpoint locally and from the live session, if any.

        param is a base64-encoded JSON object naming the breakpoint.
        """
        param_de = base64.b64decode(param)
        param_json = json.loads(param_de)
        # Debug output (Python 2 print statements).
        print self._breakpoint_list
        (breakpoint_key, breakpoint_value) = self.get_breakpoint_info_by_param(param_json)
        print breakpoint_key, breakpoint_value
        if self._debugger_helper and self._debugger_helper.is_session():
            if breakpoint_key in self._breakpoint_list.keys():
                remove_ret = self._debugger_helper.do("remove_breakpoint", breakpoint_value["id"])
                if remove_ret["ret"] == 1:
                    del self._breakpoint_list[breakpoint_key]
                    return {"ret":1}
                else:
                    return {"ret":0}
            # NOTE(review): falls through returning None when the key is
            # unknown during a live session — confirm callers tolerate that.
        else:
            if breakpoint_key in self._breakpoint_list.keys():
                del self._breakpoint_list[breakpoint_key]
            return {"ret":1}
def modify_breakpoint(self):
update_keys = ["id", "state", "hit_value", "hit_condition", "hit_count"]
for (key, value) in self._breakpoint_list.items():
add_ret = self._debugger_helper.do("add_breakpoint", value)
if add_ret["ret"] == 1:
for item in update_keys:
if item in add_ret["breakpoint"].keys():
self._breakpoint_list[key][item] = add_ret["breakpoint"][item]
else:
self._breakpoint_list[key]["state"] = "disable"
def select_first_session(self,param):
ret = self._debugger_helper.do("select_first_session", param)
if ret["ret"] == 1:
self.modify_breakpoint()
self._status = 2
return ret
def get_cur_stack_info(self,param):
return self._debugger_helper.do("get_cur_stack_info", param)
def step_over(self,param):
if self._variable_watch:
self._pre_variables = self.get_variables("")
ret = self._debugger_helper.do("step_over", param)
self._cur_variables = self.get_variables("")
else:
ret = self._debugger_helper.do("step_over", param)
return ret
def step_in(self,param):
if self._variable_watch:
self._pre_variables = self.get_variables("")
ret = self._debugger_helper.do("step_in", param)
self._cur_variables = self.get_variables("")
else:
ret = self._debugger_helper.do("step_in", param)
return ret
def step_out(self,param):
if self._variable_watch:
self._pre_variables = self.get_variables("")
ret = self._debugger_helper.do("step_out", param)
self._cur_variables = self.get_variables("")
else:
ret = self._debugger_helper.do("step_out", param)
return ret
def run(self,param):
if self._variable_watch:
self._pre_variables = self.get_variables("")
ret = self._debugger_helper.do("run", param)
self._cur_variables = self.get_variables("")
else:
ret = self._debugger_helper.do("run", param)
return ret
def get_variable_watch(self,param):
if False == self._variable_watch:
return {"ret":0}
param_de = base64.b64decode(param)
pre_data = self._search_variable(self._pre_variables, param_de)
cur_data = self._search_variable(self._cur_variables, param_de)
m1 = md5.new()
m1.update(param_de)
id = m1.hexdigest()
new_data = {"pre":pre_data,"cur":cur_data}
return {"ret":1, "id":id, "name":param_de, "data":new_data}
    def _search_variable(self, data, name):
        """Find a variable entry by name in a get_variables result.

        Returns an 'uninitialized' placeholder when the result is missing or
        failed; returns None implicitly when the name is not present at all.
        """
        if "ret" not in data.keys():
            return {"type":'uninitialized', "value":"",'name':name}
        if "data" not in data.keys():
            return {"type":'uninitialized', "value":"",'name':name}
        if data["ret"] == 0:
            return {"type":'uninitialized', "value":"",'name':name}
        # data["data"] appears to nest stack level -> context -> variable
        # list (inferred from the triple loop — confirm against the helper).
        for (itemk,itemv) in data["data"].items():
            for (itemkk,itemvv) in itemv.items():
                for item in itemvv:
                    if item["name"] == name:
                        return item
def stack_get(self,param):
return self._debugger_helper.do("stack_get", param)
def get_variables(self,param):
if self._all_stack_parameters:
return self._debugger_helper.do("get_variables", param)
else:
return self._debugger_helper.do("get_cur_stack_variables", param)
def breakpoint_list(self,param):
breakpoint_list_info = []
for (key,value) in self._breakpoint_list.items():
item = value
item["itemid"] = key
breakpoint_list_info.append(item)
return breakpoint_list_info
def get_file_breakpoint_lineno(self, param):
    """List breakpoint line numbers registered for a given file.

    @param param: base64-encoded JSON carrying a "filename" key.
    @return: {"ret": 1, "breakpoint_list_lineno": [lineno, ...]}
    @raise debugger_exception: when "filename" is missing from the request.
    """
    request = json.loads(base64.b64decode(param))
    if "filename" not in request:
        raise debugger_exception("param is invalid")
    target = request["filename"]
    linenos = [
        info["lineno"]
        for info in self._breakpoint_list.values()
        if "filename" in info and info["filename"] == target and "lineno" in info
    ]
    return {"ret": 1, "breakpoint_list_lineno": linenos}
def modify_variable(self, param):
    """Assign a new value to a variable via an eval'd assignment.

    @param param: base64-encoded JSON with "name" and a base64-encoded "value".
    @return: {"ret": 0} on a malformed request, otherwise the helper's
        "eval" result.
    """
    request = json.loads(base64.b64decode(param))
    if "value" not in request or "name" not in request:
        return {"ret": 0}
    assignment = request["name"] + "=" + base64.b64decode(request["value"])
    return self._debugger_helper.do("eval", assignment)
def save_request(self, param):
    """Persist the current request's $_GET/$_POST data under a given name.

    @param param: base64-encoded request name.
    @return: {"ret": 0, "msg": ...} when the name already exists,
        {"ret": 1} on success.
    """
    name = base64.b64decode(param)
    db = request_db()
    if db.is_request_name_exist(name):
        return {"ret": 0, "msg": "name exist"}
    variables = self.get_variables("")
    request_data = {"url": ""}
    # Extract both superglobals the same way, keyed as "get"/"post".
    for key, prefix in (("get", "$_GET"), ("post", "$_POST")):
        entry = self._search_variable(variables, prefix)
        mapped = {}
        if "value" in entry.keys():
            mapped = self.generate_get_request_map(entry["value"], prefix + "['", "']")
        request_data[key] = mapped
    db.add_request(name, request_data)
    return {"ret": 1}
def generate_get_request_map(self, data, start, end):
    """Convert a JSON map of fully-qualified variable names into a map keyed
    by the short name found between `start` and `end`.

    Entries whose name does not contain `start` are dropped.
    """
    remapped = {}
    for full_name, value in json.loads(data).items():
        short_name = self._get_full_name_varaibles_short_name(full_name, start, end)
        if short_name:
            remapped[short_name] = value
    return remapped
def _get_full_name_varaibles_short_name(self, data, start, end):
    """Extract the substring between the first `start` marker and the last
    `end` marker.

    Returns "" when `start` is absent; runs to the end of the string when
    `end` is absent.
    """
    begin = data.find(start)
    if begin == -1:
        return ""
    begin += len(start)
    stop = data.rfind(end)
    if stop == -1:
        stop = len(data)
    return data[begin:stop]
def test_no_action():
    """Dispatch an unknown action; the debugger is expected to raise."""
    dbg = debugger()
    dbg.do("no_action", "")
def test_param_invalid():
    """Pass a non-string param to add_breakpoint; expected to raise."""
    dbg = debugger()
    dbg.do("add_breakpoint", [])
def test_add_breakpoint_no_type():
    """add_breakpoint without a "type" field; expected to raise."""
    request = base64.b64encode(json.dumps({"line": 1}))
    dbg = debugger()
    dbg.do("add_breakpoint", request)
def test_add_breakpoint_type_invalid():
    """add_breakpoint with an unrecognized "type" value; expected to raise."""
    request = base64.b64encode(json.dumps({"type": "no type", "line": 1}))
    dbg = debugger()
    dbg.do("add_breakpoint", request)
def run_error(f):
    """Run the zero-argument callable named by `f` and report whether it
    raised debugger_exception.

    NOTE(review): uses eval() on the supplied name — only acceptable because
    the __main__ caller passes a fixed, hard-coded list of test names.
    """
    try:
        eval(f)()
    except debugger_exception:
        return True
    return False
if __name__ == "__main__":
    # Smoke-test the error paths: each handler is expected to raise
    # debugger_exception, which run_error converts to True.
    error_handles = ["test_no_action", "test_param_invalid", "test_add_breakpoint_no_type", "test_add_breakpoint_type_invalid"]
    for item in error_handles:
        if False == run_error(item):
            # NOTE(review): Python 2 print statement — this module is py2-only.
            # The message also lacks a separating space ("...run error").
            print item + "run error"
| |
# Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#
# This code was based on a post at http://www.shysecurity.com/posts/Remote%20Interactive%20Python by
# kelson@shysecurity.com.
#
# This is used to enable remote interaction with the python process for remote debugging. It should
# be used with extreme care since there is no security in the connection model right now.
#
# author: Steven Czerwinski <czerwin@scalyr.com>
from __future__ import absolute_import

__author__ = "czerwin@scalyr.com"

import errno
import socket
import sys

import scalyr_agent.scalyr_logging as scalyr_logging
from scalyr_agent.util import StoppableThread
log = scalyr_logging.getLogger(__name__)
class SocketWrapper(object):
    """Wraps a socket in order to implement the necessary functions to act as stdin.

    This is necessary to cast the socket as stdin to be used by the
    code.interact method.  It implements only the methods necessary to be
    used in place of stdin.
    """

    def __init__(self, my_socket):
        """Initializes the wrapper.

        @param my_socket: The socket to wrap
        @type my_socket: socket.socket
        """
        self.__socket = my_socket

    def read(self, max_bytes):
        """Reads bytes from the underlying connection.

        This will block until some bytes can be returned.

        @param max_bytes: The maximum number of bytes to read.
        @type max_bytes: int
        @return: The bytes read.
        @rtype: six.binary_type
        """
        while True:
            try:
                return self.__socket.recv(max_bytes)
            except socket.timeout:
                continue
            except socket.error as e:
                # BUG FIX: the old code compared against the literal 35,
                # which is EAGAIN only on BSD/macOS (it is EDEADLK on Linux).
                # Use the errno constants so the retry is portable.
                if e.errno in (errno.EAGAIN, errno.EWOULDBLOCK):
                    continue
                raise

    def write(self, input_str):
        """Writes the bytes to the underlying socket.

        @param input_str: The bytes to send.
        @type input_str: six.binary_type
        """
        return self.__socket.send(input_str)

    def readline(self):
        """Reads an entire line from the underlying socket, blocking until a
        line is received.

        @return: The line, including the trailing newline when one was seen.
        @rtype: six.binary_type
        """
        # BUG FIX: accumulate bytes (b"") rather than text ("") so this also
        # works on Python 3, where recv() returns bytes.
        data = b""
        while True:
            try:
                iota = self.read(1)
            except socket.timeout:
                continue
            if not iota:
                # Connection closed before a newline arrived.
                break
            data += iota
            if iota == b"\n":
                break
        return data
class DebugServer(StoppableThread):
    """A server that accepts connections from local host to an interactive Python shell.

    This can be used for debugging purposes.  The interactive Python shell
    allows you to inspect the state of the running Python process including
    global variables, etc.

    This currently creates a new thread for every incoming connection.
    """

    def __init__(self, local=None, host="localhost", port=2000):
        # The listening server socket; created lazily by __setup_server_socket.
        self.__server_socket = None
        # The open connections (DebugConnection threads).
        self.__connections = []
        # Any local variables to set for the interactive shell. This is a dict.
        self.__local = local
        # The IP address to serve the connections from.
        self.__host = host
        # The port.
        self.__port = port
        StoppableThread.__init__(self, "debug server thread")

    def run(self):
        """Run the server, accepting new connections and handling them.

        This method does not return until the thread has been stopped.
        """
        # Run until the thread has been stopped.
        while self._run_state.is_running():
            # (Re)create the server socket if we do not have one — also after
            # an accept failure reset it.
            if self.__server_socket is None:
                self.__setup_server_socket()
            # Block (bounded by the socket timeout) until we get a new connection.
            session = self.__accept_connection()
            if session is not None:
                self.__connections.append(session)
                session.start()
            # Clean up any connection threads that have finished.
            if len(self.__connections) > 0:
                remaining_connections = []
                for connection in self.__connections:
                    # BUG FIX: use is_alive(); the camelCase isAlive() alias
                    # was removed in Python 3.9.
                    if connection.is_alive():
                        remaining_connections.append(connection)
                    else:
                        connection.join(1)
                self.__connections = remaining_connections

    def __accept_connection(self):
        """Blocks until a new connection is made or the accept times out.

        @return: The new connection, or None on timeout/error.
        @rtype: DebugConnection
        """
        if self.__server_socket is None:
            return None
        # noinspection PyBroadException
        # TODO: Move this catch out of here.
        try:
            client, addr = self.__server_socket.accept()
            return DebugConnection(self.__local, client, self.__host, self.__port)
        except socket.timeout:
            return None
        except Exception:
            log.exception(
                "Failure while accepting new debug connection. Resetting socket"
            )
            self.__close_socket()
            return None

    def __setup_server_socket(self):
        """Create the server socket, binding to the appropriate address."""
        if self.__server_socket is not None:
            return
        # noinspection PyBroadException
        # TODO: Move this catch out of here.
        try:
            self.__server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.__server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            # Short timeout keeps run()'s loop responsive to stop requests.
            self.__server_socket.settimeout(1)
            self.__server_socket.bind((self.__host, self.__port))
            self.__server_socket.listen(5)
        except Exception:
            # BUG FIX: the old message was copy-pasted from accept(); this
            # failure is in socket creation/binding.
            log.exception(
                "Failure while creating debug server socket. Resetting socket"
            )
            self.__close_socket()

    def __close_socket(self):
        """Shut down and close the underlying server socket."""
        if self.__server_socket is None:
            return
        try:
            self.__server_socket.shutdown(socket.SHUT_RDWR)
        except socket.error:
            pass
        # BUG FIX: actually close the file descriptor; the old code only shut
        # the socket down and dropped the reference (leaking the fd until GC).
        try:
            self.__server_socket.close()
        except socket.error:
            pass
        self.__server_socket = None
class DebugConnection(StoppableThread):
    """Handles a single incoming connection on the server, connecting it to the interactive Python shell.

    This is run as a thread.
    """

    def __init__(self, local, client_connection, host, port):
        """Initializes the connection.

        @param local: The dict of local variables to populate into the environment the interactive shell is run in.
        @param client_connection: The network connection
        @param host: the client's IP address
        @param port: the client's port
        @type local: dict
        @type client_connection: socket.socket
        @type host: six.text_type
        @type port: int
        """
        self.__local = local
        self.__client_connection = client_connection
        self.__host = host
        self.__port = port
        StoppableThread.__init__(self, "Debug connection thread")

    def run(self):
        """Handle the incoming connection, connecting it to the interactive shell.

        Will terminate either when the connection closes or the stop method in this thread is invoked.
        """
        import traceback

        # Wrap the socket so we can tie it to stdin/stdout below.
        link = SocketWrapper(self.__client_connection)
        # Greet the new session with where the process currently is.
        banner = "connected to %s:%d" % (self.__host, self.__port)
        banner += "\nStack Trace\n"
        banner += "----------------------------------------\n"
        # Drop the last two frames (this method and format_stack itself).
        banner += "".join(traceback.format_stack()[:-2])
        banner += "----------------------------------------\n"
        # In order to hook it up to the shell, we need to set the stdin, stdout
        # and stderr global variables.  Luckily, no other agent code should be
        # using these variables when running as a daemon, so we should not have
        # any race conditions before we set them back.
        orig_fds = sys.stdin, sys.stdout, sys.stderr
        sys.stdin, sys.stdout, sys.stderr = link, link, link
        try:
            self.__interactive_shell(banner)
        finally:
            # Restore the original values.
            sys.stdin, sys.stdout, sys.stderr = orig_fds

    def __interactive_shell(self, banner="interactive shell"):
        """Run an interactive shell.

        @param banner: The banner (message) to show the user when the shell starts up
        @type banner: str
        """
        import code

        # Merge any caller-supplied locals over this module's globals so the
        # shell can see both.
        if self.__local:
            local = dict(globals(), **self.__local)
        else:
            local = globals()
        code.interact(banner, local=local)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.where (array_ops.where) and its benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import benchmark
from tensorflow.python.platform import test
class WhereOpTest(test.TestCase):
  """Tests for array_ops.where: nonzero-index form and 3-argument select."""

  def _testWhere(self, x, truth, expected_err_re=None):
    """Evaluates where(x) and checks the result (or the expected error)."""
    with self.cached_session(use_gpu=True):
      ans = array_ops.where(x)
      # where() yields one row per true element with rank(x) columns; the
      # number of rows is unknown statically.
      self.assertEqual([None, x.ndim], ans.get_shape().as_list())
      if expected_err_re is None:
        tf_ans = self.evaluate(ans)
        self.assertAllClose(tf_ans, truth, atol=1e-10)
      else:
        with self.assertRaisesOpError(expected_err_re):
          self.evaluate(ans)

  def testWrongNumbers(self):
    # The 3-argument form requires both branches or neither.
    with self.session(use_gpu=True):
      with self.assertRaises(ValueError):
        array_ops.where([False, True], [1, 2], None)
      with self.assertRaises(ValueError):
        array_ops.where([False, True], None, [1, 2])

  @test_util.run_deprecated_v1
  def testBasicVec(self):
    x = np.asarray([True, False])
    truth = np.asarray([[0]], dtype=np.int64)
    self._testWhere(x, truth)

    x = np.asarray([False, True, False])
    truth = np.asarray([[1]], dtype=np.int64)
    self._testWhere(x, truth)

    x = np.asarray([False, False, True, False, True])
    truth = np.asarray([[2], [4]], dtype=np.int64)
    self._testWhere(x, truth)

  @test_util.run_deprecated_v1
  def testRandomVec(self):
    x = np.random.rand(1000000) > 0.5
    truth = np.vstack([np.where(x)[0].astype(np.int64)]).T
    self._testWhere(x, truth)

  @test_util.run_deprecated_v1
  def testBasicMat(self):
    x = np.asarray([[True, False], [True, False]])
    # Ensure RowMajor mode
    truth = np.asarray([[0, 0], [1, 0]], dtype=np.int64)
    self._testWhere(x, truth)

  @test_util.run_deprecated_v1
  def testBasic3Tensor(self):
    x = np.asarray([[[True, False], [True, False]],
                    [[False, True], [False, True]],
                    [[False, False], [False, True]]])
    # Ensure RowMajor mode
    truth = np.asarray(
        [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1], [2, 1, 1]], dtype=np.int64)
    self._testWhere(x, truth)

  def _testRandom(self, dtype, expected_err_re=None):
    shape = [127, 33, 53]
    # BUG FIX: removed a dead statement that generated an unused complex
    # random array which was immediately overwritten by the line below.
    x = (np.random.randn(*shape) > 0).astype(dtype)
    truth = np.where(np.abs(x) > 0)  # Tuples of indices by axis.
    truth = np.vstack(truth).T  # Convert to [num_true, indices].
    self._testWhere(x, truth, expected_err_re)

  @test_util.run_deprecated_v1
  def testRandomBool(self):
    # BUG FIX: np.bool_ — the np.bool alias was deprecated in NumPy 1.20 and
    # removed in NumPy 1.24.
    self._testRandom(np.bool_)

  @test_util.run_deprecated_v1
  def testRandomInt32(self):
    self._testRandom(np.int32)

  @test_util.run_deprecated_v1
  def testRandomInt64(self):
    self._testRandom(np.int64)

  @test_util.run_deprecated_v1
  def testRandomFloat(self):
    self._testRandom(np.float32)

  @test_util.run_deprecated_v1
  def testRandomDouble(self):
    self._testRandom(np.float64)

  @test_util.run_deprecated_v1
  def testRandomComplex64(self):
    self._testRandom(np.complex64)

  @test_util.run_deprecated_v1
  def testRandomComplex128(self):
    self._testRandom(np.complex128)

  @test_util.run_deprecated_v1
  def testRandomUint8(self):
    self._testRandom(np.uint8)

  @test_util.run_deprecated_v1
  def testRandomInt8(self):
    self._testRandom(np.int8)

  @test_util.run_deprecated_v1
  def testRandomInt16(self):
    self._testRandom(np.int16)

  @test_util.run_deprecated_v1
  def testThreeArgument(self):
    x = np.array([[-2, 3, -1], [1, -3, -3]])
    np_val = np.where(x > 0, x * x, -x)
    with self.session(use_gpu=True):
      tf_val = array_ops.where(constant_op.constant(x) > 0, x * x, -x).eval()
      self.assertAllEqual(tf_val, np_val)

  @test_util.run_deprecated_v1
  def testBatchSelect(self):
    x = np.array([[-2, 3, -1] * 64, [1, -3, -3] * 64] * 8192)  # [16384, 192]
    c_mat = np.array([[False] * 192, [True] * 192] * 8192)  # [16384, 192]
    c_vec = np.array([False, True] * 8192)  # [16384]
    # A vector condition broadcasts across each row of the matrix operands.
    np_val = np.where(c_mat, x * x, -x)
    with self.session(use_gpu=True):
      tf_val = array_ops.where(c_vec, x * x, -x).eval()
      self.assertAllEqual(tf_val, np_val)
class WhereBenchmark(test.Benchmark):
  """Microbenchmarks for both forms of array_ops.where."""

  def benchmarkWhere(self):
    # Sweep the input size and the probability p of an element being True,
    # on both CPU and GPU.
    for (m, n, p, use_gpu) in itertools.product(
        [10],
        [10, 100, 1000, 10000, 100000, 1000000],
        [0.01, 0.5, 0.99],
        [False, True]):
      name = "m_%d_n_%d_p_%g_use_gpu_%s" % (m, n, p, use_gpu)
      device = "/%s:0" % ("gpu" if use_gpu else "cpu")
      with ops.Graph().as_default():
        with ops.device(device):
          x = random_ops.random_uniform((m, n), dtype=dtypes.float32) <= p
          v = resource_variable_ops.ResourceVariable(x)
          op = array_ops.where(v)
        with session.Session(config=benchmark.benchmark_config()) as sess:
          v.initializer.run()
          r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
          gb_processed_input = m * n / 1.0e9
          # approximate size of output: m*n*p int64s for each axis.
          gb_processed_output = 2 * 8 * m * n * p / 1.0e9
          gb_processed = gb_processed_input + gb_processed_output
          throughput = gb_processed / r["wall_time"]
          print("Benchmark: %s \t wall_time: %0.03g s \t "
                "Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
          sys.stdout.flush()

  def benchmarkBatchSelect(self):
    # Benchmarks the 3-argument (select) form with a vector condition.
    for (m, n, use_gpu) in itertools.product([1000, 10000, 100000],
                                             [10, 100, 1000], [False, True]):
      name = "m_%d_n_%d_use_gpu_%s" % (m, n, use_gpu)
      device = "/%s:0" % ("gpu" if use_gpu else "cpu")
      with ops.Graph().as_default():
        with ops.device(device):
          x_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
          y_gen = random_ops.random_uniform([m, n], dtype=dtypes.float32)
          c_gen = random_ops.random_uniform([m], dtype=dtypes.float32) <= 0.5
          x = resource_variable_ops.ResourceVariable(x_gen)
          y = resource_variable_ops.ResourceVariable(y_gen)
          c = resource_variable_ops.ResourceVariable(c_gen)
          op = array_ops.where(c, x, y)
        with session.Session(config=benchmark.benchmark_config()) as sess:
          x.initializer.run()
          y.initializer.run()
          c.initializer.run()
          r = self.run_op_benchmark(sess, op, min_iters=100, name=name)
          # approximate size of output: m*n*2 floats for each axis.
          gb_processed = m * n * 8 / 1.0e9
          throughput = gb_processed / r["wall_time"]
          print("Benchmark: %s \t wall_time: %0.03g s \t "
                "Throughput: %0.03g GB/s" % (name, r["wall_time"], throughput))
          sys.stdout.flush()
if __name__ == "__main__":
  # Runs the test cases above (and the benchmarks when requested).
  test.main()
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Post a try job request via HTTP to the Tryserver to produce build."""
import getpass
import json
import optparse
import os
import sys
import urllib
import urllib2
# Link to get JSON data of builds
BUILDER_JSON_URL = ('%(server_url)s/json/builders/%(bot_name)s/builds/'
                    '%(build_num)s?as_text=1&filter=0')

# Link to display build steps
BUILDER_HTML_URL = ('%(server_url)s/builders/%(bot_name)s/builds/%(build_num)s')

# Tryserver buildbots status page
TRY_SERVER_URL = 'http://build.chromium.org/p/tryserver.chromium.perf'

# Hostname of the tryserver where perf bisect builders are hosted. This is used
# for posting build request to tryserver.
BISECT_BUILDER_HOST = 'master4.golo.chromium.org'

# 'try_job_port' on tryserver to post build request.
BISECT_BUILDER_PORT = 8341

# Build result codes, from buildbot.status.builder.
# See: http://docs.buildbot.net/current/developer/results.html
SUCCESS, WARNINGS, FAILURE, SKIPPED, EXCEPTION, RETRY, TRYPENDING = range(7)

# Status codes that can be returned by the GetBuildStatus method.
# Indicates build completed without fatal errors.
OK = (SUCCESS, WARNINGS)
# Indicates build failure.
FAILED = (FAILURE, EXCEPTION, SKIPPED)
# Indicates build in progress or in pending queue.
PENDING = (RETRY, TRYPENDING)
class ServerAccessError(Exception):
  """Raised when the try server cannot be reached or rejects a request."""

  def __str__(self):
    detail = self.args[0]
    return '%s\nSorry, cannot connect to server.' % detail
def PostTryJob(url_params):
  """Sends a build request to the server using the HTTP protocol.

  Note: this module is Python 2 only (print statements, urllib2).

  Args:
    url_params: A dictionary of query parameters to be sent in the request.
                In order to post build request to try server, this dictionary
                should contain information for following keys:
                'host': Hostname of the try server.
                'port': Port of the try server.
                'revision': SVN Revision to build.
                'bot': Name of builder bot which would be used.

  Returns:
    True if the request is posted successfully. Otherwise throws an exception.
  """
  # Validate the parameters required to reach and drive the try server.
  if not url_params.get('host'):
    raise ValueError('Hostname of server to connect is missing.')
  if not url_params.get('port'):
    raise ValueError('Port of server to connect is missing.')
  if not url_params.get('revision'):
    raise ValueError('Missing revision details. Please specify revision'
                     ' information.')
  if not url_params.get('bot'):
    raise ValueError('Missing bot details. Please specify bot information.')
  # Pop 'host' and 'port' so they are only used to build the URL and are not
  # also sent as query params.
  url = 'http://%s:%s/send_try_patch' % (url_params.pop('host'),
                                         url_params.pop('port'))
  print 'Sending by HTTP'
  query_params = '&'.join('%s=%s' % (k, v) for k, v in url_params.iteritems())
  print 'url: %s?%s' % (url, query_params)
  connection = None
  try:
    print 'Opening connection...'
    # The remaining params are form-encoded into the POST body.
    connection = urllib2.urlopen(url, urllib.urlencode(url_params))
    print 'Done, request sent to server to produce build.'
  except IOError, e:
    raise ServerAccessError('%s is unaccessible. Reason: %s' % (url, e))
  if not connection:
    raise ServerAccessError('%s is unaccessible.' % url)
  # The try server replies with the literal string 'OK' on success.
  response = connection.read()
  print 'Received %s from server' % response
  if response != 'OK':
    raise ServerAccessError('%s is unaccessible. Got:\n%s' % (url, response))
  return True
def _IsBuildRunning(build_data):
  """Checks whether the build is in progress on buildbot.

  A currentStep element that is started but has no results yet indicates the
  build is in progress.

  Args:
    build_data: A dictionary with build data, loaded from buildbot JSON API.

  Returns:
    True if build is in progress, otherwise False.
  """
  current_step = build_data.get('currentStep')
  return bool(current_step
              and current_step.get('isStarted')
              and current_step.get('results') is None)
def _IsBuildFailed(build_data):
  """Checks whether the build failed on buildbot.

  Sometimes a build is marked failed even though the compile and packaging
  steps succeeded (e.g. when minor intermediate steps such as gclient revert
  or generate_telemetry_profile fail).  We therefore also require that
  _IsBuildSuccessful is False before declaring failure.

  Args:
    build_data: A dictionary with build data, loaded from buildbot JSON API.

  Returns:
    True if revision is failed build, otherwise False.
  """
  return (build_data.get('results') in FAILED
          and not _IsBuildSuccessful(build_data))
def _IsBuildSuccessful(build_data):
  """Checks whether the build succeeded on buildbot.

  We treat the build as successful if the 'package_build' step finished with
  a status code of 0 or 1 (SUCCESS or WARNINGS).

  Args:
    build_data: A dictionary with build data, loaded from buildbot JSON API.

  Returns:
    True if revision is successfully built, otherwise False.
  """
  steps = build_data.get('steps') or []
  for step in steps:
    # Each step's 'results' has two elements:
    # results[0]: the status code of the step; see
    #             http://docs.buildbot.net/current/developer/results.html
    # results[1]: list of text items, populated only when the step fails.
    if (step.get('name') == 'package_build'
        and step.get('isFinished')
        and step.get('results')[0] in OK):
      return True
  return False
def _FetchBuilderData(builder_url):
  """Fetches JSON data for the all the builds from the tryserver.

  Args:
    builder_url: A tryserver URL to fetch builds information.

  Returns:
    The raw response body (JSON text) on success, otherwise None.
  """
  data = None
  try:
    url = urllib2.urlopen(builder_url)
  except urllib2.URLError, e:
    # Network failure is treated as "no data"; callers must handle None.
    print ('urllib2.urlopen error %s, waterfall status page down.[%s]' % (
        builder_url, str(e)))
    return None
  if url is not None:
    try:
      data = url.read()
    except IOError, e:
      # A read error also falls through and returns None.
      print 'urllib2 file object read error %s, [%s].' % (builder_url, str(e))
  return data
def _GetBuildData(buildbot_url):
  """Gets build information for the given build id from the tryserver.

  Args:
    buildbot_url: A tryserver URL to fetch build information.

  Returns:
    A dictionary with build information if build exists, otherwise None.
  """
  raw = _FetchBuilderData(buildbot_url)
  if not raw:
    return None
  return json.loads(raw)
def _GetBuildBotUrl(builder_host, builder_port):
  """Gets build bot URL based on the host and port of the builders.

  Note: all bisect builder bots are hosted on tryserver.chromium; since we
  cannot reach that master by host and port directly, we substitute the
  public tryserver URL for it.

  Args:
    builder_host: Hostname of the server where the builder is hosted.
    builder_port: Port number of the server where the builder is hosted.

  Returns:
    URL of the buildbot as a string.
  """
  is_bisect_master = (builder_host == BISECT_BUILDER_HOST
                      and builder_port == BISECT_BUILDER_PORT)
  if is_bisect_master:
    return TRY_SERVER_URL
  return 'http://%s:%s' % (builder_host, builder_port)
def GetBuildStatus(build_num, bot_name, builder_host, builder_port):
  """Gets build status from the buildbot status page for a given build number.

  Args:
    build_num: A build number on tryserver to determine its status.
    bot_name: Name of the bot where the build information is scanned.
    builder_host: Hostname of the server where the builder is hosted.
    builder_port: Port number of the server where the builder is hosted.

  Returns:
    A tuple of a build status (OK, FAILED or PENDING) and a link to the
    build's status page on the waterfall (None when unavailable).
  """
  results_url = None
  if build_num:
    # Resolve the buildbot base URL for the given host and port.
    server_url = _GetBuildBotUrl(builder_host, builder_port)
    url_params = {'server_url': server_url,
                  'bot_name': bot_name,
                  'build_num': build_num}
    build_data = _GetBuildData(BUILDER_JSON_URL % url_params)
    if build_data:
      # Link to the build on the buildbot showing the status of its steps.
      results_url = BUILDER_HTML_URL % url_params
      if _IsBuildFailed(build_data):
        return (FAILED, results_url)
      if _IsBuildSuccessful(build_data):
        return (OK, results_url)
  return (PENDING, results_url)
def GetBuildNumFromBuilder(build_reason, bot_name, builder_host, builder_port):
  """Gets the build number on the status page for a given build reason.

  Parses the JSON data from the buildbot page for all builds and identifies
  the build by its 'reason' attribute, which was set when the build request
  was posted.

  Args:
    build_reason: A unique build name set to build on tryserver.
    bot_name: Name of the bot where the build information is scanned.
    builder_host: Hostname of the server where the builder is hosted.
    builder_port: Port number of the server where the builder is hosted.

  Returns:
    A build number as a string if found, otherwise None.
  """
  server_url = _GetBuildBotUrl(builder_host, builder_port)
  buildbot_url = BUILDER_JSON_URL % {'server_url': server_url,
                                     'bot_name': bot_name,
                                     'build_num': '_all'}
  builds_json = _FetchBuilderData(buildbot_url)
  if not builds_json:
    return None
  builds_data = json.loads(builds_json)
  for build_id in builds_data:
    build = builds_data[build_id]
    if build.get('reason') == build_reason:
      return build.get('number')
  return None
def _GetQueryParams(options):
  """Parses common query parameters which will be passed to PostTryJob.

  Args:
    options: The options object parsed from the command line.

  Returns:
    A dictionary of query parameters; optional fields are included only when
    they were supplied on the command line.
  """
  values = {
      'host': options.host,
      'port': options.port,
      'user': options.user,
      'name': options.name,
  }
  optional = (('email', options.email),
              ('revision', options.revision),
              ('root', options.root),
              ('bot', options.bot),
              ('patch', options.patch))
  for key, value in optional:
    if value:
      values[key] = value
  return values
def _GenParser():
  """Builds the command-line option parser for posting a build request."""
  usage = ('%prog [options]\n'
           'Post a build request to the try server for the given revision.\n')
  parser = optparse.OptionParser(usage=usage)
  parser.add_option('-H', '--host',
                    help='Host address of the try server.')
  parser.add_option('-P', '--port', type='int',
                    help='HTTP port of the try server.')
  parser.add_option('-u', '--user', default=getpass.getuser(),
                    dest='user',
                    help='Owner user name [default: %default]')
  # Default recipient comes from the environment so bots can be configured
  # without command-line flags.
  parser.add_option('-e', '--email',
                    default=os.environ.get('TRYBOT_RESULTS_EMAIL_ADDRESS',
                                           os.environ.get('EMAIL_ADDRESS')),
                    help=('Email address where to send the results. Use either '
                          'the TRYBOT_RESULTS_EMAIL_ADDRESS environment '
                          'variable or EMAIL_ADDRESS to set the email address '
                          'the try bots report results to [default: %default]'))
  parser.add_option('-n', '--name',
                    default='try_job_http',
                    help='Descriptive name of the try job')
  parser.add_option('-b', '--bot',
                    help=('IMPORTANT: specify ONE builder per run is supported.'
                          'Run script for each builders separately.'))
  parser.add_option('-r', '--revision',
                    help=('Revision to use for the try job; default: the '
                          'revision will be determined by the try server; see '
                          'its waterfall for more info'))
  parser.add_option('--root',
                    help=('Root to use for the patch; base subdirectory for '
                          'patch created in a subdirectory'))
  parser.add_option('--patch',
                    help='Patch information.')
  return parser
def Main(argv):
  """Posts a try-job build request based on command-line arguments.

  Args:
    argv: Full argument vector (argv[0] is the program name).

  Raises:
    ServerAccessError: If --host/--port are missing or the request fails.
  """
  parser = _GenParser()
  # BUG FIX: honor the argv parameter instead of implicitly reading
  # sys.argv (unchanged behavior for the __main__ caller, which passes
  # sys.argv).
  options, _ = parser.parse_args(argv[1:])
  if not options.host:
    raise ServerAccessError('Please use the --host option to specify the try '
                            'server host to connect to.')
  if not options.port:
    raise ServerAccessError('Please use the --port option to specify the try '
                            'server port to connect to.')
  params = _GetQueryParams(options)
  PostTryJob(params)
if __name__ == '__main__':
  # Main returns None on success, which sys.exit treats as exit code 0.
  sys.exit(Main(sys.argv))
| |
#!/usr/bin/env python
"""Command-line program to read a fto statistics csv and output a png graph"""
import argparse
import errno
import logging
import sys
from operator import itemgetter
import io
import warnings
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
# pylint: disable=unused-import
from typing import Iterable, Hashable, Any, Union, IO, Optional # NOQA
import pandas as pd
import requests
import matplotlib
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# The backend choice must be called before pyplot import
matplotlib.use('Agg')
import matplotlib.pyplot as plt
__all__ = ['main', 'generate_figure', 'load_dataframe']
# pylint: disable=invalid-name
log = logging.getLogger(__name__)
class Error(Exception):
    """Base class for every error raised by this module."""
class CSVNotReadError(Error):
    """Error for failing to retrieve data from `input_csv`"""

    def __init__(self, msg, errno_):
        # type: (str, int) -> None
        super(CSVNotReadError, self).__init__(msg)
        # Human-readable description of the failure.
        self.msg = msg
        # Numeric error code — presumably an OS errno value; confirm at
        # the raise sites.
        self.errno = errno_
class CSVNotFoundError(Error):
    """Error when the `input_csv` argument is not found"""
    pass
class InvalidCSVError(Error):
    """Error when the data inside the csv doesn't make sense."""
    pass
def main():
    # () -> None
    """Cli interface to this module."""
    plt.style.use('bmh')
    logging.basicConfig(level=logging.INFO)
    vargs = vars(parse_args())
    try:
        run(**vargs)
    except KeyboardInterrupt as e:
        log.debug(e, stack_info=True)
        log.info("Keyboard Cancelled operation")
    except Error as e:
        log.debug(e, stack_info=True)
        log.error("Could not generate figure: %s", str(e))
        sys.exit(1)
    else:
        # BUG FIX: only report success when run() actually completed; the old
        # code printed this summary even after a KeyboardInterrupt.
        print("%s => %s" % itemgetter('input_csv', 'output_filename')(vargs))
def parse_args():
    # () -> argparse.Namespace
    """Parse command-line arguments into a namespace.

    Returns an argparse.Namespace with input_csv, output_filename and
    verbose attributes.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('input_csv', help='path to fto statistic CSV')
    parser.add_argument(
        'output_filename', nargs='?',
        help='output PNG filename', default='output.png'
    )
    parser.add_argument(
        '--verbose', help='Turn debug output on.', action='store_true')
    return parser.parse_args()
def get_mapped_vals(data_dict, keys):
    # type: (dict, Iterable[Hashable]) -> list
    """Get the data from each key in `keys` from `data_dict`.

    Args:
        data_dict(dict): A container which must contain all of `keys`
        keys(list): A list of keys to pull from `data_dict`

    Returns:
        A list of in-order values for `keys`
    """
    return [data_dict[key] for key in keys]
def run(input_csv, # type: Union[str, IO]
        output_filename=None, # type: Optional[str]
        verbose=False # type: bool
        ): # pylint: disable=bad-continuation
    # type: (...) -> plt.Figure
    """Load the csv, build the figure, and optionally write it to disk.

    Args:
        input_csv(str|fh): The input csv to read from. May be a filesystem
            path or a http/https url.
        output_filename(str|None): Relative or absolute path for the figure
            output file. If Falsy, only return the figure without writing.
            Default: None
        verbose(bool): If true, also log debug output to stdout.
            Default: False

    Returns:
        A copy of the figure
    """
    if verbose:
        # Raise verbosity on both the root logger and this module's logger.
        logging.getLogger().setLevel(logging.DEBUG)
        log.setLevel(logging.DEBUG)
    figure = generate_figure(load_dataframe(input_csv))
    if output_filename:
        figure.savefig(output_filename)
    return figure
def load_dataframe(csv_path_or_buffer):
    # type: (Union[str, IO]) -> pd.DataFrame
    """Load a pd.DataFrame from a csv path, URL, or open file object.

    The first column should be the Date column.

    Args:
        csv_path_or_buffer(str|IO): Path to an existing csv on the
            filesystem, an http/https url to a csv resource, or a
            file-like object implementing readline/seek.

    Returns:
        A `pd.DataFrame` with the column names 'Birth Queue',
        'Population', and 'Pregnant Mothers'. Indexed by date of
        occurance accurate up to an hour.

    Raises:
        ValueError if `csv_path_or_buffer` is neither a str nor file-like.
        CSVNotFoundError if the path does not exist (ENOENT).
        CSVNotReadError if the data cannot be read for any other OS reason.
        InvalidCSVError if the csv is missing an expected column.
    """
    columns = ['Date', 'Birth Queue', 'Population', 'Pregnant Mothers']
    line = ""
    csv_path = None # type: Optional[str]
    csv_fh = None # type: Optional[IO]
    if isinstance(csv_path_or_buffer, str):
        csv_path = csv_path_or_buffer
        parse_result = urlparse(csv_path)
    elif hasattr(csv_path_or_buffer, "readline"):
        # Caller supplied an open file-like object.
        # NOTE(review): it must also be seekable (seek(0) below) -- confirm.
        csv_fh = csv_path_or_buffer
    else:
        raise ValueError(
            "csv_path_or_buffer must be str or IO object, but got {}"
            .format(type(csv_path_or_buffer).__name__))
    try:
        if csv_path is not None and parse_result.scheme in ["http", "https"]:
            # Remote csv: download into an in-memory buffer.
            response = requests.get(csv_path)
            csv_fh = io.StringIO(response.text)
        elif csv_path is not None:
            csv_fh = open(csv_path)
        with csv_fh:
            # Sniff the first line for a header, then rewind for the parse.
            line = csv_fh.readline()
            names = None if substrs_in_line(columns, line) else columns
            csv_fh.seek(0)
            # Supply column names if not present
            # If present, do not supply column names
            # since supplying column names while having
            # column names in the csv leads to a row for data
            # with column names.
            df = pd.read_csv(csv_fh, names=names)
    except (OSError, IOError) as e:
        log.debug(e)
        if e.errno == errno.ENOENT:
            raise CSVNotFoundError(
                "Could not find csv at %s" % csv_path_or_buffer, e.errno)
        else:
            raise CSVNotReadError(
                "Could not retrieve data from csv %s"
                % csv_path_or_buffer, e.errno)
    verify_dataframe(df, columns)
    df_adjusted = adjust_from_csv(df)
    return df_adjusted
def adjust_from_csv(fto_df):
    # type: (pd.DataFrame) -> pd.DataFrame
    """Normalize the raw fto dataframe read from csv.

    The 'Date' column becomes a datetime index (and is dropped as a
    column), and the off-by-one 'Pregnant Mothers' count is corrected.

    Returns:
        A new dataframe with the above changes.
    """
    date_index = pd.to_datetime(fto_df['Date'], format='%m/%d/%y-%H')
    adjusted = fto_df.drop("Date", axis=1)
    adjusted.index = date_index
    # Upstream bug: the pregnant-mother count can be shifted up by one;
    # a minimum of exactly 1 is the tell.
    mothers = adjusted["Pregnant Mothers"]
    if mothers.min() == 1:
        adjusted["Pregnant Mothers"] = mothers - 1
    return adjusted
def verify_dataframe(fto_df, columns):
    # type: (pd.DataFrame, Iterable[str]) -> None
    """Check that every name in `columns` is a column of `fto_df`.

    Raises:
        InvalidCSVError if `fto_df` is missing any of the
        "Date", "Birth Queue", "Population", or "Pregnant Mothers" columns
    """
    # Pylint is confused by fto_df.columns
    # pylint: disable=no-member
    present = set(fto_df.columns)
    missing = [col for col in columns if col not in present]
    if missing:
        # Report the first missing column, matching the original
        # first-failure behaviour.
        raise InvalidCSVError(
            "Column '%s' not present in input csv but should be" % (missing[0]))
def substrs_in_line(items, line):
    # type: (Iterable[str], str) -> bool
    """Returns True if `line` contains every item from `items` else False.

    (The previous docstring said "any", but the implementation -- and the
    header-detection caller in load_dataframe -- require *all* substrings
    to be present.)

    Args:
        items(list[str]): A container of substrings to test.
        line(str): A string which should contain every string in `items`

    Returns:
        A bool indicating whether `line` contains all of `items`"""
    return all(item in line for item in items)
def generate_figure(df):
    # type: (pd.DataFrame) -> plt.Figure
    """Generates a matplotlib.figure.Figure from `df`.

    Takes the fto-data dataframe and plots Population and Birth Queue on a
    shared upper subplot (two y-axes) and Pregnant Mothers on a lower
    subplot.

    Args:
        df(pd.DataFrame): The fto dataframe indexed by datetime.

    TODO:
        Remove 0 label for pregnant mothers

    Returns:
        The generated matplotlib figure.
    """
    matplotlib.rcParams.update({'font.size': 22})
    fig = plt.figure(figsize=(20, 15))
    # Population
    pop_label = "Population"
    pop_color = 'r'
    # Use 2/3 of grid
    ax = plt.subplot2grid((3, 1), (0, 0), rowspan=2, label=pop_label)
    ax.set_ylabel(pop_label, color=pop_color)
    ax.plot(df['Population'], color=pop_color, clip_on=False, linewidth=5)
    # Birth Queue
    # Generate a secondary y-axis (shared x) for the top subplot
    ax_secondary = ax.twinx()
    birth_queue_label = 'Birth Queue'
    birth_queue_color = 'b'
    ax_secondary.set_ylabel(birth_queue_label, color=birth_queue_color)
    ax_secondary.plot(df['Birth Queue'],
                      color=birth_queue_color, clip_on=False, linewidth=5)
    # Pregnant Mothers
    preg_label = "Pregnant Mothers"
    # Use lower 1/3 of graph
    ax_lower = plt.subplot2grid((3, 1), (2, 0), rowspan=1)
    ax_lower.set_ylabel(preg_label)
    ax_lower.plot(df['Pregnant Mothers'],
                  color='g', clip_on=False, linewidth=5)
    # Start pregnant mothers y-axis at 0 even though there might not be
    # 0 pregnant mothers
    ax_lower.set_ylim(0, ax_lower.get_ylim()[1])
    # Autotilt dates
    fig.autofmt_xdate()
    # Remove 'half' mother labels which don't make sense
    # and 0 mother label which collides with the date formatting.
    tick_labels = ["" if not tick.is_integer() or tick == 0 else int(tick)
                   for tick in ax_lower.get_yticks()]
    ax_lower.set_yticklabels(tick_labels)
    return fig
if __name__ == "__main__":
main()
| |
'''
Created on 20 Apr 2013
@author: Kieran Finn
'''
'''
Created on 19 Feb 2013
@author: Kieran Finn
'''
import time
beginning=time.time()  # wall-clock start, reported at the end of the run
import pylab as p
import urllib2
import sys
import numpy as np
import json
from glob import glob
import cPickle as pickle
from functions import *
from random import random
import mean
import ibcc
from collections import defaultdict
import os
import itertools
import settings
import shelve
# Input/output locations (hard-coded Windows paths for this analysis run).
folder='D:/Documents/Planet_hunters/results/05_Mar_2013_18_27/'
#folder='full_data_28_01_13'
fname='investigate_full.csv'
data_folder='D:/Documents/Planet_hunters/ph-stars/'
#data_folder='/Users/kieranfinn/Documents/ph-stars/'
out_folder="D:/Documents/Planet_hunters/results/"
out=fname.split('.')[0]+'_transits.dat'
# On-disk caches (shelves persist between runs).
zero_dict=shelve.open('zeros.dat')
transit_dict=shelve.open('transit_details.dat')
simulation_dict=shelve.open('simulation_transit_details.dat')
repetition_file=open('repetition.csv','w')
# Friends-of-friends linking distance and maximum credible transit depth.
linking_length=0.2
max_allowed_depth=0.05
all_transits={}#a dictionary which contains the details of every transit
source_labels={}
light_curve_details={}
# Label categories used throughout (also the bar order in barchart()).
keys=['planet','eb','simulation','candidate']
def barchart(dictionary,colour='b'):
    '''creates a barchart from a dictionary dictionary={label:frac}'''
    width=0.35
    labels=list(keys)
    fracs=[dictionary[key] for key in keys]
    indices=np.arange(len(labels))
    p.bar(indices,fracs,width,color=colour)
    p.xticks(indices+width/2., labels )
    return True
def open_url(url):
    '''try the request up to three times before giving up (False on failure)'''
    for _ in range(3):
        try:
            return urllib2.urlopen(url)
        except:
            pass
    return False
def zero(release_id,url):
    '''work out the time offset for a light curve release'''
    offset=131.511
    if release_id=='1.0':
        return offset
    major,minor=release_id.split('.')
    if major=='2':
        return offset+30*float(minor)
    # Anything else: ask the data file's metadata for its start time.
    meta=read_url(url,meta=True)
    try:
        return meta['start_time']
    except:
        return False
def read_url(url,meta=False):
    '''fetch a json data file, caching it on disk under data_folder.

    Returns the 'data' section by default, the 'meta_data' section when
    meta=True, [] if the download fails, {} if the json cannot be parsed.
    '''
    fname=data_folder+url.split('/')[-1]
    # Download only if we have no (non-empty) cached copy.
    if not os.path.isfile(fname) or os.path.getsize(fname)==0:
        print '\n%s not downloaded' %fname
        try:
            # NOTE(review): open_url returns False on repeated failure, in
            # which case f.read() raises and lands in the bare except below.
            f=open_url(url)
            g=open(fname,'w')
            g.write(f.read())
            g.close()
            f.close()
        except:
            print 'Problem with url %s' %url
            return []
    f=open(fname,'r')
    r=f.read()
    f.close()
    if r[0]=='l':
        r=r[17:].rstrip('\n;)') #there is a problem with some of the json files for the data. need to strip some of the header
    try:
        out=json.loads(r)
    except:
        print 'error reading json file'
        print r[:100]+'...'+r[-100:]
        return {}
    try:
        if meta:
            out=out['meta_data']
        else:
            out=out['data']#gets rid of meta data if there
    except:
        # Payload had no data/meta_data wrapper: return it as-is.
        pass
    return out
def get_scores(folder):
    '''load every pickled score file in folder, keyed by method name'''
    out={}
    for fname in glob(folder+'*.dat'):
        name=fname.split('\\')[-1].split('.')[0]
        handle=open(fname,'r')
        out[name]=pickle.load(handle)
        handle.close()
    return out
class box():
    '''A user-drawn rectangle on a light curve (one classification mark).'''
    def __init__(self,x,y,width,height,user):
        # Store the rectangle as its two corners plus the marking user.
        self.x1=x
        self.x2=x+width
        self.y1=y
        self.y2=y+height
        self.user=user
        self.centre=x+0.5*width
        # Random 32-byte token used purely as a unique identifier.
        self.id=os.urandom(32)
    def points(self):
        '''corner coordinates ordered to draw a closed outline'''
        x=[self.x1,self.x1,self.x2,self.x2,self.x1]
        y=[self.y1,self.y2,self.y2,self.y1,self.y1]
        return [x,y]
    def contains(self,x,y):
        '''[self.user] if (x,y) falls inside the box, else []'''
        x=self.x1<=x<=self.x2
        y=self.y1<=y<=self.y2
        if (x and y):
            return [self.user]
        else:
            return []
    def plot(self):
        # Draw the box outline on the current pylab figure.
        x,y=self.points()
        p.plot(x,y)
    def get_data_points(self,data):
        '''data points whose x lies strictly inside the box, plus the
        contiguous index range they span'''
        out=[]
        indices=[]
        for i in range(len(data)):
            if self.x1<float(data[i]['x'])<self.x2:
                indices.append(i)
                out.append(data[i])
        if len(indices)!=0:
            # Fill any gaps so the indices form one contiguous run.
            indices=range(min(indices),max(indices)+1)
        return [out,indices]
    def refine_indices(self,indices,length):
        '''pad `indices` symmetrically until at least min_length points are
        available for the ambient-light fit, clamped to [0, length)'''
        min_length=100#minimum number of points required to set ambient light
        if len(indices)>min_length:
            return indices
        low=min(indices)
        high=max(indices)+1
        to_add=min_length-len(indices)
        # NOTE: Python 2 range() returns a list, so del/append below work.
        front=range(low-(int(to_add/2)+1),low)
        back=range(high,high+int(to_add/2)+1)
        while len(front)!=0 and front[0]<0:#ensures data doesn't go out of range
            del front[0]
            back.append(back[-1]+1)
        while len(back)!=0 and back[-1]>=length:
            del back[-1]
            front=[front[0]-1]+front
        return front+indices+back
    def get_depth(self,data):
        '''estimate the fractional transit depth inside this box.

        Fits a line to the (padded) surrounding points as the ambient
        level, then returns the largest residual reached by two
        consecutive points; 0 if the box holds no data.'''
        points,indices=self.get_data_points(data)
        if len(points)==0:
            return 0
        indices=self.refine_indices(indices,len(data))
        x=[]
        y=[]
        for i in indices:
            x.append(float(data[i]['x']))
            y.append(float(data[i]['y']))
        m,c=np.polyfit(x,y,1)#fit a line to the ambient data
        x=np.array(x)
        y=np.array(y)
        expected=m*x+c
        residuals=(expected-y)/expected
        mx=-np.inf
        # Require two consecutive low points so single outliers don't count.
        for i in range(len(residuals)-1):
            current=min([residuals[i],residuals[i+1]])
            if current>mx:
                mx=current
        return mx
    def pickleable(self):
        '''plain-dict form of this box, safe to pickle'''
        return {'id':self.id,
                'x1':self.x1,
                'x2':self.x2,
                'y1':self.y1,
                'y2':self.y2,
                'user':self.user}
class transit(box):
    '''An aggregated transit: the average of a group of user boxes.'''
    def __init__(self,group):
        # Average the centre and the width over every box in the group.
        centre=sum([box.centre for box in group])/len(group)
        width=sum([box.x2-box.x1 for box in group])/len(group)
        self.x1=centre-0.5*width
        self.x2=centre+0.5*width
        self.centre=centre
        self.y1=0
        self.y2=2 #these are probably unessesery but include them so I can use all the box methods
        self.users=set([box.user for box in group])
        self.id='%.3f' %centre#need a unique id which can identify the transit so use x coordinate
    def contains(self,x):
        # NOTE: narrower signature than box.contains(x, y); returns a bool
        # rather than a list of users.
        return self.x1<=x<=self.x2
    def pickleable(self):
        '''plain-dict form of this transit, safe to pickle'''
        return {'id':self.id,
                'x1':self.x1,
                'x2':self.x2,
                'y1':self.y1,
                'y2':self.y2,
                'users':self.users}
class light_curve():
    '''One light curve of a source: the user boxes drawn on it, the raw
    photometry, and the transits aggregated from the boxes.'''
    def __init__(self,light_curve_id,url,release_id):
        self.id=light_curve_id
        self.url=url
        self.release_id=release_id
        self.boxes=[]
        self.users=[]
        # Spatial hash: bin index -> boxes, for fast friends-of-friends.
        self.box_tree=defaultdict(list)
        self.box_resolution=2*linking_length
        self.zero=self.get_zero()
        # Lazily-populated caches (False = not loaded yet).
        self.data=False
        self.scores=False
        self.transits=False
        self.kind=False
        # Register globally so the output files can be rebuilt later.
        light_curve_details[self.id]=[url,release_id]
    def add_box(self,x,y,width,height,user):
        '''record a user's marked rectangle; the user is counted even if
        the box itself is rejected'''
        try:
            x=float(x)
            y=float(y)
            width=float(width)
            height=float(height)
            if width<2:#ignore any boxes that are too wide
                new_box=box(x,y,width,height,user)
                self.boxes.append(new_box)
                index=int(np.round(new_box.centre/self.box_resolution))
                self.box_tree[index].append(new_box)
        except:
            # Non-numeric input: drop the box silently.
            pass
        self.users.append(user)
    def get_data(self):
        '''download (or reuse cached) photometry, shifted so x starts at 0'''
        if not self.data:
            self.data=read_url(self.url)
            offset=float(self.data[0]['x'])
            for i in range(len(self.data)):
                self.data[i]['x']=float(self.data[i]['x'])-offset#takes account of some files which are written in BJD
        return self.data
    def get_zero(self):
        '''time offset of this light curve, cached in the zeros shelf'''
        try:
            return zero_dict[self.id]
        except KeyError:
            out=zero(self.release_id,self.url)
            if out:
                zero_dict[self.id]=out
                return out
            else:
                print 'error calculating zero for light curve '+self.id
                return 0
    def score(self,x,y):
        '''number of distinct users whose boxes contain the point (x,y)'''
        out=[]
        for box in self.boxes:
            out+=box.contains(x,y)
        return len(set(out))
    def get_scores(self):
        '''per-data-point user counts, cached after the first call'''
        if not self.scores:
            self.scores=[]
            for point in self.get_data():
                try:
                    self.scores.append(self.score(float(point['x']),float(point['y'])))
                except:
                    print point
                    print 'error with point'
                    continue
        return self.scores
    def find_transits(self):
        '''uses friends of friends to catagorize the marked boxes into transits'''
        if not self.transits:
            # NOTE(review): copy() presumably comes from the
            # `from functions import *` wildcard -- assumed shallow; confirm.
            to_add=copy(self.box_tree)
            out=[]
            while True:
                try:
                    # Seed a new group with any box not yet assigned.
                    a=itertools.chain(*to_add.values()).next()
                    group=[a]
                except StopIteration:
                    break
                # Grow the group: group is extended while being iterated,
                # so newly linked boxes are themselves expanded.
                for box in group:
                    index=int(box.centre/self.box_resolution)
                    for i in [index,index+np.sign(box.centre)]:#int truncates towards zero, so also check the neighbouring bin
                        temp=copy(to_add[i])
                        for new in to_add[i]:
                            if abs(new.centre-box.centre)<linking_length:
                                temp.remove(new)
                                group.append(new)
                        to_add[i]=temp
                tran=transit(set(group))
                out.append(tran)
                all_transits[self.id+'_'+tran.id]=[tran.x1,tran.x2-tran.x1]
            self.transits=out
        return self.transits
    def get_label(self,tran):
        '''label a transit: '1' if it matches a known planet/simulation
        event, '0' otherwise ('0' for plain candidates)'''
        if not self.kind:
            self.kind='candidate'#may do something with kind in the future
            # Membership in the shelves decides planet vs simulation.
            try:
                transit_dict[self.id]
                self.kind='planet'
            except:
                pass
            try:
                simulation_dict[self.id]
                self.kind='simulation'
            except:
                pass
        if self.kind=='candidate':
            out='0'
        elif self.kind=='planet':
            out=False
            for planet in transit_dict[self.id]:
                epoch,period=planet
                # Fold the marked time onto the nearest predicted transit.
                t=self.zero+tran.centre
                t=epoch + np.round((t-epoch)/period)*period -self.zero
                out=out or tran.contains(t)
            out=str(int(out))
        elif self.kind=='simulation':
            out=False
            for centre in simulation_dict[self.id]:
                out=out or tran.contains(centre)
            out=str(int(out))
        else:
            print 'error finding label'
            out='0'
        return out
    def output(self):
        '''per-transit (objects, people, scores, labels) rows: 1 for users
        who marked the transit, 0 for users who saw it and did not'''
        objects=[]
        people=[]
        scores=[]
        labels=[]
        users=set(self.users)
        for tran in self.find_transits():
            people+=list(tran.users)
            people+=list(users.difference(tran.users))
            scores+=[1 for i in range(len(tran.users))]+[0 for i in range(len(users)-len(tran.users))]
            objects+=[self.id+'_'+tran.id for i in range(len(users))]
            label=self.get_label(tran)
            labels+=[label for i in range(len(users))]
        return (objects,people,scores,labels)
    def cleanup(self):
        '''drop boxes that look like eclipsing-binary dips; returns
        (any box survived, centres of the rejected primaries)'''
        out=False
        primaries=[]
        temp=[]
        for tran in self.boxes:
            depth=tran.get_depth(self.get_data())
            if depth<0 or depth>max_allowed_depth:#a binary transit
                primaries.append(tran.centre+self.get_zero())
            else:
                temp.append(tran)
                out=True
        self.boxes=temp#shouldn't remove objects for a list being iterated over
        self.data=False#this should free up some memory. Hopefully.
        return (out,primaries)
    def pickleable(self):
        '''plain-dict form of this light curve and its boxes'''
        out={'id':self.id,
             'url':self.url,
             'release_id':self.release_id,
             'users':self.users}
        boxes=[bx.pickleable() for bx in self.boxes]
        out['boxes']=boxes
        return out
    def plot(self,clean=False,transits=False):
        '''errorbar plot of the photometry, optionally with the user boxes
        (default) or the aggregated transits overlaid'''
        p.figure()
        x=[]
        y=[]
        dy=[]
        for point in self.get_data():
            try:
                x.append(float(point['x']))
                y.append(float(point['y']))
                dy.append(float(point['dy']))
            except:
                print point
                print 'error with point'
                continue
        p.errorbar(x,y,yerr=dy,fmt='ro')
        p.title('light curve id: %s\nrelease id: %s' %(self.id,self.release_id))
        if not clean:
            to_plot= self.find_transits() if transits else self.boxes
            for box in to_plot:
                box.plot()
class source():
    '''A star: the set of light curves (and marks) recorded for it.'''
    def __init__(self,source_id,label):
        self.id=source_id
        self.light_curves={}
        self.label=label.strip('"')
    def get_light_curve(self,light_curve_id,url,release_id):
        '''fetch (creating on first use) the light_curve with this id'''
        if light_curve_id not in set(self.light_curves.keys()):
            self.light_curves[light_curve_id]=light_curve(light_curve_id,url,release_id)
        return self.light_curves[light_curve_id]
    def output(self):
        '''concatenated per-light-curve output rows, with object ids
        prefixed by this source's id'''
        objects=[]
        people=[]
        labels=[]
        scores=[]
        for lc in self.light_curves.values():
            new_objects,new_people,new_scores,new_labels=lc.output()
            objects+=new_objects
            people+=new_people
            scores+=new_scores
            labels+=new_labels
        objects=[self.id+'_'+obj for obj in objects]
        return (objects,people,scores,labels)
    def get_centres(self):
        '''sorted absolute centre times of every marked box, plus a
        lookup from centre time back to its (light_curve, box)'''
        centres=[]
        lookup={}
        for lc in self.light_curves.values():
            for tran in lc.boxes:
                t=tran.centre+lc.get_zero()
                centres.append(t)
                lookup[t]=(lc,tran)
        centres=np.sort(centres)
        return (centres,lookup)
    def get_period(self,centres):
        '''best candidate period: the consecutive gap whose folding leaves
        the smallest total phase offset (False if fewer than 2 centres)'''
        if len(centres)<2:
            return False
        periods=[centres[i+1]-centres[i] for i in range(len(centres)-1)]
        scores={}
        for i in range(len(periods)):
            score=0
            for centre in centres:
                to_add=(abs(centres[i]-centre)%periods[i])/periods[i]
                if to_add>0.5: #takes account of measured transit being closer to the next actual transit
                    to_add=1.0-to_add
                score+=to_add
            scores[periods[i]]=score
        period=min(scores, key=scores.get)
        return period
    def remove_secondaries(self,primaries):
        '''drop marks that phase up with eclipsing-binary primaries.

        Returns False when every marked transit is a secondary (i.e. the
        whole source should be rejected), True otherwise.'''
        centres,lookup=self.get_centres()
        if len(centres)<2:
            return True
        # NOTE(review): get_period returns False for <2 primaries, which
        # would make the division below raise -- confirm callers guarantee
        # at least two primaries here.
        period=self.get_period(primaries)
        mx=-np.inf
        # Find the phase (start) that aligns the most marks with the period.
        for start in centres:
            temp=[]
            score=0
            for end in centres:
                lc,tran=lookup[end]
                t=np.round((end-start)/period)*period+start
                if len(tran.contains(t-lc.get_zero(),1)):
                    score+=1
                    temp.append(end)
            if score>mx:
                mx=score
                to_remove=temp
        if abs(mx-len(primaries))<=1 or float(abs(mx-len(primaries)))/len(primaries)<0.1:
            if len(centres)==mx:#all marked transits are secondaries
                return False
            for centre in to_remove:
                lc,tran=lookup[centre]
                lc.boxes.remove(tran)
            return True
        return True
    def cleanup(self):
        '''run depth cuts on every light curve and strip secondaries;
        True if the source remains a viable candidate'''
        out=False
        primaries=[]
        for lc in self.light_curves.values():
            temp,prim=lc.cleanup()
            primaries+=prim
            out=out or temp#source is real candidate if ANY light curves pass the test
        if len(primaries):
            out=out and self.remove_secondaries(primaries)
        return out
    def look_for_repeated(self):
        '''find the best-folding phase and remove marks that do not repeat
        with it; phase offsets are logged to repetition_file.

        Returns False when there are too few marks or no phase folds well
        enough, True once off-phase marks have been removed.'''
        allowance=0.2
        centres,lookup=self.get_centres()
        if len(centres)<3:
            return False
        period=self.get_period(centres)
        start_scores=[]
        for start in centres:
            out=0
            for i in centres:
                to_add=(abs(start-i)%period)/period
                if to_add>0.5: #takes account of measured transit being closer to the next actual transit
                    to_add=1.0-to_add
                out+=to_add
            start_scores.append(out)
        if min(start_scores)/(period*(len(centres)-1))>allowance:
            return False
        i=start_scores.index(min(start_scores))
        start=centres[i]
        for i in centres:
            temp=(abs(start-i)%period)/period
            if temp>0.5:
                temp=1.0-temp
            repetition_file.write('%s,%f\n' %(self.label,temp))
            if temp>allowance:
                lc,tran=lookup[i]
                lc.boxes.remove(tran)
        return True
    def pickleable(self):
        '''plain-dict form of this source and its light curves'''
        out={'id':self.id,
             'label':self.label}
        lcs=[lc.pickleable() for lc in self.light_curves.values()]
        out['light_curves']=lcs
        return out
    def plot(self,clean=False,transits=False):
        '''print this source's details and plot every light curve.

        NOTE(review): `scores` here is the module-level dict loaded by
        get_scores() in the main program, not a local.'''
        print 'Details for source number %s' %self.id
        print 'Label: %s' %self.label
        for method in scores.keys():
            print '%s: %.4f' %(method, scores[method][self.id])
        for light_curve_id in self.light_curves.keys():
            self.light_curves[light_curve_id].plot(clean=clean,transits=transits)
    def get_depths(self):
        '''mean fitted depth over every box of every light curve'''
        out=[]
        for lc in self.light_curves.values():
            for tran in lc.boxes:
                out.append(tran.get_depth(lc.get_data()))
        return float(sum(out))/len(out) #may want to change that to median
def read_data(fname):
    '''parse the classifications csv into a dict of source objects.

    Each row is: source_id, light_curve_id, user_id, label, url,
    release_id, x, y, width, height. Also fills the module-level
    source_labels dict as a side effect.
    '''
    out={}
    f=open(fname,'r')
    r=f.readlines()
    f.close()
    count=0
    for line in r:
        # Progress indicator every 1000 rows (overprint comes from functions).
        if count%1000==0:
            overprint('processing line %s of %s' %(add_comma(count),add_comma(len(r))))
        count +=1
        source_id,light_curve_id,user_id,label,url,release_id,x,y,width,height=line.strip().split(',')
        source_labels[source_id]=label
        url=url.strip('"')
        if source_id not in set(out.keys()):
            out[source_id]=source(source_id,label)
        lc=out[source_id].get_light_curve(light_curve_id,url,release_id)
        lc.add_box(x,y,width,height,user_id)
    print '\n'
    return out
def make_list(fname,sources):
    '''write a per-source transit-count csv and tally sources by label'''
    label_counts=defaultdict(int)
    f=open(fname,'w')
    for source_id,src in sources.items():
        label_counts[src.label]+=1
        n_boxes=sum(len(lc.boxes) for lc in src.light_curves.values())
        f.write('%s,%d,%s\n' %(source_id,n_boxes,source_labels[source_id]))
    f.close()
    return label_counts
'''main program: build transit rows from the classifications csv, write
them for downstream scoring, then re-derive depths for depth_list.csv'''
# Create a unique, timestamped output directory.
dir_name=out_folder+date_string()#have this as a new folder to avoid overwriting old files
orig_dir=dir_name
end=1
while True:
    try:
        os.mkdir(dir_name)
        break
    except WindowsError:
        # Directory already exists: append a numeric suffix and retry.
        dir_name=orig_dir+'_%02d' %end
        end+=1
settings.dir_name=dir_name
print 'Storing data in %s' %dir_name
print 'finding previous scores from %s' %folder
scores=get_scores(folder)
print 'Reading data from %s' %fname
sources=read_data(fname)
print 'sources before cuts: %s' %add_comma(len(sources))
# longlist (from functions) -- presumably a disk-backed list for large runs.
out_objects=longlist([],500000)
out_people=longlist([],500000)
out_scores=longlist([],500000)
out_labels=longlist([],500000)
count=1
# Aggregate every source's transits into the four parallel columns.
for current_source in sources.values():
    overprint('finding transits for source %s of %s. id=%s' %(add_comma(count),add_comma(len(sources)),current_source.id))
    count+=1
    new_objects,new_people,new_scores,new_labels=current_source.output()
    out_objects.add(new_objects)
    out_people.add(new_people)
    out_scores.add(new_scores)
    out_labels.add(new_labels)
# Persist the lookup tables needed to reconstruct transits later.
f=open(dir_name+'/source_transits_lc_details.dat','w')
pickle.dump([source_labels,all_transits,light_curve_details],f)
f.close()
try:
    del sources #free up some memory
except:
    pass
print '\n'
print 'total transits found: %s' %add_comma(len(out_objects))
# Write one csv row per (transit, user) pair.
f=open(dir_name+'/'+out,'w')
for i in range(len(out_objects)):
    f.write('%s,%s,%d,%s\n' %(out_objects[i],out_people[i],out_scores[i],out_labels[i]))
f.close()
# Rebuild just the sources named in depth_list.csv so depths can be fitted.
fname='depth_list.csv'
f=open(fname,'r')
needed=[]
for line in f:
    sid=line.split(',')[0]
    sid=sid.strip().strip('"')
    needed.append(sid)
f.close()
needed=set(needed)
sources={}
for tran in out_objects:
    source_id,light_curve_id,tran_id=tran.split('_')
    if source_id in needed:
        x,width=all_transits[light_curve_id+'_'+tran_id]
        url,release_id=light_curve_details[light_curve_id]
        if source_id not in set(sources.keys()):
            sources[source_id]=source(source_id,source_labels[source_id])
        lc=sources[source_id].get_light_curve(light_curve_id,url,release_id)
        lc.add_box(x,0,width,2,tran_id)#again, using 0-2 for y axis to include everything. May want to make this more sophisticated at some point
# Rewrite depth_list.csv in place, inserting the fitted depth column.
fname='depth_list.csv'
f=open(fname,'r')
r=f.readlines()
f.close()
f=open(fname,'w')
f.write('id,kepler,transits,depth,mean,weighted,frequentist,ibcc,promising,notes\n')
for line in r[1:]:
    # NOTE(review): split(',',8) yields 9 fields, so input rows are assumed
    # not to carry a depth column yet -- confirm against the file producer.
    sid,kepler,transits,mean,weighted,frequentist,ibcc,promising,notes=line.split(',',8)
    depth=sources[sid.strip().strip('"')].get_depths()
    f.write('%s,%s,%s,%.3e,%s,%s,%s,%s,%s,%s'%(sid,kepler,transits,depth,mean,weighted,frequentist,ibcc,promising,notes))
f.close()
repetition_file.close()
print 'Total running time: %s' %hms(time.time()-beginning)
settings.close_all()
| |
import atexit
import logging
import json
import numpy as np
import os
import six
import sys
import threading
import weakref
from gym import error, version
from gym.monitoring import stats_recorder, video_recorder
from gym.utils import atomic_write, closer, seeding
logger = logging.getLogger(__name__)
FILE_PREFIX = 'openaigym'
MANIFEST_PREFIX = FILE_PREFIX + '.manifest'
def detect_training_manifests(training_dir):
    """Paths (joined onto `training_dir`) of all manifest files it contains."""
    prefix = MANIFEST_PREFIX + '.'
    return [os.path.join(training_dir, name)
            for name in os.listdir(training_dir) if name.startswith(prefix)]
def detect_monitor_files(training_dir):
    """Paths (joined onto `training_dir`) of every monitor-written file."""
    prefix = FILE_PREFIX + '.'
    return [os.path.join(training_dir, name)
            for name in os.listdir(training_dir) if name.startswith(prefix)]
def clear_monitor_files(training_dir):
    """Delete every monitor-created file in `training_dir` (force=True path)."""
    files = detect_monitor_files(training_dir)
    if not files:
        return
    logger.info('Clearing %d monitor files from previous run (because force=True was provided)', len(files))
    for path in files:
        os.unlink(path)
def capped_cubic_video_schedule(episode_id):
    """True on perfect-cube episodes below 1000, then every 1000th episode."""
    if episode_id >= 1000:
        return episode_id % 1000 == 0
    nearest_root = int(round(episode_id ** (1. / 3)))
    return nearest_root ** 3 == episode_id
def disable_videos(episode_id):
    """Video schedule that never records, regardless of episode."""
    return False
# Registry of every active Monitor so they can be auto-closed on shutdown.
monitor_closer = closer.Closer()
# This method gets used for a sanity check in scoreboard/api.py. It's
# not intended for use outside of the gym codebase.
def _open_monitors():
    # Snapshot, as a list, of all currently-registered monitors.
    return list(monitor_closer.closeables.values())
class Monitor(object):
    """A configurable monitor for your training runs.

    Every env has an attached monitor, which you can access as
    'env.monitor'. Simple usage is just to call 'monitor.start(dir)'
    to begin monitoring and 'monitor.close()' when training is
    complete. This will record stats and will periodically record a video.

    For finer-grained control over how often videos are collected, use the
    video_callable argument, e.g.
    'monitor.start(video_callable=lambda count: count % 100 == 0)'
    to record every 100 episodes. ('count' is how many episodes have completed)

    Depending on the environment, video can slow down execution. You
    can also use 'monitor.configure(video_callable=lambda count: False)' to disable
    video.

    Monitor supports multiple threads and multiple processes writing
    to the same directory of training data. The data will later be
    joined by scoreboard.upload_training_data and on the server.

    Args:
        env (gym.Env): The environment instance to monitor.

    Attributes:
        id (Optional[str]): The ID of the monitored environment
    """
    def __init__(self, env):
        # Python's GC allows refcycles *or* for objects to have a
        # __del__ method. So we need to maintain a weakref to env.
        #
        # https://docs.python.org/2/library/gc.html#gc.garbage
        self._env_ref = weakref.ref(env)
        self.videos = []           # (video_path, metadata_path) pairs recorded so far
        self.stats_recorder = None # created in start()
        self.video_recorder = None # created per-episode in _after_reset()
        self.enabled = False       # True between start() and close()
        self.episode_id = 0
        self._monitor_id = None    # handle from monitor_closer.register()
        self.seeds = None          # seeds returned by env.seed() in start()
    @property
    def env(self):
        # Dereference the weakref; raises once the env has been collected.
        env = self._env_ref()
        if env is None:
            raise error.Error("env has been garbage collected. To keep using a monitor, you must keep around a reference to the env object. (HINT: try assigning the env to a variable in your code.)")
        return env
    def start(self, directory, video_callable=None, force=False, resume=False, seed=None, write_upon_reset=False):
        """Start monitoring.

        Args:
            directory (str): A per-training run directory where to record stats.
            video_callable (Optional[function, False]): function that takes in the index of the episode and outputs a boolean, indicating whether we should record a video on this episode. The default (for video_callable is None) is to take perfect cubes, capped at 1000. False disables video recording.
            force (bool): Clear out existing training data from this directory (by deleting every file prefixed with "openaigym.").
            resume (bool): Retain the training data already in this directory, which will be merged with our new data
            seed (Optional[int]): The seed to run this environment with. By default, a random seed will be chosen.
            write_upon_reset (bool): Write the manifest file on each reset. (This is currently a JSON file, so writing it is somewhat expensive.)
        """
        if self.env.spec is None:
            logger.warn("Trying to monitor an environment which has no 'spec' set. This usually means you did not create it via 'gym.make', and is recommended only for advanced users.")
        if not os.path.exists(directory):
            logger.info('Creating monitor directory %s', directory)
            os.makedirs(directory)
        # Normalize video_callable: None -> default schedule, False -> never.
        if video_callable is None:
            video_callable = capped_cubic_video_schedule
        elif video_callable == False:
            video_callable = disable_videos
        elif not callable(video_callable):
            raise error.Error('You must provide a function, None, or False for video_callable, not {}: {}'.format(type(video_callable), video_callable))
        # Check on whether we need to clear anything
        if force:
            clear_monitor_files(directory)
        elif not resume:
            training_manifests = detect_training_manifests(directory)
            if len(training_manifests) > 0:
                raise error.Error('''Trying to write to monitor directory {} with existing monitor files: {}.
You should use a unique directory for each training run, or use 'force=True' to automatically clear previous monitor files.'''.format(directory, ', '.join(training_manifests[:5])))
        self._monitor_id = monitor_closer.register(self)
        self.enabled = True
        self.directory = os.path.abspath(directory)
        # We use the 'openai-gym' prefix to determine if a file is
        # ours
        self.file_prefix = FILE_PREFIX
        # Monitor id + pid keeps filenames unique across threads/processes.
        self.file_infix = '{}.{}'.format(self._monitor_id, os.getpid())
        self.stats_recorder = stats_recorder.StatsRecorder(directory, '{}.episode_batch.{}'.format(self.file_prefix, self.file_infix))
        self.configure(video_callable=video_callable)
        # NOTE(review): the directory was already created above, so this
        # check appears redundant -- confirm before removing.
        if not os.path.exists(directory):
            os.mkdir(directory)
        self.write_upon_reset = write_upon_reset
        seeds = self.env.seed(seed)
        self.seeds = seeds
    def flush(self, force=False):
        """Flush all relevant monitor information to disk."""
        # Skip unless the caller forces it or write_upon_reset is set.
        if not self.write_upon_reset and not force:
            return
        self.stats_recorder.flush()
        # Give it a very distiguished name, since we need to pick it
        # up from the filesystem later.
        path = os.path.join(self.directory, '{}.manifest.{}.manifest.json'.format(self.file_prefix, self.file_infix))
        logger.debug('Writing training manifest file to %s', path)
        with atomic_write.atomic_write(path) as f:
            # We need to write relative paths here since people may
            # move the training_dir around. It would be cleaner to
            # already have the basenames rather than basename'ing
            # manually, but this works for now.
            json.dump({
                'stats': os.path.basename(self.stats_recorder.path),
                'videos': [(os.path.basename(v), os.path.basename(m))
                           for v, m in self.videos],
                'env_info': self._env_info(),
                'seeds': self.seeds,
            }, f)
    def close(self):
        """Flush all monitor data to disk and close any open rendering windows."""
        if not self.enabled:
            return
        self.stats_recorder.close()
        if self.video_recorder is not None:
            self._close_video_recorder()
        self.flush(force=True)
        env = self._env_ref()
        # Only take action if the env hasn't been GC'd
        if env is not None:
            # Note we'll close the env's rendering window even if we did
            # not open it. There isn't a particular great way to know if
            # we did, since some environments will have a window pop up
            # during video recording.
            try:
                env.render(close=True)
            except Exception as e:
                if env.spec:
                    key = env.spec.id
                else:
                    key = env
                # We don't want to avoid writing the manifest simply
                # because we couldn't close the renderer.
                logger.error('Could not close renderer for %s: %s', key, e)
            # Remove the env's pointer to this monitor
            del env._monitor
        # Stop tracking this for autoclose
        monitor_closer.unregister(self._monitor_id)
        self.enabled = False
        logger.info('''Finished writing results. You can upload them to the scoreboard via gym.upload(%r)''', self.directory)
    def configure(self, video_callable=None):
        """Reconfigure the monitor.

            video_callable (function): Whether to record video to upload to the scoreboard.
        """
        if video_callable is not None:
            self.video_callable = video_callable
    def _before_step(self, action):
        # Forward the action to the stats recorder (no-op when disabled).
        if not self.enabled: return
        self.stats_recorder.before_step(action)
    def _after_step(self, observation, reward, done, info):
        if not self.enabled: return done
        # Add 1 since about to take another step
        if self.env.spec and self.stats_recorder.steps+1 >= self.env.spec.timestep_limit:
            logger.debug('Ending episode %i because it reached the timestep limit of %i.', self.episode_id, self.env.spec.timestep_limit)
            done = True
        # Record stats
        self.stats_recorder.after_step(observation, reward, done, info)
        # Record video
        # NOTE(review): assumes _after_reset ran first so video_recorder
        # is set -- stepping before any reset would hit None here.
        self.video_recorder.capture_frame()
        return done
    def _before_reset(self):
        if not self.enabled: return
        self.stats_recorder.before_reset()
    def _after_reset(self, observation):
        if not self.enabled: return
        # Reset the stat count
        self.stats_recorder.after_reset(observation)
        # Close any existing video recorder
        if self.video_recorder:
            self._close_video_recorder()
        # Start recording the next video.
        #
        # TODO: calculate a more correct 'episode_id' upon merge
        self.video_recorder = video_recorder.VideoRecorder(
            env=self.env,
            base_path=os.path.join(self.directory, '{}.video.{}.video{:06}'.format(self.file_prefix, self.file_infix, self.episode_id)),
            metadata={'episode_id': self.episode_id},
            enabled=self._video_enabled(),
        )
        self.video_recorder.capture_frame()
        # Bump *after* all reset activity has finished
        self.episode_id += 1
        self.flush()
    def _close_video_recorder(self):
        # Finalize the current video; keep it only if recording succeeded.
        self.video_recorder.close()
        if self.video_recorder.functional:
            self.videos.append((self.video_recorder.path, self.video_recorder.metadata_path))
    def _video_enabled(self):
        # Ask the configured schedule whether this episode gets a video.
        return self.video_callable(self.episode_id)
    def _env_info(self):
        # Environment metadata embedded in the manifest.
        env_info = {
            'gym_version': version.VERSION,
        }
        if self.env.spec:
            env_info['env_id'] = self.env.spec.id
        return env_info
    def __del__(self):
        # Make sure we've closed up shop when garbage collecting
        self.close()
def load_results(training_dir):
    """Load stats, videos, seeds, and env metadata recorded under `training_dir`.

    Returns a results dict, or None when the directory does not exist or
    contains no training manifests.
    """
    if not os.path.exists(training_dir):
        logger.error('Training directory %s not found', training_dir)
        return

    manifests = detect_training_manifests(training_dir)
    if not manifests:
        logger.error('No manifests found in training directory %s', training_dir)
        return

    logger.debug('Uploading data from manifest %s', ', '.join(manifests))

    stats_files = []
    videos = []
    main_seeds = []
    seeds = []
    env_infos = []

    for manifest in manifests:
        with open(manifest) as f:
            contents = json.load(f)
        # Manifest entries are stored relative to the training directory;
        # make them absolute again.
        stats_files.append(os.path.join(training_dir, contents['stats']))
        videos.extend((os.path.join(training_dir, v), os.path.join(training_dir, m))
                      for v, m in contents['videos'])
        env_infos.append(contents['env_info'])
        current_seeds = contents.get('seeds', [])
        seeds.extend(current_seeds)
        # current_seeds could be None or []
        main_seeds.append(current_seeds[0] if current_seeds else None)

    env_info = collapse_env_infos(env_infos, training_dir)
    timestamps, episode_lengths, episode_rewards, initial_reset_timestamp = merge_stats_files(stats_files)

    return {
        'manifests': manifests,
        'env_info': env_info,
        'timestamps': timestamps,
        'episode_lengths': episode_lengths,
        'episode_rewards': episode_rewards,
        'initial_reset_timestamp': initial_reset_timestamp,
        'videos': videos,
        'main_seeds': main_seeds,
        'seeds': seeds,
    }
def merge_stats_files(stats_files):
    """Merge per-run stats JSON files into single time-ordered lists.

    Returns a tuple (timestamps, episode_lengths, episode_rewards,
    initial_reset_timestamp), with all per-episode lists sorted by
    wall-clock timestamp and the earliest reset timestamp reported.
    """
    timestamps = []
    episode_lengths = []
    episode_rewards = []
    initial_reset_timestamps = []

    for path in stats_files:
        with open(path) as f:
            content = json.load(f)
        # Skip empty files so a null initial_reset_timestamp can't poison the merge.
        if not content['timestamps']:
            continue
        timestamps.extend(content['timestamps'])
        episode_lengths.extend(content['episode_lengths'])
        episode_rewards.extend(content['episode_rewards'])
        initial_reset_timestamps.append(content['initial_reset_timestamp'])

    # Interleave episodes from all runs into chronological order.
    order = np.argsort(timestamps)
    timestamps = np.array(timestamps)[order].tolist()
    episode_lengths = np.array(episode_lengths)[order].tolist()
    episode_rewards = np.array(episode_rewards)[order].tolist()

    initial_reset_timestamp = min(initial_reset_timestamps)
    return timestamps, episode_lengths, episode_rewards, initial_reset_timestamp
def collapse_env_infos(env_infos, training_dir):
    """Collapse a list of identical env_info dicts into a single one.

    Raises error.Error when two manifests disagree (commingled runs) or when
    a required key is missing.
    """
    assert len(env_infos) > 0

    first = env_infos[0]
    for candidate in env_infos[1:]:
        if candidate != first:
            raise error.Error('Found two unequal env_infos: {} and {}. This usually indicates that your training directory {} has commingled results from multiple runs.'.format(first, candidate, training_dir))

    missing = [key for key in ('env_id', 'gym_version') if key not in first]
    if missing:
        raise error.Error("env_info {} from training directory {} is missing expected key {}. This is unexpected and likely indicates a bug in gym.".format(first, training_dir, missing[0]))

    return first
| |
# Generates 3D visualizations of input files.
import os
from binwalk.core.compat import *
from binwalk.core.common import BlockFile
from binwalk.core.module import Module, Option, Kwarg
class Plotter(Module):
    '''
    Base class for visualizing binaries in Qt.
    Other plotter classes are derived from this.
    '''
    # Default GL camera distance from the plotted cube.
    VIEW_DISTANCE = 1024
    # Upper bounds on plotted points; the 2D projection mode uses a lower
    # cap than the full 3D mode.
    MAX_2D_PLOT_POINTS = 12500
    MAX_3D_PLOT_POINTS = 25000

    TITLE = "Binary Visualization"

    # Command-line options exposed by this module.
    CLI = [
        Option(short='3',
               long='3D',
               kwargs={'axis' : 3, 'enabled' : True},
               description='Generate a 3D binary visualization'),
        Option(short='2',
               long='2D',
               kwargs={'axis' : 2, 'enabled' : True},
               description='Project data points onto 3D cube walls only'),
        Option(short='Z',
               long='points',
               type=int,
               kwargs={'max_points' : 0},
               description='Set the maximum number of plotted data points'),
#        Option(short='V',
#               long='grids',
#               kwargs={'show_grids' : True},
#               description='Display the x-y-z grids in the resulting plot'),
    ]

    KWARGS = [
        Kwarg(name='axis', default=3),
        Kwarg(name='max_points', default=0),
        Kwarg(name='show_grids', default=False),
        Kwarg(name='enabled', default=False),
    ]

    # There isn't really any useful data to print to console. Disable header and result output.
    HEADER = None
    RESULT = None

    def init(self):
        # Imported here rather than at module load so that binwalk still works
        # without pyqtgraph installed unless visualization is requested.
        import pyqtgraph.opengl as gl
        from pyqtgraph.Qt import QtGui

        self.verbose = self.config.verbose
        self.offset = self.config.offset
        self.length = self.config.length
        # Used only by the 2D mode to round-robin points across cube faces.
        self.plane_count = -1
        self.plot_points = None

        # Select the point generator and point cap based on the requested axis.
        if self.axis == 2:
            self.MAX_PLOT_POINTS = self.MAX_2D_PLOT_POINTS
            self._generate_data_point = self._generate_2d_data_point
        elif self.axis == 3:
            self.MAX_PLOT_POINTS = self.MAX_3D_PLOT_POINTS
            self._generate_data_point = self._generate_3d_data_point
        else:
            raise Exception("Invalid Plotter axis specified: %d. Must be one of: [2,3]" % self.axis)

        # A max_points of 0 means "use the default cap for this mode".
        if not self.max_points:
            self.max_points = self.MAX_PLOT_POINTS

        self.app = QtGui.QApplication([])
        self.window = gl.GLViewWidget()
        self.window.opts['distance'] = self.VIEW_DISTANCE
        if len(self.config.target_files) == 1:
            self.window.setWindowTitle(self.config.target_files[0])

    def _print(self, message):
        '''
        Print console messages. For internal use only.
        '''
        if self.verbose:
            print(message)

    def _generate_plot_points(self, data_points):
        '''
        Generates plot points from a list of data points.

        @data_points - A dictionary containing each unique point and its frequency of occurance.

        Returns a set of plot points.
        '''
        total = 0
        min_weight = 0
        weightings = {}
        plot_points = {}

        # If the number of data points exceeds the maximum number of allowed data points, use a
        # weighting system to eliminate data points that occur less freqently.
        if sum(data_points.values()) > self.max_points:
            # First, generate a set of weight values 1 - 10
            for i in range(1, 11):
                weightings[i] = 0

            # Go through every data point and how many times that point occurs
            for (point, count) in iterator(data_points):
                # For each data point, compare it to each remaining weight value
                for w in get_keys(weightings):
                    # If the number of times this data point occurred is >= the weight value,
                    # then increment the weight value. Since weight values are ordered lowest
                    # to highest, this means that more frequent data points also increment lower
                    # weight values. Thus, the more high-frequency data points there are, the
                    # more lower-frequency data points are eliminated.
                    if count >= w:
                        weightings[w] += 1
                    else:
                        break

                    # Throw out weight values that exceed the maximum number of data points
                    # NOTE(review): weightings is mutated while iterating its keys;
                    # this relies on get_keys() returning a snapshot copy — confirm
                    # against binwalk.core.compat.
                    if weightings[w] > self.max_points:
                        del weightings[w]

                # If there's only one weight value left, no sense in continuing the loop...
                if len(weightings) == 1:
                    break

            # The least weighted value is our minimum weight
            min_weight = min(weightings)

            # Get rid of all data points that occur less frequently than our minimum weight
            for point in get_keys(data_points):
                if data_points[point] < min_weight:
                    del data_points[point]

        # Keep the most frequent points first, up to the configured cap.
        for point in sorted(data_points, key=data_points.get, reverse=True):
            plot_points[point] = data_points[point]
            # Register this as a result in case future modules need access to the raw point information,
            # but mark plot as False to prevent the entropy module from attempting to overlay this data on its graph.
            self.result(point=point, plot=False)
            total += 1
            if total >= self.max_points:
                break

        return plot_points

    def _generate_data_point(self, data):
        '''
        Subclasses must override this to return the appropriate data point.

        @data - A string of data self.axis in length.

        Returns a data point tuple.
        '''
        return (0,0,0)

    def _generate_data_points(self, fp):
        '''
        Generates a dictionary of data points and their frequency of occurrance.

        @fp - The BlockFile object to generate data points from.

        Returns a dictionary.
        '''
        i = 0
        data_points = {}

        self._print("Generating data points for %s" % fp.name)

        # We don't need any extra data from BlockFile
        fp.set_block_size(peek=0)

        while True:
            (data, dlen) = fp.read_block()
            if not data or not dlen:
                break

            i = 0
            while (i+(self.axis-1)) < dlen:
                point = self._generate_data_point(data[i:i+self.axis])
                if has_key(data_points, point):
                    data_points[point] += 1
                else:
                    data_points[point] = 1
                # NOTE(review): the stride is fixed at 3 bytes even when
                # axis == 2 (2-byte points) — presumably intentional
                # subsampling, but confirm.
                i += 3

        return data_points

    def _generate_plot(self, plot_points):
        import numpy as np
        import pyqtgraph.opengl as gl

        # Kept as a float so the per-point frequency division below is true
        # division under Python 2 as well.
        nitems = float(len(plot_points))

        # NOTE(review): passing float dimensions to np.empty only works on
        # older numpy releases; modern numpy requires integer dims — confirm
        # the supported numpy version.
        pos = np.empty((nitems, 3))
        size = np.empty((nitems))
        color = np.empty((nitems, 4))

        i = 0
        for (point, weight) in iterator(plot_points):
            r = 0.0
            g = 0.0
            b = 0.0

            pos[i] = point

            frequency_percentage = (weight / nitems)

            # Give points that occur more frequently a brighter color and larger point size.
            # Frequency is determined as a percentage of total unique data points.
            if frequency_percentage > .010:
                size[i] = .20
                r = 1.0
            elif frequency_percentage > .005:
                size[i] = .15
                b = 1.0
            elif frequency_percentage > .002:
                size[i] = .10
                g = 1.0
                r = 1.0
            else:
                size[i] = .05
                g = 1.0

            color[i] = (r, g, b, 1.0)

            i += 1

        scatter_plot = gl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=False)
        # Center the 0-255 byte-value cube on the origin.
        scatter_plot.translate(-127.5, -127.5, -127.5)

        return scatter_plot

    def plot(self, wait=True):
        '''
        Generate and display the plot for every target file.

        @wait - Block in the Qt event loop after plotting when True.
        '''
        import pyqtgraph.opengl as gl

        self.window.show()

        if self.show_grids:
            xgrid = gl.GLGridItem()
            ygrid = gl.GLGridItem()
            zgrid = gl.GLGridItem()

            self.window.addItem(xgrid)
            self.window.addItem(ygrid)
            self.window.addItem(zgrid)

            # Rotate x and y grids to face the correct direction
            xgrid.rotate(90, 0, 1, 0)
            ygrid.rotate(90, 1, 0, 0)

            # Scale grids to the appropriate dimensions
            xgrid.scale(12.8, 12.8, 12.8)
            ygrid.scale(12.8, 12.8, 12.8)
            zgrid.scale(12.8, 12.8, 12.8)

        for fd in iter(self.next_file, None):
            data_points = self._generate_data_points(fd)

            self._print("Generating plot points from %d data points" % len(data_points))
            self.plot_points = self._generate_plot_points(data_points)
            # Free the raw point histogram before building the GL scene.
            del data_points

            self._print("Generating graph from %d plot points" % len(self.plot_points))
            self.window.addItem(self._generate_plot(self.plot_points))

        if wait:
            self.wait()

    def wait(self):
        # Spin the Qt event loop until the window is closed; the timer keeps
        # the loop responsive.
        from pyqtgraph.Qt import QtCore, QtGui

        t = QtCore.QTimer()
        t.start(50)

        QtGui.QApplication.instance().exec_()

    def _generate_3d_data_point(self, data):
        '''
        Plot data points within a 3D cube.
        '''
        return (ord(data[0]), ord(data[1]), ord(data[2]))

    def _generate_2d_data_point(self, data):
        '''
        Plot data points projected on each cube face.
        '''
        # Cycle through the six cube faces, one point per face in turn.
        self.plane_count += 1
        if self.plane_count > 5:
            self.plane_count = 0

        if self.plane_count == 0:
            return (0, ord(data[0]), ord(data[1]))
        elif self.plane_count == 1:
            return (ord(data[0]), 0, ord(data[1]))
        elif self.plane_count == 2:
            return (ord(data[0]), ord(data[1]), 0)
        elif self.plane_count == 3:
            return (255, ord(data[0]), ord(data[1]))
        elif self.plane_count == 4:
            return (ord(data[0]), 255, ord(data[1]))
        elif self.plane_count == 5:
            return (ord(data[0]), ord(data[1]), 255)

    def run(self):
        self.plot()
        return True
| |
from __future__ import absolute_import
import datetime
from django.contrib import admin
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.views.main import ChangeList, SEARCH_VAR, ALL_VAR
from django.contrib.auth.models import User
from django.template import Context, Template
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils import formats
from django.utils import six
from .admin import (ChildAdmin, QuartetAdmin, BandAdmin, ChordsBandAdmin,
GroupAdmin, ParentAdmin, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, CustomPaginationAdmin,
FilteredChildAdmin, CustomPaginator, site as custom_site,
SwallowAdmin)
from .models import (Event, Child, Parent, Genre, Band, Musician, Group,
Quartet, Membership, ChordsMusician, ChordsBand, Invitation, Swallow,
UnorderedObject, OrderedObject)
class ChangeListTests(TestCase):
    """Regression tests for the admin ChangeList view machinery.

    Fix in this revision: test_result_list_editable_html previously compared
    the expected HTML *string* to -1 (always False), making its final
    assertion vacuous; it now searches table_output like the sibling checks.
    """
    urls = "regressiontests.admin_changelist.urls"

    def setUp(self):
        self.factory = RequestFactory()

    def _create_superuser(self, username):
        return User.objects.create(username=username, is_superuser=True)

    def _mocked_authenticated_request(self, url, user):
        request = self.factory.get(url)
        request.user = user
        return request

    def test_select_related_preserved(self):
        """
        Regression test for #10348: ChangeList.get_query_set() shouldn't
        overwrite a custom select_related provided by ModelAdmin.queryset().
        """
        m = ChildAdmin(Child, admin.site)
        request = self.factory.get('/child/')
        cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                m.list_filter, m.date_hierarchy, m.search_fields,
                m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
        self.assertEqual(cl.query_set.query.select_related, {'parent': {'name': {}}})

    def test_result_list_empty_changelist_value(self):
        """
        Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
        for relationship fields
        """
        new_child = Child.objects.create(name='name', parent=None)
        request = self.factory.get('/child/')
        m = ChildAdmin(Child, admin.site)
        list_display = m.get_list_display(request)
        list_display_links = m.get_list_display_links(request, list_display)
        cl = ChangeList(request, Child, list_display, list_display_links,
                m.list_filter, m.date_hierarchy, m.search_fields,
                m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
        cl.formset = None
        template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
        context = Context({'cl': cl})
        table_output = template.render(context)
        row_html = '<tbody><tr class="row1"><th><a href="%d/">name</a></th><td class="nowrap">(None)</td></tr></tbody>' % new_child.id
        self.assertFalse(table_output.find(row_html) == -1,
            'Failed to find expected row element: %s' % table_output)

    def test_result_list_html(self):
        """
        Verifies that inclusion tag result_list generates a table when with
        default ModelAdmin settings.
        """
        new_parent = Parent.objects.create(name='parent')
        new_child = Child.objects.create(name='name', parent=new_parent)
        request = self.factory.get('/child/')
        m = ChildAdmin(Child, admin.site)
        list_display = m.get_list_display(request)
        list_display_links = m.get_list_display_links(request, list_display)
        cl = ChangeList(request, Child, list_display, list_display_links,
                m.list_filter, m.date_hierarchy, m.search_fields,
                m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
        cl.formset = None
        template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
        context = Context({'cl': cl})
        table_output = template.render(context)
        row_html = '<tbody><tr class="row1"><th><a href="%d/">name</a></th><td class="nowrap">Parent object</td></tr></tbody>' % new_child.id
        self.assertFalse(table_output.find(row_html) == -1,
            'Failed to find expected row element: %s' % table_output)

    def test_result_list_editable_html(self):
        """
        Regression tests for #11791: Inclusion tag result_list generates a
        table and this checks that the items are nested within the table
        element tags.
        Also a regression test for #13599, verifies that hidden fields
        when list_editable is enabled are rendered in a div outside the
        table.
        """
        new_parent = Parent.objects.create(name='parent')
        new_child = Child.objects.create(name='name', parent=new_parent)
        request = self.factory.get('/child/')
        m = ChildAdmin(Child, admin.site)

        # Test with list_editable fields
        m.list_display = ['id', 'name', 'parent']
        m.list_display_links = ['id']
        m.list_editable = ['name']
        cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                m.list_filter, m.date_hierarchy, m.search_fields,
                m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
        FormSet = m.get_changelist_formset(request)
        cl.formset = FormSet(queryset=cl.result_list)
        template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
        context = Context({'cl': cl})
        table_output = template.render(context)
        # make sure that hidden fields are in the correct place
        hiddenfields_div = '<div class="hiddenfields"><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></div>' % new_child.id
        self.assertFalse(table_output.find(hiddenfields_div) == -1,
            'Failed to find hidden fields in: %s' % table_output)
        # make sure that list editable fields are rendered in divs correctly
        editable_name_field = '<input name="form-0-name" value="name" class="vTextField" maxlength="30" type="text" id="id_form-0-name" />'
        # Bug fix: the original asserted `'<td>...</td>' == -1` (always
        # False), so this check never inspected table_output at all.
        self.assertFalse(table_output.find('<td>%s</td>' % editable_name_field) == -1,
            'Failed to find "name" list_editable field in: %s' % table_output)

    def test_result_list_editable(self):
        """
        Regression test for #14312: list_editable with pagination
        """
        new_parent = Parent.objects.create(name='parent')
        for i in range(200):
            new_child = Child.objects.create(name='name %s' % i, parent=new_parent)
        request = self.factory.get('/child/', data={'p': -1})  # Anything outside range
        m = ChildAdmin(Child, admin.site)

        # Test with list_editable fields
        m.list_display = ['id', 'name', 'parent']
        m.list_display_links = ['id']
        m.list_editable = ['name']
        self.assertRaises(IncorrectLookupParameters, lambda: \
            ChangeList(request, Child, m.list_display, m.list_display_links,
                    m.list_filter, m.date_hierarchy, m.search_fields,
                    m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m))

    def test_custom_paginator(self):
        new_parent = Parent.objects.create(name='parent')
        for i in range(200):
            new_child = Child.objects.create(name='name %s' % i, parent=new_parent)

        request = self.factory.get('/child/')
        m = CustomPaginationAdmin(Child, admin.site)

        cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                m.list_filter, m.date_hierarchy, m.search_fields,
                m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
        cl.get_results(request)
        self.assertIsInstance(cl.paginator, CustomPaginator)

    def test_distinct_for_m2m_in_list_filter(self):
        """
        Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't apper more than once. Basic ManyToMany.
        """
        blues = Genre.objects.create(name='Blues')
        band = Band.objects.create(name='B.B. King Review', nr_of_members=11)

        band.genres.add(blues)
        band.genres.add(blues)

        m = BandAdmin(Band, admin.site)
        request = self.factory.get('/band/', data={'genres': blues.pk})

        cl = ChangeList(request, Band, m.list_display,
                m.list_display_links, m.list_filter, m.date_hierarchy,
                m.search_fields, m.list_select_related, m.list_per_page,
                m.list_max_show_all, m.list_editable, m)

        cl.get_results(request)

        # There's only one Group instance
        self.assertEqual(cl.result_count, 1)

    def test_distinct_for_through_m2m_in_list_filter(self):
        """
        Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't apper more than once. With an intermediate model.
        """
        lead = Musician.objects.create(name='Vox')
        band = Group.objects.create(name='The Hype')
        Membership.objects.create(group=band, music=lead, role='lead voice')
        Membership.objects.create(group=band, music=lead, role='bass player')

        m = GroupAdmin(Group, admin.site)
        request = self.factory.get('/group/', data={'members': lead.pk})

        cl = ChangeList(request, Group, m.list_display,
                m.list_display_links, m.list_filter, m.date_hierarchy,
                m.search_fields, m.list_select_related, m.list_per_page,
                m.list_max_show_all, m.list_editable, m)

        cl.get_results(request)

        # There's only one Group instance
        self.assertEqual(cl.result_count, 1)

    def test_distinct_for_inherited_m2m_in_list_filter(self):
        """
        Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't apper more than once. Model managed in the
        admin inherits from the one that defins the relationship.
        """
        lead = Musician.objects.create(name='John')
        four = Quartet.objects.create(name='The Beatles')
        Membership.objects.create(group=four, music=lead, role='lead voice')
        Membership.objects.create(group=four, music=lead, role='guitar player')

        m = QuartetAdmin(Quartet, admin.site)
        request = self.factory.get('/quartet/', data={'members': lead.pk})

        cl = ChangeList(request, Quartet, m.list_display,
                m.list_display_links, m.list_filter, m.date_hierarchy,
                m.search_fields, m.list_select_related, m.list_per_page,
                m.list_max_show_all, m.list_editable, m)

        cl.get_results(request)

        # There's only one Quartet instance
        self.assertEqual(cl.result_count, 1)

    def test_distinct_for_m2m_to_inherited_in_list_filter(self):
        """
        Regression test for #13902: When using a ManyToMany in list_filter,
        results shouldn't apper more than once. Target of the relationship
        inherits from another.
        """
        lead = ChordsMusician.objects.create(name='Player A')
        three = ChordsBand.objects.create(name='The Chords Trio')
        Invitation.objects.create(band=three, player=lead, instrument='guitar')
        Invitation.objects.create(band=three, player=lead, instrument='bass')

        m = ChordsBandAdmin(ChordsBand, admin.site)
        request = self.factory.get('/chordsband/', data={'members': lead.pk})

        cl = ChangeList(request, ChordsBand, m.list_display,
                m.list_display_links, m.list_filter, m.date_hierarchy,
                m.search_fields, m.list_select_related, m.list_per_page,
                m.list_max_show_all, m.list_editable, m)

        cl.get_results(request)

        # There's only one ChordsBand instance
        self.assertEqual(cl.result_count, 1)

    def test_distinct_for_non_unique_related_object_in_list_filter(self):
        """
        Regressions tests for #15819: If a field listed in list_filters
        is a non-unique related object, distinct() must be called.
        """
        parent = Parent.objects.create(name='Mary')
        # Two children with the same name
        Child.objects.create(parent=parent, name='Daniel')
        Child.objects.create(parent=parent, name='Daniel')

        m = ParentAdmin(Parent, admin.site)
        request = self.factory.get('/parent/', data={'child__name': 'Daniel'})

        cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
                m.list_filter, m.date_hierarchy, m.search_fields,
                m.list_select_related, m.list_per_page,
                m.list_max_show_all, m.list_editable, m)

        # Make sure distinct() was called
        self.assertEqual(cl.query_set.count(), 1)

    def test_distinct_for_non_unique_related_object_in_search_fields(self):
        """
        Regressions tests for #15819: If a field listed in search_fields
        is a non-unique related object, distinct() must be called.
        """
        parent = Parent.objects.create(name='Mary')
        Child.objects.create(parent=parent, name='Danielle')
        Child.objects.create(parent=parent, name='Daniel')

        m = ParentAdmin(Parent, admin.site)
        request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})

        cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
                m.list_filter, m.date_hierarchy, m.search_fields,
                m.list_select_related, m.list_per_page,
                m.list_max_show_all, m.list_editable, m)

        # Make sure distinct() was called
        self.assertEqual(cl.query_set.count(), 1)

    def test_pagination(self):
        """
        Regression tests for #12893: Pagination in admins changelist doesn't
        use queryset set by modeladmin.
        """
        parent = Parent.objects.create(name='anything')
        for i in range(30):
            Child.objects.create(name='name %s' % i, parent=parent)
            Child.objects.create(name='filtered %s' % i, parent=parent)

        request = self.factory.get('/child/')

        # Test default queryset
        m = ChildAdmin(Child, admin.site)
        cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                m.list_filter, m.date_hierarchy, m.search_fields,
                m.list_select_related, m.list_per_page, m.list_max_show_all,
                m.list_editable, m)
        self.assertEqual(cl.query_set.count(), 60)
        self.assertEqual(cl.paginator.count, 60)
        self.assertEqual(cl.paginator.page_range, [1, 2, 3, 4, 5, 6])

        # Test custom queryset
        m = FilteredChildAdmin(Child, admin.site)
        cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                m.list_filter, m.date_hierarchy, m.search_fields,
                m.list_select_related, m.list_per_page, m.list_max_show_all,
                m.list_editable, m)
        self.assertEqual(cl.query_set.count(), 30)
        self.assertEqual(cl.paginator.count, 30)
        self.assertEqual(cl.paginator.page_range, [1, 2, 3])

    def test_computed_list_display_localization(self):
        """
        Regression test for #13196: output of functions should be localized
        in the changelist.
        """
        User.objects.create_superuser(
            username='super', email='super@localhost', password='secret')
        self.client.login(username='super', password='secret')
        event = Event.objects.create(date=datetime.date.today())
        response = self.client.get('/admin/admin_changelist/event/')
        self.assertContains(response, formats.localize(event.date))
        self.assertNotContains(response, six.text_type(event.date))

    def test_dynamic_list_display(self):
        """
        Regression tests for #14206: dynamic list_display support.
        """
        parent = Parent.objects.create(name='parent')
        for i in range(10):
            Child.objects.create(name='child %s' % i, parent=parent)

        user_noparents = self._create_superuser('noparents')
        user_parents = self._create_superuser('parents')

        # Test with user 'noparents'
        m = custom_site._registry[Child]
        request = self._mocked_authenticated_request('/child/', user_noparents)
        response = m.changelist_view(request)
        self.assertNotContains(response, 'Parent object')

        list_display = m.get_list_display(request)
        list_display_links = m.get_list_display_links(request, list_display)
        self.assertEqual(list_display, ['name', 'age'])
        self.assertEqual(list_display_links, ['name'])

        # Test with user 'parents'
        m = DynamicListDisplayChildAdmin(Child, admin.site)
        request = self._mocked_authenticated_request('/child/', user_parents)
        response = m.changelist_view(request)
        self.assertContains(response, 'Parent object')

        custom_site.unregister(Child)

        list_display = m.get_list_display(request)
        list_display_links = m.get_list_display_links(request, list_display)
        self.assertEqual(list_display, ('parent', 'name', 'age'))
        self.assertEqual(list_display_links, ['parent'])

        # Test default implementation
        custom_site.register(Child, ChildAdmin)
        m = custom_site._registry[Child]
        request = self._mocked_authenticated_request('/child/', user_noparents)
        response = m.changelist_view(request)
        self.assertContains(response, 'Parent object')

    def test_show_all(self):
        parent = Parent.objects.create(name='anything')
        for i in range(30):
            Child.objects.create(name='name %s' % i, parent=parent)
            Child.objects.create(name='filtered %s' % i, parent=parent)

        # Add "show all" parameter to request
        request = self.factory.get('/child/', data={ALL_VAR: ''})

        # Test valid "show all" request (number of total objects is under max)
        m = ChildAdmin(Child, admin.site)
        # 200 is the max we'll pass to ChangeList
        cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                m.list_filter, m.date_hierarchy, m.search_fields,
                m.list_select_related, m.list_per_page, 200, m.list_editable, m)
        cl.get_results(request)
        self.assertEqual(len(cl.result_list), 60)

        # Test invalid "show all" request (number of total objects over max)
        # falls back to paginated pages
        m = ChildAdmin(Child, admin.site)
        # 30 is the max we'll pass to ChangeList for this test
        cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                m.list_filter, m.date_hierarchy, m.search_fields,
                m.list_select_related, m.list_per_page, 30, m.list_editable, m)
        cl.get_results(request)
        self.assertEqual(len(cl.result_list), 10)

    def test_dynamic_list_display_links(self):
        """
        Regression tests for #16257: dynamic list_display_links support.
        """
        parent = Parent.objects.create(name='parent')
        for i in range(1, 10):
            Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)

        m = DynamicListDisplayLinksChildAdmin(Child, admin.site)
        superuser = self._create_superuser('superuser')
        request = self._mocked_authenticated_request('/child/', superuser)
        response = m.changelist_view(request)
        for i in range(1, 10):
            self.assertContains(response, '<a href="%s/">%s</a>' % (i, i))

        list_display = m.get_list_display(request)
        list_display_links = m.get_list_display_links(request, list_display)
        self.assertEqual(list_display, ('parent', 'name', 'age'))
        self.assertEqual(list_display_links, ['age'])

    def test_tuple_list_display(self):
        """
        Regression test for #17128
        (ChangeList failing under Python 2.5 after r16319)
        """
        swallow = Swallow.objects.create(
            origin='Africa', load='12.34', speed='22.2')
        model_admin = SwallowAdmin(Swallow, admin.site)
        superuser = self._create_superuser('superuser')
        request = self._mocked_authenticated_request('/swallow/', superuser)
        response = model_admin.changelist_view(request)
        # just want to ensure it doesn't blow up during rendering
        self.assertContains(response, six.text_type(swallow.origin))
        self.assertContains(response, six.text_type(swallow.load))
        self.assertContains(response, six.text_type(swallow.speed))

    def test_deterministic_order_for_unordered_model(self):
        """
        Ensure that the primary key is systematically used in the ordering of
        the changelist's results to guarantee a deterministic order, even
        when the Model doesn't have any default ordering defined.
        Refs #17198.
        """
        superuser = self._create_superuser('superuser')

        for counter in range(1, 51):
            UnorderedObject.objects.create(id=counter, bool=True)

        class UnorderedObjectAdmin(admin.ModelAdmin):
            list_per_page = 10

        def check_results_order(ascending=False):
            admin.site.register(UnorderedObject, UnorderedObjectAdmin)
            model_admin = UnorderedObjectAdmin(UnorderedObject, admin.site)
            counter = 0 if ascending else 51
            for page in range(0, 5):
                request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
                response = model_admin.changelist_view(request)
                for result in response.context_data['cl'].result_list:
                    counter += 1 if ascending else -1
                    self.assertEqual(result.id, counter)
            admin.site.unregister(UnorderedObject)

        # When no order is defined at all, everything is ordered by '-pk'.
        check_results_order()

        # When an order field is defined but multiple records have the same
        # value for that field, make sure everything gets ordered by -pk as well.
        UnorderedObjectAdmin.ordering = ['bool']
        check_results_order()

        # When order fields are defined, including the pk itself, use them.
        UnorderedObjectAdmin.ordering = ['bool', '-pk']
        check_results_order()
        UnorderedObjectAdmin.ordering = ['bool', 'pk']
        check_results_order(ascending=True)
        UnorderedObjectAdmin.ordering = ['-id', 'bool']
        check_results_order()
        UnorderedObjectAdmin.ordering = ['id', 'bool']
        check_results_order(ascending=True)

    def test_deterministic_order_for_model_ordered_by_its_manager(self):
        """
        Ensure that the primary key is systematically used in the ordering of
        the changelist's results to guarantee a deterministic order, even
        when the Model has a manager that defines a default ordering.
        Refs #17198.
        """
        superuser = self._create_superuser('superuser')

        for counter in range(1, 51):
            OrderedObject.objects.create(id=counter, bool=True, number=counter)

        class OrderedObjectAdmin(admin.ModelAdmin):
            list_per_page = 10

        def check_results_order(ascending=False):
            admin.site.register(OrderedObject, OrderedObjectAdmin)
            model_admin = OrderedObjectAdmin(OrderedObject, admin.site)
            counter = 0 if ascending else 51
            for page in range(0, 5):
                request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
                response = model_admin.changelist_view(request)
                for result in response.context_data['cl'].result_list:
                    counter += 1 if ascending else -1
                    self.assertEqual(result.id, counter)
            admin.site.unregister(OrderedObject)

        # When no order is defined at all, use the model's default ordering (i.e. 'number')
        check_results_order(ascending=True)

        # When an order field is defined but multiple records have the same
        # value for that field, make sure everything gets ordered by -pk as well.
        OrderedObjectAdmin.ordering = ['bool']
        check_results_order()

        # When order fields are defined, including the pk itself, use them.
        OrderedObjectAdmin.ordering = ['bool', '-pk']
        check_results_order()
        OrderedObjectAdmin.ordering = ['bool', 'pk']
        check_results_order(ascending=True)
        OrderedObjectAdmin.ordering = ['-id', 'bool']
        check_results_order()
        OrderedObjectAdmin.ordering = ['id', 'bool']
        check_results_order(ascending=True)
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Logging and Summary Operations."""
# pylint: disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_logging_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_logging_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util.deprecation import deprecated
# The python wrapper for Assert is in control_flow_ops, as the Assert
# call relies on certain conditionals for its dependencies. Use
# control_flow_ops.Assert.
# Assert and Print are special symbols in python, so we must
# use an upper-case version of them.
def Print(input_, data, message=None, first_n=None, summarize=None,
          name=None):
  """Identity op that prints the tensors in `data` as a side effect.

  Output goes to standard error, so it is not visible inside a jupyter
  notebook (it lands in the notebook *server's* log instead).

  Args:
    input_: A tensor that is passed through unchanged.
    data: A list of tensors printed whenever the op is evaluated.
    message: Optional string prefix for the printed output.
    first_n: Log only the first `first_n` executions; a negative value
      (the default) logs every time.
    summarize: Number of entries printed per tensor; when None, at most
      3 elements of each input tensor are shown.
    name: Optional name for the operation.

  Returns:
    The same tensor as `input_`.
  """
  printed = gen_logging_ops._print(input_, data, message, first_n,
                                   summarize, name)
  return printed
@ops.RegisterGradient("Print")
def _PrintGrad(op, *grad):
  """Gradient for Print: pass-through for `input_`, None for each `data`."""
  pass_through = list(grad)
  pass_through.extend([None] * (len(op.inputs) - 1))
  return pass_through
def _Collect(val, collections, default_collections):
  """Adds `val` to each graph collection in `collections` (or the defaults)."""
  target_keys = default_collections if collections is None else collections
  for collection_key in target_keys:
    ops.add_to_collection(collection_key, val)
@deprecated(
    "2016-11-30", "Please switch to tf.summary.histogram. Note that "
    "tf.summary.histogram uses the node name instead of the tag. "
    "This means that TensorFlow will automatically de-duplicate summary "
    "names based on the scope they are created in.")
def histogram_summary(tag, values, collections=None, name=None):
  # pylint: disable=line-too-long
  """Outputs a `Summary` protocol buffer holding a histogram of `values`.

  Deprecated: use tf.summary.histogram. For the rationale and migration
  notes, see
  ['here'](https://www.tensorflow.org/code/tensorflow/contrib/deprecated/__init__.py)

  The generated
  [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
  contains a single value: a histogram built from `values`. The op
  reports an `InvalidArgument` error if any value is not finite.

  Args:
    tag: A 0-D `string` `Tensor`; tag for the summary value.
    values: A real numeric `Tensor` of any shape; data for the histogram.
    collections: Optional list of graph collection keys the new summary
      op is added to. Defaults to `[GraphKeys.SUMMARIES]`.
    name: Optional name for the operation.

  Returns:
    A scalar `string` `Tensor` with the serialized `Summary` protocol
    buffer.
  """
  with ops.name_scope(name, "HistogramSummary", [tag, values]) as scope:
    summary_val = gen_logging_ops._histogram_summary(
        tag=tag, values=values, name=scope)
    _Collect(summary_val, collections, [ops.GraphKeys.SUMMARIES])
  return summary_val
@deprecated(
    "2016-11-30", "Please switch to tf.summary.image. Note that "
    "tf.summary.image uses the node name instead of the tag. "
    "This means that TensorFlow will automatically de-duplicate summary "
    "names based on the scope they are created in. Also, the max_images "
    "argument was renamed to max_outputs.")
def image_summary(tag, tensor, max_images=3, collections=None, name=None):
  # pylint: disable=line-too-long
  """Outputs a `Summary` protocol buffer containing images.

  Deprecated: use tf.summary.image. For the rationale and migration
  notes, see
  ['here'](https://www.tensorflow.org/code/tensorflow/contrib/deprecated/__init__.py)

  Up to `max_images` summary values are produced, built from `tensor`,
  which must be 4-D with shape `[batch_size, height, width, channels]`
  where `channels` is one of:

  * 1: interpreted as Grayscale.
  * 3: interpreted as RGB.
  * 4: interpreted as RGBA.

  The output images keep the input's channel count. Float inputs are
  normalized per image into `[0, 255]`; `uint8` values pass through
  unchanged. Two normalization schemes are used:

  * All-positive inputs are rescaled so the largest value becomes 255.
  * If any value is negative, values are shifted so 0.0 maps to 127,
    then rescaled so either the minimum hits 0 or the maximum hits 255.

  The scalar `string` `tag` builds the tags of the summary values:

  * With `max_images` of 1, the tag is '*tag*/image'.
  * With a larger `max_images`, tags are numbered sequentially:
    '*tag*/image/0', '*tag*/image/1', and so on.

  Args:
    tag: A scalar `string` `Tensor` used to build the summary-value tags.
    tensor: A 4-D `uint8` or `float32` `Tensor` of shape `[batch_size,
      height, width, channels]` with `channels` in {1, 3, 4}.
    max_images: Maximum number of batch elements rendered as images.
    collections: Optional list of ops.GraphKeys the summary is added to.
      Defaults to [ops.GraphKeys.SUMMARIES].
    name: Optional name for the operation.

  Returns:
    A scalar `string` `Tensor` with the serialized `Summary` protocol
    buffer.
  """
  with ops.name_scope(name, "ImageSummary", [tag, tensor]) as scope:
    summary_val = gen_logging_ops._image_summary(
        tag=tag, tensor=tensor, max_images=max_images, name=scope)
    _Collect(summary_val, collections, [ops.GraphKeys.SUMMARIES])
  return summary_val
@deprecated(
    "2016-11-30", "Please switch to tf.summary.audio. Note that "
    "tf.summary.audio uses the node name instead of the tag. "
    "This means that TensorFlow will automatically de-duplicate summary "
    "names based on the scope they are created in.")
def audio_summary(tag,
                  tensor,
                  sample_rate,
                  max_outputs=3,
                  collections=None,
                  name=None):
  # pylint: disable=line-too-long
  """Outputs a `Summary` protocol buffer containing audio.

  Deprecated: use tf.summary.audio. For the rationale and migration
  notes, see
  ['here'](https://www.tensorflow.org/code/tensorflow/contrib/deprecated/__init__.py)

  Up to `max_outputs` summary values are produced, built from `tensor`,
  which must be 3-D with shape `[batch_size, frames, channels]` or 2-D
  with shape `[batch_size, frames]`. Values are assumed to lie in
  `[-1.0, 1.0]` and to be sampled at `sample_rate` Hz.

  The scalar `string` `tag` builds the tags of the summary values:

  * With `max_outputs` of 1, the tag is '*tag*/audio'.
  * With a larger `max_outputs`, tags are numbered sequentially:
    '*tag*/audio/0', '*tag*/audio/1', and so on.

  Args:
    tag: A scalar `string` `Tensor` used to build the summary-value tags.
    tensor: A 3-D `float32` `Tensor` of shape `[batch_size, frames,
      channels]` or a 2-D `float32` `Tensor` of shape `[batch_size, frames]`.
    sample_rate: A scalar `float32` `Tensor`: the signal's sample rate
      in hertz.
    max_outputs: Maximum number of batch elements rendered as audio.
    collections: Optional list of ops.GraphKeys the summary is added to.
      Defaults to [ops.GraphKeys.SUMMARIES].
    name: Optional name for the operation.

  Returns:
    A scalar `string` `Tensor` with the serialized `Summary` protocol
    buffer.
  """
  with ops.name_scope(name, "AudioSummary", [tag, tensor]) as scope:
    # The kernel expects the rate as a float32 tensor.
    sample_rate = ops.convert_to_tensor(
        sample_rate, dtype=dtypes.float32, name="sample_rate")
    summary_val = gen_logging_ops._audio_summary_v2(
        tag=tag,
        tensor=tensor,
        max_outputs=max_outputs,
        sample_rate=sample_rate,
        name=scope)
    _Collect(summary_val, collections, [ops.GraphKeys.SUMMARIES])
  return summary_val
@deprecated("2016-11-30", "Please switch to tf.summary.merge.")
def merge_summary(inputs, collections=None, name=None):
  # pylint: disable=line-too-long
  """Merges summaries.

  Deprecated: tf.summary.merge has identical behavior.

  Creates a
  [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
  protocol buffer holding the union of all values in the input
  summaries. When run, the op reports an `InvalidArgument` error if
  multiple values in the merged summaries share a tag.

  Args:
    inputs: A list of `string` `Tensor` objects containing serialized
      `Summary` protocol buffers.
    collections: Optional list of graph collection keys the new summary
      op is added to. Defaults to `[GraphKeys.SUMMARIES]`.
    name: Optional name for the operation.

  Returns:
    A scalar `string` `Tensor` with the serialized, merged `Summary`
    protocol buffer.
  """
  with ops.name_scope(name, "MergeSummary", inputs):
    # Note: the gen op receives the caller-supplied `name` (not the
    # opened scope), matching the original behavior.
    merged = gen_logging_ops._merge_summary(inputs=inputs, name=name)
    _Collect(merged, collections, [])
  return merged
@deprecated("2016-11-30", "Please switch to tf.summary.merge_all.")
def merge_all_summaries(key=ops.GraphKeys.SUMMARIES):
  """Merges all summaries collected in the default graph.

  Deprecated: tf.summary.merge_all has identical behavior.

  Args:
    key: `GraphKey` used to collect the summaries. Defaults to
      `GraphKeys.SUMMARIES`.

  Returns:
    None when no summaries were collected; otherwise a scalar `string`
    `Tensor` with the serialized `Summary` protocol buffer resulting
    from the merge.
  """
  collected = ops.get_collection(key)
  return merge_summary(collected) if collected else None
def get_summary_op():
  """Returns a single Summary op that would run all summaries.

  Either existing one from `SUMMARY_OP` collection or merges all existing
  summaries.

  Returns:
    If no summaries were collected, returns None. Otherwise returns a scalar
    `Tensor` of type `string` containing the serialized `Summary` protocol
    buffer resulting from the merging.
  """
  summary_op = ops.get_collection(ops.GraphKeys.SUMMARY_OP)
  if summary_op is not None:
    # get_collection returns a list; take the first cached op if present.
    if summary_op:
      summary_op = summary_op[0]
    else:
      summary_op = None
  if summary_op is None:
    # Nothing cached yet: merge all summaries now and cache the result in
    # the SUMMARY_OP collection so subsequent calls reuse the same op.
    summary_op = merge_all_summaries()
    if summary_op is not None:
      ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
  return summary_op
@deprecated(
    "2016-11-30", "Please switch to tf.summary.scalar. Note that "
    "tf.summary.scalar uses the node name instead of the tag. "
    "This means that TensorFlow will automatically de-duplicate summary "
    "names based on the scope they are created in. Also, passing a "
    "tensor or list of tags to a scalar summary op is no longer "
    "supported.")
def scalar_summary(tags, values, collections=None, name=None):
  # pylint: disable=line-too-long
  """Outputs a `Summary` protocol buffer with scalar values.

  Deprecated: use tf.summary.scalar. For the rationale and migration
  notes, see
  ['here'](https://www.tensorflow.org/code/tensorflow/contrib/deprecated/__init__.py)

  `tags` and `values` must have the same shape; the generated summary
  holds one value per tag-value pair.

  Args:
    tags: A `string` `Tensor`. Tags for the summaries.
    values: A real numeric `Tensor`. Values for the summaries.
    collections: Optional list of graph collection keys the new summary
      op is added to. Defaults to `[GraphKeys.SUMMARIES]`.
    name: Optional name for the operation.

  Returns:
    A scalar `string` `Tensor` with the serialized `Summary` protocol
    buffer.
  """
  with ops.name_scope(name, "ScalarSummary", [tags, values]) as scope:
    summary_val = gen_logging_ops._scalar_summary(
        tags=tags, values=values, name=scope)
    _Collect(summary_val, collections, [ops.GraphKeys.SUMMARIES])
  return summary_val
# Summary ops emit serialized protocol buffers; no gradients are defined
# for them.
ops.NotDifferentiable("HistogramSummary")
ops.NotDifferentiable("ImageSummary")
ops.NotDifferentiable("AudioSummary")
ops.NotDifferentiable("AudioSummaryV2")
ops.NotDifferentiable("MergeSummary")
ops.NotDifferentiable("ScalarSummary")
| |
"""DHCPv6 Prefix Delegation"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_msg
import srv_control
import misc
import references
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_onlyPD_request():
    """SOLICIT/REQUEST exchange asking for a delegated prefix only.

    The client includes IA_PD but no IA_NA; both ADVERTISE and REPLY must
    carry option 25 (IA_PD) with sub-option 26 (IAPREFIX) per RFC 3633.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 92)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    # SOLICIT with IA_PD only.
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    # REQUEST echoing the advertised IA_PD.
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_IA_and_PD_request():
    """SOLICIT/REQUEST exchange asking for both an address and a prefix.

    The client includes IA_NA and IA_PD; both ADVERTISE and REPLY must
    carry option 25 (IA_PD, sub-option 26 IAPREFIX) and option 3 (IA_NA,
    sub-option 5 IAADDR) with the single pool address 3000::1.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::1')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 92)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    srv_msg.response_check_include_option(3)
    srv_msg.response_check_option_content(3, 'sub-option', 5)
    srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::1')
    misc.test_procedure()
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    srv_msg.response_check_include_option(3)
    srv_msg.response_check_option_content(3, 'sub-option', 5)
    srv_msg.response_check_suboption_content(5, 3, 'addr', '3000::1')
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_onlyPD_request_release():
    """Assigns a delegated prefix, then RELEASEs it.

    The RELEASE reply must carry option 25 with a Status Code sub-option
    (13) of 0 (Success), i.e. not NoBinding.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 91)
    # pool of two prefixes
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    # RELEASE the just-assigned prefix; expect status code 0 (Success).
    misc.test_procedure()
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 13)
    srv_msg.response_check_suboption_content(13, 25, 'statuscode', 0)
    # tests MUST NOT include 'NoBinding'...
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_onlyPD_multiple_request_release():
    """Exhausts the two-prefix pool via assign/release cycles.

    Assign, release (status 0), then assign twice more with new IA_PDs;
    the final SOLICIT succeeding proves the released prefix returned to
    the pool.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 91)
    # pool of two prefixes
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    # Release the first prefix; expect Status Code (13) == 0 (Success).
    misc.test_procedure()
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 13)
    srv_msg.response_check_suboption_content(13, 25, 'statuscode', 0)
    # Second assignment with a fresh IA_PD.
    misc.test_procedure()
    srv_msg.generate_new('IA_PD')
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    # Third SOLICIT only succeeds if the earlier RELEASE freed a prefix.
    misc.test_procedure()
    srv_msg.generate_new('IA_PD')
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    # if it fails, it means that release process fails.
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_IA_and_PD_request_release():
    """Assigns an address plus a prefix, then RELEASEs both.

    The RELEASE reply must include option 25 (IA_PD) and option 3 (IA_NA);
    neither may indicate NoBinding.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 91)
    # pool of two prefixes
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    srv_msg.response_check_include_option(3)
    srv_msg.response_check_option_content(3, 'sub-option', 5)
    # Release both the address and the prefix in one exchange.
    misc.test_procedure()
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_include_option(3)
    # tests MUST NOT include 'NoBinding'...
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_IA_and_PD_multiple_request_release():
    """Repeated assign/release cycles with both IA_NA and IA_PD.

    After releasing both bindings (Status Code option 13 present, no
    NoBinding), two further SOLICIT/REQUEST rounds with fresh IAs must
    still succeed, proving the released resources were reclaimed.
    """
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 91)
    # pool of two prefixes
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    srv_msg.response_check_include_option(3)
    srv_msg.response_check_option_content(3, 'sub-option', 5)
    misc.test_procedure()
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(13)
    # tests MUST NOT include 'NoBinding'...
    misc.test_procedure()
    srv_msg.generate_new('IA_PD')
    srv_msg.generate_new('IA')
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    srv_msg.response_check_include_option(3)
    srv_msg.response_check_option_content(3, 'sub-option', 5)
    misc.test_procedure()
    srv_msg.generate_new('IA_PD')
    srv_msg.generate_new('IA')
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    # NOTE(review): duplicate check of option 25 below — harmless but
    # probably unintended.
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    srv_msg.response_check_include_option(3)
    srv_msg.response_check_option_content(3, 'sub-option', 5)
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_noprefixavail_release():
    """Pool exhaustion recovers after a RELEASE.

    Assign both prefixes, verify a third SOLICIT gets NoPrefixAvail
    (status 6), release one prefix (status 0), then verify a new SOLICIT
    is advertised a prefix again.
    """
    # assign 2 prefixes, try third, fail, release one, assign one more time with success.
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 91)
    # pool of two prefixes
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    # success
    misc.test_procedure()
    srv_msg.generate_new('IA_PD')
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    # both prefixes assigned.
    # Save the second IA_PD so it can be released later.
    misc.test_procedure()
    srv_msg.client_save_option('IA_PD')
    srv_msg.generate_new('IA_PD')
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 13)
    # Status Code 6 == NoPrefixAvail.
    srv_msg.response_check_suboption_content(13, 25, 'statuscode', 6)
    misc.test_procedure()
    srv_msg.client_add_saved_option()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 13)
    srv_msg.response_check_suboption_content(13, 25, 'statuscode', 0)
    misc.test_procedure()
    srv_msg.generate_new('IA_PD')
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_noprefixavail():
    """Pool exhaustion: a third SOLICIT must get NoPrefixAvail.

    Assign both prefixes from a two-prefix pool, then verify the next
    SOLICIT is answered with Status Code (13) value 6 (NoPrefixAvail).
    """
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 91)
    # pool of two prefixes
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    misc.test_procedure()
    srv_msg.generate_new('IA_PD')
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    # both prefixes assigned.
    misc.test_procedure()
    srv_msg.generate_new('IA_PD')
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 13)
    # Status Code 6 == NoPrefixAvail.
    srv_msg.response_check_suboption_content(13, 25, 'statuscode', 6)
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_release_nobinding():
    """RELEASE of a never-requested prefix must yield NoBinding.

    The client only SOLICITs (no REQUEST), then RELEASEs the advertised
    IA_PD; the server must answer with Status Code (13) value 3
    (NoBinding).
    """
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/32', '3000::1-3000::2')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 32, 33)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    # RELEASE without ever REQUESTing — no binding exists.
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 13)
    srv_msg.response_check_suboption_content(13, 25, 'statuscode', 3)
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_release_dual_nobinding():
    """RELEASE of never-requested IA_NA and IA_PD must yield NoBinding.

    After SOLICIT only (no REQUEST), releasing both IAs must produce
    Status Code (13) value 3 (NoBinding) inside option 25 (IA_PD) and
    option 3 (IA_NA).
    """
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/32', '3000::1-3000::2')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 32, 33)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 13)
    srv_msg.response_check_suboption_content(13, 25, 'statuscode', 3)
    srv_msg.response_check_include_option(3)
    srv_msg.response_check_option_content(3, 'sub-option', 13)
    srv_msg.response_check_suboption_content(13, 3, 'statuscode', 3)
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_release_nobinding2():
    """Double RELEASE: the second one must yield NoBinding.

    Assign a prefix, release it once (no status-3 expected), then release
    the same saved IA_PD again — the server must now answer with Status
    Code (13) value 3 (NoBinding).
    """
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/32', '3000::1-3000::2')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 32, 33)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    # First RELEASE, with the IA_PD saved for reuse below.
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_save_option('IA_PD')
    srv_msg.client_add_saved_option()
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    # must not contain status code == 3.
    # Second RELEASE of the already-released binding.
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_add_saved_option()
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('RELEASE')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 13)
    srv_msg.response_check_suboption_content(13, 25, 'statuscode', 3)
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_onlyPD_relay():
    # Prefix delegation through a relay agent: the server must answer a
    # RELAY-FORW carrying a SOLICIT with a RELAY-REPL (options 18 and 9).
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 92)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    # Wrap the SOLICIT in a RELAY-FORW with an interface-id option.
    srv_msg.client_does_include('RelayAgent', 'interface-id')
    srv_msg.create_relay_forward()

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'RELAYREPLY')
    srv_msg.response_check_include_option(18)
    srv_msg.response_check_include_option(9)
    # NOTE(review): the option-9 check is duplicated in the original;
    # kept as-is to preserve behavior — presumably a copy/paste leftover.
    srv_msg.response_check_include_option(9)
    # add test after Scapy fix
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_assign_saved_iapd():
    # Exhaust the two-prefix pool with two different IA_PDs, then restart the
    # server with a larger pool and verify a saved IA_PD gets the expected
    # (second) prefix back.
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    # two prefixes - 3000::/91; 3000::20:0:0/91;
    srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 91)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    # SOLICIT/REQUEST round 1: obtain the first prefix.
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)

    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    # 1st prefix
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)

    # SOLICIT/REQUEST round 2 with a fresh IA_PD (new IAID): second prefix.
    misc.test_procedure()
    srv_msg.generate_new('IA_PD')
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)

    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    # Save this IA_PD so it can be replayed after the server restart below.
    srv_msg.client_save_option('IA_PD')
    srv_msg.client_add_saved_option()
    # 2nd prefix
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    # both prefixes assigned.

    # Reconfigure and restart the server with a wider delegated-len range.
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 80, 95)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    # Replay the saved IA_PD; the client should be handed the second prefix.
    misc.test_procedure()
    srv_msg.client_add_saved_option(erase=True)
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)

    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    srv_msg.response_check_suboption_content(26, 25, 'prefix', '2001:db8:1::20:0:0')

    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
@pytest.mark.rfc3633
@pytest.mark.disabled
def test_prefix_delegation_compare_prefixes_after_client_reboot():
    # After a simulated client reboot (fresh SOLICIT/REQUEST with the same
    # client-id), the server should delegate the same prefix as before.
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::300')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 96)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    # First SOLICIT/REQUEST exchange: obtain a delegated prefix.
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)

    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    # save prefix value
    prefix1 = srv_msg.get_suboption('IA_PD', 'IA-Prefix')[0]

    # Second exchange simulates a client reboot (no state carried over).
    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    # client reboot
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)

    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    # compare assigned prefix with the saved one
    prefix2 = srv_msg.get_suboption('IA_PD', 'IA-Prefix')[0]
    assert prefix1.prefix == prefix2.prefix

    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.PD
def test_prefix_delegation_just_PD_configured_PD_requested():
    # Server configured with a PD pool only (empty address pool); a client
    # asking only for IA_PD must get a prefix and no IA_NA (option 3).
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '$(EMPTY)')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 96)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    # No IA_NA was requested, so none may appear in the ADVERTISE.
    srv_msg.response_check_include_option(3, expect_include=False)

    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    srv_msg.response_check_include_option(3, expect_include=False)
@pytest.mark.v6
@pytest.mark.PD
def test_prefix_delegation_just_PD_configured_PD_and_IA_requested():
    # Server has only a PD pool; a client asking for both IA_PD and IA_NA
    # must get the prefix, plus an IA_NA carrying status NoAddrsAvail (2).
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '$(EMPTY)')
    srv_control.config_srv_prefix('2001:db8:1::', 0, 90, 96)
    srv_control.build_and_send_config_files()
    srv_control.start_srv('DHCP', 'started')

    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'ADVERTISE')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    srv_msg.response_check_include_option(3)
    srv_msg.response_check_option_content(3, 'sub-option', 13)
    # statuscode 2 == NoAddrsAvail: address pool is empty by configuration.
    srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)

    misc.test_procedure()
    srv_msg.client_does_include('Client', 'IA-NA')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', 'client-id')
    srv_msg.client_send_msg('REQUEST')

    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', 'REPLY')
    srv_msg.response_check_include_option(25)
    srv_msg.response_check_option_content(25, 'sub-option', 26)
    srv_msg.response_check_include_option(3)
    srv_msg.response_check_option_content(3, 'sub-option', 13)
    srv_msg.response_check_suboption_content(13, 3, 'statuscode', 2)
| |
import logging
from .attribute_converter import to_local
from saml2 import time_util
from saml2.s_utils import OtherError
from saml2.validate import valid_instance
from saml2.validate import NotValid
from saml2.response import IncorrectlySigned
logger = logging.getLogger(__name__)
def _dummy(_arg):
return None
class Request(object):
    """Base class for an incoming SAML request.

    Holds the raw XML, the parsed message and validation state, and knows
    how to load, signature-check and sanity-verify a request. Subclasses
    set ``signature_check`` to the appropriate security-context method.
    """

    def __init__(self, sec_context, receiver_addrs, attribute_converters=None,
                 timeslack=0):
        self.sec = sec_context
        self.receiver_addrs = receiver_addrs
        self.timeslack = timeslack
        self.xmlstr = ""
        self.name_id = ""
        self.message = None
        self.not_on_or_after = 0
        self.attribute_converters = attribute_converters
        self.binding = None
        self.relay_state = ""
        self.signature_check = _dummy  # has to be set !!!

    def _clear(self):
        """Reset the per-message state so the instance can be reused."""
        self.xmlstr = ""
        self.name_id = ""
        self.message = None
        self.not_on_or_after = 0

    def _loads(self, xmldata, binding=None, origdoc=None):
        """Parse *xmldata*, verify its signature and validate the instance.

        :raises IncorrectlySigned: if the signature check yields no message.
        :raises NotValid: if the parsed message fails schema validation.
        """
        # own copy
        self.xmlstr = xmldata[:]
        # Use lazy %-style logger arguments instead of eager string
        # formatting; the message is only rendered if the level is enabled.
        logger.info("xmlstr: %s", self.xmlstr)
        try:
            self.message = self.signature_check(xmldata, origdoc=origdoc)
        except TypeError:
            raise
        except Exception as excp:
            # Best effort: any other failure leaves self.message as None,
            # which is reported as an incorrectly signed request below.
            logger.info("EXCEPTION: %s", excp)

        if not self.message:
            logger.error("Response was not correctly signed")
            logger.info(xmldata)
            raise IncorrectlySigned()

        logger.info("request: %s", self.message)

        try:
            valid_instance(self.message)
        except NotValid as exc:
            logger.error("Not valid request: %s", exc.args[0])
            raise

        return self

    def issue_instant_ok(self):
        """ Check that the request was issued at a reasonable time """
        upper = time_util.shift_time(time_util.time_in_a_while(days=1),
                                     self.timeslack).timetuple()
        lower = time_util.shift_time(time_util.time_a_while_ago(days=1),
                                     -self.timeslack).timetuple()
        issued_at = time_util.str_to_time(self.message.issue_instant)
        # Chained comparison replaces ``x > lower and x < upper``.
        return lower < issued_at < upper

    def _verify(self):
        """Sanity-check version, destination and issue instant.

        NOTE: uses ``assert`` (stripped under ``python -O``); ``verify()``
        relies on catching AssertionError, so this is kept as-is.
        """
        assert self.message.version == "2.0"
        if self.message.destination and \
                self.message.destination not in self.receiver_addrs:
            logger.error("%s not in %s", self.message.destination,
                         self.receiver_addrs)
            raise OtherError("Not destined for me!")

        assert self.issue_instant_ok()
        return self

    def loads(self, xmldata, binding, origdoc=None):
        """Public entry point: parse and signature-check *xmldata*."""
        return self._loads(xmldata, binding, origdoc)

    def verify(self):
        """Verify the loaded message; return self on success, None on failure."""
        try:
            return self._verify()
        except AssertionError:
            return None

    def subject_id(self):
        """ The name of the subject can be in either of
        BaseID, NameID or EncryptedID

        :return: The identifier if there is one
        """
        if "subject" in list(self.message.keys()):
            _subj = self.message.subject
            if "base_id" in list(_subj.keys()) and _subj.base_id:
                return _subj.base_id
            elif _subj.name_id:
                return _subj.name_id
        else:
            if "base_id" in list(self.message.keys()) and self.message.base_id:
                return self.message.base_id
            elif self.message.name_id:
                return self.message.name_id
            else:  # EncryptedID
                pass

    def sender(self):
        """Return the text of the message's issuer element."""
        return self.message.issuer.text
class LogoutRequest(Request):
    """Incoming SAML LogoutRequest, checked with the logout-request signer."""

    msgtype = "logout_request"

    def __init__(self, sec_context, receiver_addrs, attribute_converters=None,
                 timeslack=0):
        super(LogoutRequest, self).__init__(sec_context, receiver_addrs,
                                            attribute_converters, timeslack)
        self.signature_check = self.sec.correctly_signed_logout_request
class AttributeQuery(Request):
    """Incoming SAML AttributeQuery, checked with the attribute-query signer."""

    msgtype = "attribute_query"

    def __init__(self, sec_context, receiver_addrs, attribute_converters=None,
                 timeslack=0):
        super(AttributeQuery, self).__init__(sec_context, receiver_addrs,
                                             attribute_converters, timeslack)
        self.signature_check = self.sec.correctly_signed_attribute_query

    def attribute(self):
        """Which attributes are sought for; currently always empty."""
        return []
class AuthnRequest(Request):
    """Incoming SAML AuthnRequest, checked with the authn-request signer."""

    msgtype = "authn_request"

    def __init__(self, sec_context, receiver_addrs, attribute_converters,
                 timeslack=0):
        super(AuthnRequest, self).__init__(sec_context, receiver_addrs,
                                           attribute_converters, timeslack)
        self.signature_check = self.sec.correctly_signed_authn_request

    def attributes(self):
        """Convert the message's attributes to their local representation."""
        return to_local(self.attribute_converters, self.message)
class AuthnQuery(Request):
    """Incoming SAML AuthnQuery, checked with the authn-query signer."""

    msgtype = "authn_query"

    def __init__(self, sec_context, receiver_addrs, attribute_converters,
                 timeslack=0):
        super(AuthnQuery, self).__init__(sec_context, receiver_addrs,
                                         attribute_converters, timeslack)
        self.signature_check = self.sec.correctly_signed_authn_query

    def attributes(self):
        """Convert the message's attributes to their local representation."""
        return to_local(self.attribute_converters, self.message)
class AssertionIDRequest(Request):
    """Incoming SAML AssertionIDRequest, checked with its dedicated signer."""

    msgtype = "assertion_id_request"

    def __init__(self, sec_context, receiver_addrs, attribute_converters,
                 timeslack=0):
        super(AssertionIDRequest, self).__init__(sec_context, receiver_addrs,
                                                 attribute_converters,
                                                 timeslack)
        self.signature_check = self.sec.correctly_signed_assertion_id_request

    def attributes(self):
        """Convert the message's attributes to their local representation."""
        return to_local(self.attribute_converters, self.message)
class AuthzDecisionQuery(Request):
    """Incoming SAML AuthzDecisionQuery; accessors below are placeholders."""

    msgtype = "authz_decision_query"

    def __init__(self, sec_context, receiver_addrs,
                 attribute_converters=None, timeslack=0):
        super(AuthzDecisionQuery, self).__init__(sec_context, receiver_addrs,
                                                 attribute_converters,
                                                 timeslack)
        self.signature_check = self.sec.correctly_signed_authz_decision_query

    def action(self):
        """Which action authorization is requested for (not implemented)."""

    def evidence(self):
        """The evidence on which the decision is based (not implemented)."""

    def resource(self):
        """On which resource the action is expected to occur (not implemented)."""
class NameIDMappingRequest(Request):
    """Incoming SAML NameIDMappingRequest, checked with its dedicated signer."""

    msgtype = "name_id_mapping_request"

    def __init__(self, sec_context, receiver_addrs, attribute_converters,
                 timeslack=0):
        super(NameIDMappingRequest, self).__init__(sec_context, receiver_addrs,
                                                   attribute_converters,
                                                   timeslack)
        self.signature_check = self.sec.correctly_signed_name_id_mapping_request
class ManageNameIDRequest(Request):
    """Incoming SAML ManageNameIDRequest, checked with its dedicated signer."""

    msgtype = "manage_name_id_request"

    def __init__(self, sec_context, receiver_addrs, attribute_converters,
                 timeslack=0):
        super(ManageNameIDRequest, self).__init__(sec_context, receiver_addrs,
                                                  attribute_converters,
                                                  timeslack)
        self.signature_check = self.sec.correctly_signed_manage_name_id_request
# Maps a metadata service name to the Request subclass used to parse
# messages arriving at that kind of endpoint.
SERVICE2REQUEST = {
    "single_sign_on_service": AuthnRequest,
    "attribute_service": AttributeQuery,
    "authz_service": AuthzDecisionQuery,
    "assertion_id_request_service": AssertionIDRequest,
    "authn_query_service": AuthnQuery,
    "manage_name_id_service": ManageNameIDRequest,
    "name_id_mapping_service": NameIDMappingRequest,
    #"artifact_resolve_service": ArtifactResolve,
    "single_logout_service": LogoutRequest
}
| |
"""
This is our testing framework.
Goals:
* it should be compatible with py.test and operate very similarly (or
identically)
* doesn't require any external dependencies
* preferably all the functionality should be in this file only
* no magic, just import the test file and execute the test functions, that's it
* portable
"""
import os
import sys
import inspect
import traceback
import pdb
from glob import glob
from timeit import default_timer as clock
def isgeneratorfunction(object):
    """
    Return true if the object is a user-defined generator function.

    Generator function objects provides same attributes as functions.

    See isfunction.__doc__ for attributes listing.

    Adapted from Python 2.6.
    """
    # CO_GENERATOR in the code object's flags marks generator functions.
    CO_GENERATOR = 0x20
    # ``__code__`` is the portable spelling of the code-object attribute
    # (available since Python 2.6 and the only spelling in Python 3);
    # the original used the Python-2-only ``func_code``.
    if (inspect.isfunction(object) or inspect.ismethod(object)) and \
            object.__code__.co_flags & CO_GENERATOR:
        return True
    return False
def test(*paths, **kwargs):
    """
    Runs the tests specified by paths, or all tests if paths=[].

    Note: paths are specified relative to the sympy root directory in a unix
    format (on all platforms including windows).

    Examples:

    Run all tests:
    >> import sympy
    >> sympy.test()

    Run one file:
    >> import sympy
    >> sympy.test("sympy/core/tests/test_basic.py")

    Run all tests in sympy/functions/ and some particular file:
    >> import sympy
    >> sympy.test("sympy/core/tests/test_basic.py", "sympy/functions")
    """
    reporter = PyTestReporter(kwargs.get("verbose", False),
                              kwargs.get("tb", "short"),
                              kwargs.get("colors", True))
    runner = SymPyTests(reporter, kwargs.get("kw", ""),
                        kwargs.get("pdb", False))
    # Default to the whole sympy tree when no explicit paths were given.
    runner.add_paths(paths if len(paths) > 0 else ["sympy"])
    return runner.test()
def doctest(*paths, **kwargs):
"""
Runs the doctests specified by paths, or all tests if paths=[].
Note: paths are specified relative to the sympy root directory in a unix
format (on all platforms including windows).
Examples:
Run all tests:
>> import sympy
>> sympy.doctest()
Run one file:
>> import sympy
>> sympy.doctest("sympy/core/tests/test_basic.py")
Run all tests in sympy/functions/ and some particular file:
>> import sympy
>> sympy.doctest("sympy/core/tests/test_basic.py", "sympy/functions")
"""
verbose = kwargs.get("verbose", False)
blacklist = kwargs.get("blacklist", [])
blacklist.extend([
"sympy/thirdparty/pyglet", # segfaults
"sympy/mpmath", # needs to be fixed upstream
"sympy/plotting", # generates live plots
"sympy/utilities/compilef.py", # needs tcc
"sympy/galgebra/GA.py", # needs numpy
"sympy/galgebra/latex_ex.py", # needs numpy
"sympy/conftest.py", # needs py.test
"sympy/utilities/benchmarking.py", # needs py.test
])
r = PyTestReporter(verbose)
t = SymPyDocTests(r, blacklist=blacklist)
if len(paths) > 0:
t.add_paths(paths)
else:
t.add_paths(["sympy"])
dtest = t.test()
# test documentation under doc/src/
import doctest
excluded = ['doc/src/modules/plotting.txt']
doc_files = glob('doc/src/*.txt') + glob('doc/src/modules/*.txt')
for ex in excluded:
doc_files.remove(ex)
for doc_file in doc_files:
print "Testing ", doc_file
print "Failed %s, tested %s" % doctest.testfile(doc_file, module_relative=False)
return dtest
class SymPyTests(object):
def __init__(self, reporter, kw="", post_mortem=False):
self._post_mortem = post_mortem
self._kw = kw
self._count = 0
self._root_dir = self.get_sympy_dir()
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._tests = []
def add_paths(self, paths):
for path in paths:
path2 = os.path.join(self._root_dir, *path.split("/"))
if path2.endswith(".py"):
self._tests.append(path2)
else:
self._tests.extend(self.get_tests(path2))
def test(self):
"""
Runs the tests.
Returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._tests:
try:
self.test_file(f)
except KeyboardInterrupt:
print " interrupted by user"
break
return self._reporter.finish()
def test_file(self, filename):
name = "test%d" % self._count
name = os.path.splitext(os.path.basename(filename))[0]
self._count += 1
gl = {'__file__':filename}
try:
execfile(filename, gl)
except (ImportError, SyntaxError):
self._reporter.import_error(filename, sys.exc_info())
return
pytestfile = ""
if "XFAIL" in gl:
pytestfile = inspect.getsourcefile(gl["XFAIL"])
disabled = gl.get("disabled", False)
if disabled:
funcs = []
else:
# we need to filter only those functions that begin with 'test_'
# that are defined in the testing file or in the file where
# is defined the XFAIL decorator
funcs = [gl[f] for f in gl.keys() if f.startswith("test_") and
(inspect.isfunction(gl[f])
or inspect.ismethod(gl[f])) and
(inspect.getsourcefile(gl[f]) == filename or
inspect.getsourcefile(gl[f]) == pytestfile)]
# Sorting of XFAILed functions isn't fixed yet :-(
funcs.sort(key=lambda x: inspect.getsourcelines(x)[1])
i = 0
while i is not len(funcs):
if isgeneratorfunction(funcs[i]):
# some tests can be generators, that return the actual
# test functions. We unpack it below:
f = funcs.pop(i)
for fg in f():
func = fg[0]
args = fg[1:]
fgw = lambda: func(*args)
funcs.insert(i, fgw)
i += 1
else:
i += 1
# drop functions that are not selected with the keyword expression:
funcs = [x for x in funcs if self.matches(x)]
self._reporter.entering_filename(filename, len(funcs))
for f in funcs:
self._reporter.entering_test(f)
try:
f()
except KeyboardInterrupt:
raise
except:
t, v, tr = sys.exc_info()
if t is AssertionError:
self._reporter.test_fail((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
elif t.__name__ == "Skipped":
self._reporter.test_skip()
elif t.__name__ == "XFail":
self._reporter.test_xfail()
elif t.__name__ == "XPass":
self._reporter.test_xpass(v)
else:
self._reporter.test_exception((t, v, tr))
if self._post_mortem:
pdb.post_mortem(tr)
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_sympy_dir(self):
"""
Returns the root sympy directory.
"""
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
return sympy_dir
def matches(self, x):
"""
Does the keyword expression self._kw match "x"? Returns True/False.
Always returns True if self._kw is "".
"""
if self._kw == "":
return True
return x.__name__.find(self._kw) != -1
def get_paths(self, dir="", level=15):
"""
Generates a set of paths for testfiles searching.
Example:
>> get_paths(2)
['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py']
>> get_paths(6)
['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py',
'sympy/*/*/*/test_*.py', 'sympy/*/*/*/*/test_*.py',
'sympy/*/*/*/*/*/test_*.py', 'sympy/*/*/*/*/*/*/test_*.py']
"""
wildcards = [dir]
for i in range(level):
wildcards.append(os.path.join(wildcards[-1], "*"))
p = [os.path.join(x, "test_*.py") for x in wildcards]
return p
def get_tests(self, dir):
"""
Returns the list of tests.
"""
g = []
for x in self.get_paths(dir):
g.extend(glob(x))
g = list(set(g))
g.sort()
return g
class SymPyDocTests(object):
def __init__(self, reporter, blacklist=[]):
self._count = 0
self._root_dir = self.get_sympy_dir()
self._reporter = reporter
self._reporter.root_dir(self._root_dir)
self._tests = []
self._blacklist = blacklist
def add_paths(self, paths):
for path in paths:
path2 = os.path.join(self._root_dir, *path.split("/"))
if path2.endswith(".py"):
self._tests.append(path2)
else:
self._tests.extend(self.get_tests(path2))
def test(self):
"""
Runs the tests.
Returns True if all tests pass, otherwise False.
"""
self._reporter.start()
for f in self._tests:
try:
self.test_file(f)
except KeyboardInterrupt:
print " interrupted by user"
break
return self._reporter.finish()
def test_file(self, filename):
def setup_pprint():
from sympy import pprint_use_unicode
# force pprint to be in ascii mode in doctests
pprint_use_unicode(False)
# hook our nice, hash-stable strprinter
from sympy.interactive import init_printing
from sympy.printing import sstrrepr
init_printing(sstrrepr)
import doctest
import unittest
from StringIO import StringIO
rel_name = filename[len(self._root_dir)+1:]
module = rel_name.replace('/', '.')[:-3]
setup_pprint()
try:
module = doctest._normalize_module(module)
tests = doctest.DocTestFinder().find(module)
except:
self._reporter.import_error(filename, sys.exc_info())
return
tests.sort()
tests = [test for test in tests if len(test.examples) > 0]
self._reporter.entering_filename(filename, len(tests))
for test in tests:
assert len(test.examples) != 0
runner = doctest.DocTestRunner()
old = sys.stdout
new = StringIO()
sys.stdout = new
try:
f, t = runner.run(test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if f > 0:
self._reporter.doctest_fail(test.name, new.getvalue())
else:
self._reporter.test_pass()
self._reporter.leaving_filename()
def get_sympy_dir(self):
"""
Returns the root sympy directory.
"""
this_file = os.path.abspath(__file__)
sympy_dir = os.path.join(os.path.dirname(this_file), "..", "..")
sympy_dir = os.path.normpath(sympy_dir)
return sympy_dir
def get_paths(self, dir="", level=15):
"""
Generates a set of paths for testfiles searching.
Example:
>> get_paths(2)
['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py']
>> get_paths(6)
['sympy/test_*.py', 'sympy/*/test_*.py', 'sympy/*/*/test_*.py',
'sympy/*/*/*/test_*.py', 'sympy/*/*/*/*/test_*.py',
'sympy/*/*/*/*/*/test_*.py', 'sympy/*/*/*/*/*/*/test_*.py']
"""
wildcards = [dir]
for i in range(level):
wildcards.append(os.path.join(wildcards[-1], "*"))
p = [os.path.join(x, "*.py") for x in wildcards]
return p
def is_on_blacklist(self, x):
"""
Returns True if "x" is on the blacklist. Otherwise False.
"""
for p in self._blacklist:
if x.find(p) != -1:
return True
return False
def get_tests(self, dir):
"""
Returns the list of tests.
"""
def importable(x):
"""
Checks if given pathname x is an importable module by checking for
__init__.py file.
Returns True/False.
Currently we only test if the __init__.py file exists in the
directory with the file "x" (in theory we should also test all the
parent dirs) and if "x" is not on self._blacklist.
"""
if self.is_on_blacklist(x):
return False
init_py = os.path.dirname(x) + os.path.sep + "__init__.py"
return os.path.exists(init_py)
g = []
for x in self.get_paths(dir):
g.extend(glob(x))
g = list(set(g))
g.sort()
# skip files that are not importable (i.e. missing __init__.py)
g = [x for x in g if importable(x)]
return g
class Reporter(object):
    """Abstract base class for all test reporters.

    Concrete reporters implement the callbacks invoked by the runners
    (start/finish, entering/leaving files, per-test results).
    """
class PyTestReporter(Reporter):
    """
    Py.test like reporter. Should produce output identical to py.test.
    """

    def __init__(self, verbose=False, tb="short", colors=True):
        self._verbose = verbose
        self._tb_style = tb          # traceback style; "no" suppresses them
        self._colors = colors
        self._xfailed = 0            # count of expected failures
        self._xpassed = []           # (file, name) of unexpectedly passing tests
        self._failed = []            # (file, func, exc_info) of failures
        self._failed_doctest = []    # (name, message) of failed doctests
        self._passed = 0
        self._skipped = 0
        self._exceptions = []        # (file, func, exc_info) of errors
        # this tracks the x-position of the cursor (useful for positioning
        # things on the screen), without the need for any readline library:
        self._write_pos = 0
        self._line_wrap = False

    def root_dir(self, dir):
        # Base directory used to shorten file names in the report.
        self._root_dir = dir

    def write(self, text, color="", align="left", width=80):
        """
        Prints a text on the screen.

        It uses sys.stdout.write(), so no readline library is necessary.

        color ... choose from the colors below, "" means default color
        align ... left/right, left is a normal print, right is aligned on the
                  right hand side of the screen, filled with " " if necessary
        width ... the screen width
        """
        color_templates = (
            ("Black", "0;30"),
            ("Red", "0;31"),
            ("Green", "0;32"),
            ("Brown", "0;33"),
            ("Blue", "0;34"),
            ("Purple", "0;35"),
            ("Cyan", "0;36"),
            ("LightGray", "0;37"),
            ("DarkGray", "1;30"),
            ("LightRed", "1;31"),
            ("LightGreen", "1;32"),
            ("Yellow", "1;33"),
            ("LightBlue", "1;34"),
            ("LightPurple", "1;35"),
            ("LightCyan", "1;36"),
            ("White", "1;37"), )

        colors = {}
        for name, value in color_templates:
            colors[name] = value
        # ANSI escape sequences: reset and color-set templates.
        c_normal = '\033[0m'
        c_color = '\033[%sm'

        if align == "right":
            if self._write_pos + len(text) > width:
                # we don't fit on the current line, create a new line
                self.write("\n")
            # Pad with spaces so *text* ends exactly at column *width*.
            self.write(" " * (width - self._write_pos - len(text)))

        if not sys.stdout.isatty():
            # the stdout is not a terminal, this for example happens if the
            # output is piped to less, e.g. "bin/test | less". In this case,
            # the terminal control sequences would be printed verbatim, so
            # don't use any colors.
            color = ""

        if self._line_wrap:
            # Previous write ended exactly at the screen edge; start a new
            # line unless the incoming text does so itself.
            if text[0] != "\n":
                sys.stdout.write("\n")

        if color == "":
            sys.stdout.write(text)
        else:
            sys.stdout.write("%s%s%s" % (c_color % colors[color], text, c_normal))
        sys.stdout.flush()

        # Update the tracked cursor column from the last newline (if any).
        l = text.rfind("\n")
        if l == -1:
            self._write_pos += len(text)
        else:
            self._write_pos = len(text) - l - 1
        self._line_wrap = self._write_pos >= width
        self._write_pos %= width

    def write_center(self, text, delim="="):
        # Center *text* on an 80-column line, padding with *delim*.
        width = 80
        if text != "":
            text = " %s " % text
        idx = (width - len(text)) // 2
        t = delim * idx + text + delim * (width - idx - len(text))
        self.write(t + "\n")

    def write_exception(self, e, val, tb):
        t = traceback.extract_tb(tb)
        # remove the first item, as that is always runtests.py
        t = t[1:]
        t = traceback.format_list(t)
        self.write("".join(t))
        t = traceback.format_exception_only(e, val)
        self.write("".join(t))

    def start(self):
        # Header: banner plus interpreter executable and version.
        self.write_center("test process starts")
        executable = sys.executable
        v = sys.version_info
        python_version = "%s.%s.%s-%s-%s" % v
        self.write("executable: %s (%s)\n\n" % (executable, python_version))
        self._t_start = clock()

    def finish(self):
        """Print the summary (xpassed, exceptions, failures, totals) and
        return True if nothing failed or errored."""
        self._t_end = clock()
        self.write("\n")
        text = "tests finished: %d passed" % self._passed
        if len(self._failed) > 0:
            text += ", %d failed" % len(self._failed)
        if len(self._failed_doctest) > 0:
            text += ", %d failed" % len(self._failed_doctest)
        if self._skipped > 0:
            text += ", %d skipped" % self._skipped
        if self._xfailed > 0:
            text += ", %d xfailed" % self._xfailed
        if len(self._xpassed) > 0:
            text += ", %d xpassed" % len(self._xpassed)
        if len(self._exceptions) > 0:
            text += ", %d exceptions" % len(self._exceptions)
        text += " in %.2f seconds" % (self._t_end - self._t_start)

        if len(self._xpassed) > 0:
            self.write_center("xpassed tests", "_")
            for e in self._xpassed:
                self.write("%s:%s\n" % (e[0], e[1]))
            self.write("\n")

        if self._tb_style != "no" and len(self._exceptions) > 0:
            #self.write_center("These tests raised an exception", "_")
            for e in self._exceptions:
                filename, f, (t, val, tb) = e
                self.write_center("", "_")
                if f is None:
                    # f is None for import errors (whole-file failures).
                    s = "%s" % filename
                else:
                    s = "%s:%s" % (filename, f.__name__)
                self.write_center(s, "_")
                self.write_exception(t, val, tb)
            self.write("\n")

        if self._tb_style != "no" and len(self._failed) > 0:
            #self.write_center("Failed", "_")
            for e in self._failed:
                filename, f, (t, val, tb) = e
                self.write_center("", "_")
                self.write_center("%s:%s" % (filename, f.__name__), "_")
                self.write_exception(t, val, tb)
            self.write("\n")

        if self._tb_style != "no" and len(self._failed_doctest) > 0:
            #self.write_center("Failed", "_")
            for e in self._failed_doctest:
                filename, msg = e
                self.write_center("", "_")
                self.write_center("%s" % filename, "_")
                self.write(msg)
            self.write("\n")

        self.write_center(text)
        ok = len(self._failed) == 0 and len(self._exceptions) == 0 and \
            len(self._failed_doctest) == 0
        if not ok:
            self.write("DO *NOT* COMMIT!\n")
        return ok

    def entering_filename(self, filename, n):
        # Print the file name (relative to the root) and its test count.
        rel_name = filename[len(self._root_dir) + 1:]
        self._active_file = rel_name
        self._active_file_error = False
        self.write(rel_name)
        self.write("[%d] " % n)

    def leaving_filename(self):
        if self._colors:
            self.write(" ")
            if self._active_file_error:
                self.write("[FAIL]", "Red", align="right")
            else:
                self.write("[OK]", "Green", align="right")
        self.write("\n")
        if self._verbose:
            self.write("\n")

    def entering_test(self, f):
        self._active_f = f
        if self._verbose:
            self.write("\n" + f.__name__ + " ")

    def test_xfail(self):
        # Expected failure: count it, print "f".
        self._xfailed += 1
        self.write("f")

    def test_xpass(self, fname):
        # Unexpected pass: record it, print "X".
        self._xpassed.append((self._active_file, fname))
        self.write("X")

    def test_fail(self, exc_info):
        self._failed.append((self._active_file, self._active_f, exc_info))
        self.write("F")
        self._active_file_error = True

    def doctest_fail(self, name, error_msg):
        # the first line contains "******", remove it:
        error_msg = "\n".join(error_msg.split("\n")[1:])
        self._failed_doctest.append((name, error_msg))
        self.write("F")
        self._active_file_error = True

    def test_pass(self):
        self._passed += 1
        if self._verbose:
            self.write("ok")
        else:
            self.write(".")

    def test_skip(self):
        self._skipped += 1
        self.write("s")

    def test_exception(self, exc_info):
        # Non-assertion error inside a test.
        self._exceptions.append((self._active_file, self._active_f, exc_info))
        self.write("E")
        self._active_file_error = True

    def import_error(self, filename, exc_info):
        # Whole file failed to import/execute; func slot is None.
        self._exceptions.append((filename, None, exc_info))
        rel_name = filename[len(self._root_dir) + 1:]
        self.write(rel_name)
        self.write("[?]   Failed to import")
        if self._colors:
            self.write(" ")
            self.write("[FAIL]", "Red", align="right")
        self.write("\n")
| |
"""
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import csv
import hashlib
import gzip
import shutil
from collections import namedtuple
from os import environ, listdir, makedirs
from os.path import expanduser, isdir, join, splitext
from importlib import resources
from ..utils import Bunch
from ..utils import check_random_state
from ..utils import check_pandas_support
import numpy as np
from urllib.request import urlretrieve
DATA_MODULE = "sklearn.datasets.data"
DESCR_MODULE = "sklearn.datasets.descr"
IMAGES_MODULE = "sklearn.datasets.images"
RemoteFileMetadata = namedtuple("RemoteFileMetadata", ["filename", "url", "checksum"])
def get_data_home(data_home=None) -> str:
    """Return the path of the scikit-learn data dir.

    This folder is used by some large dataset loaders to avoid downloading the
    data several times.

    By default the data dir is set to a folder named 'scikit_learn_data' in the
    user home folder.

    Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
    variable or programmatically by giving an explicit folder path. The '~'
    symbol is expanded to the user home folder.

    If the folder does not already exist, it is automatically created.

    Parameters
    ----------
    data_home : str, default=None
        The path to scikit-learn data directory. If `None`, the default path
        is `~/scikit_learn_data`.

    Returns
    -------
    data_home : str
        The path to the scikit-learn data directory.
    """
    if data_home is None:
        # Explicit argument wins, then the environment variable, then the
        # built-in default under the user's home directory.
        data_home = environ.get("SCIKIT_LEARN_DATA", join("~", "scikit_learn_data"))
    data_home = expanduser(data_home)
    makedirs(data_home, exist_ok=True)
    return data_home
def clear_data_home(data_home=None):
    """Delete all the content of the data home cache.

    Parameters
    ----------
    data_home : str, default=None
        The path to scikit-learn data directory. If `None`, the default path
        is `~/scikit_learn_data`.
    """
    # Resolve the directory first (this also creates it if absent), then
    # remove it and everything inside.
    data_home = get_data_home(data_home)
    shutil.rmtree(data_home)
def _convert_data_dataframe(
    caller_name, data, target, feature_names, target_names, sparse_data=False
):
    """Combine data and target into one DataFrame and return pandas views.

    Returns ``(combined_df, X, y)`` where ``X``/``y`` are column selections of
    ``combined_df``; ``y`` is squeezed to a Series when it has one column.
    """
    pd = check_pandas_support("{} with as_frame=True".format(caller_name))
    if sparse_data:
        features_frame = pd.DataFrame.sparse.from_spmatrix(data, columns=feature_names)
    else:
        features_frame = pd.DataFrame(data, columns=feature_names)
    targets_frame = pd.DataFrame(target, columns=target_names)
    combined_df = pd.concat([features_frame, targets_frame], axis=1)
    X = combined_df[feature_names]
    y = combined_df[target_names]
    if y.shape[1] == 1:
        # Single-target case: hand back a Series rather than a 1-column frame.
        y = y.iloc[:, 0]
    return combined_df, X, y
def load_files(
    container_path,
    *,
    description=None,
    categories=None,
    load_content=True,
    shuffle=True,
    encoding=None,
    decode_error="strict",
    random_state=0,
):
    """Load text files with categories as subfolder names.

    Expects a two-level layout ``container_path/<category>/<file>``: each
    subfolder name is used as a supervised label; individual file names are
    not important. No feature extraction is performed; to use the text in a
    scikit-learn estimator, build features with
    :mod:`~sklearn.feature_extraction.text`. If ``load_content=True`` you
    should also pass ``encoding`` (often ``'utf-8'``); with ``encoding=None``
    the content is returned as bytes.

    Read more in the :ref:`User Guide <datasets>`.

    Parameters
    ----------
    container_path : str or unicode
        Path to the main folder holding one subfolder per category.
    description : str or unicode, default=None
        A paragraph describing the characteristic of the dataset: its source,
        reference, etc.
    categories : list of str, default=None
        If None (default), load all the categories. If not None, only the
        listed category names are loaded (others ignored).
    load_content : bool, default=True
        Whether to load the content of the files into a 'data' attribute.
        If False, only a filenames attribute gives the paths to the files.
    shuffle : bool, default=True
        Whether or not to shuffle the data: might be important for models that
        assume the samples are independent and identically distributed
        (i.i.d.), such as stochastic gradient descent.
    encoding : str, default=None
        If None, do not try to decode the content of the files (e.g. for
        images or other non-text content). If not None, encoding used to
        decode text files to Unicode when load_content is True.
    decode_error : {'strict', 'ignore', 'replace'}, default='strict'
        Instruction on what to do if a byte sequence contains characters not
        of the given `encoding`. Passed as 'errors' to bytes.decode.
    random_state : int, RandomState instance or None, default=0
        Determines random number generation for dataset shuffling. Pass an int
        for reproducible output across multiple function calls.
        See :term:`Glossary <random_state>`.

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object with attributes ``target`` (ndarray of integer
        labels), ``target_names`` (list of class names), ``filenames``
        (ndarray of file paths), ``DESCR`` (str), and — only when
        ``load_content=True`` — ``data`` (list of raw file contents).
    """
    target_names = []
    target = []
    filenames = []
    # Subdirectories of the container, in sorted (deterministic) order.
    folders = [
        entry
        for entry in sorted(listdir(container_path))
        if isdir(join(container_path, entry))
    ]
    if categories is not None:
        folders = [folder for folder in folders if folder in categories]
    for label, folder in enumerate(folders):
        target_names.append(folder)
        folder_path = join(container_path, folder)
        documents = [join(folder_path, name) for name in sorted(listdir(folder_path))]
        filenames.extend(documents)
        target.extend(len(documents) * [label])
    # Convert to arrays so fancy indexing works for shuffling.
    filenames = np.array(filenames)
    target = np.array(target)
    if shuffle:
        rng = check_random_state(random_state)
        indices = np.arange(filenames.shape[0])
        rng.shuffle(indices)
        filenames = filenames[indices]
        target = target[indices]
    if not load_content:
        return Bunch(
            filenames=filenames,
            target_names=target_names,
            target=target,
            DESCR=description,
        )
    data = []
    for filename in filenames:
        with open(filename, "rb") as f:
            data.append(f.read())
    if encoding is not None:
        data = [blob.decode(encoding, decode_error) for blob in data]
    return Bunch(
        data=data,
        filenames=filenames,
        target_names=target_names,
        target=target,
        DESCR=description,
    )
def load_csv_data(
    data_file_name,
    *,
    data_module=DATA_MODULE,
    descr_file_name=None,
    descr_module=DESCR_MODULE,
):
    """Loads `data_file_name` from `data_module` with `importlib.resources`.

    Parameters
    ----------
    data_file_name : str
        Name of csv file to be loaded from `data_module/data_file_name`.
        For example `'wine_data.csv'`.
    data_module : str or module, default='sklearn.datasets.data'
        Module where data lives. The default is `'sklearn.datasets.data'`.
    descr_file_name : str, default=None
        Name of rst file to be loaded from `descr_module/descr_file_name`.
        For example `'wine_data.rst'`. See also :func:`load_descr`.
        If not None, also returns the corresponding description of
        the dataset.
    descr_module : str or module, default='sklearn.datasets.descr'
        Module where `descr_file_name` lives. See also :func:`load_descr`.
        The default is `'sklearn.datasets.descr'`.

    Returns
    -------
    data : ndarray of shape (n_samples, n_features)
        A 2D array with each row representing one sample and each column
        representing the features of a given sample.
    target : ndarray of shape (n_samples,)
        A 1D array holding target variables for all the samples in `data`.
        For example target[0] is the target variable for data[0].
    target_names : ndarray of shape (n_samples,)
        A 1D array containing the names of the classifications. For example
        target_names[0] is the name of the target[0] class.
    descr : str, optional
        Description of the dataset (the content of `descr_file_name`).
        Only returned if `descr_file_name` is not None.
    """
    with resources.open_text(data_module, data_file_name) as csv_file:
        data_file = csv.reader(csv_file)
        # Header row encodes: n_samples, n_features, then the class names.
        temp = next(data_file)
        n_samples = int(temp[0])
        n_features = int(temp[1])
        target_names = np.array(temp[2:])
        data = np.empty((n_samples, n_features))
        target = np.empty((n_samples,), dtype=int)
        # Each remaining row: feature values followed by the integer label.
        for i, ir in enumerate(data_file):
            data[i] = np.asarray(ir[:-1], dtype=np.float64)
            target[i] = np.asarray(ir[-1], dtype=int)
    if descr_file_name is None:
        return data, target, target_names
    else:
        assert descr_module is not None
        descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name)
        return data, target, target_names, descr
def load_gzip_compressed_csv_data(
    data_file_name,
    *,
    data_module=DATA_MODULE,
    descr_file_name=None,
    descr_module=DESCR_MODULE,
    encoding="utf-8",
    **kwargs,
):
    """Loads gzip-compressed `data_file_name` from `data_module` with `importlib.resources`.

    1) Open resource file with `importlib.resources.open_binary`
    2) Decompress file obj with `gzip.open`
    3) Load decompressed data with `np.loadtxt`

    Parameters
    ----------
    data_file_name : str
        Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from
        `data_module/data_file_name`. For example `'diabetes_data.csv.gz'`.
    data_module : str or module, default='sklearn.datasets.data'
        Module where data lives. The default is `'sklearn.datasets.data'`.
    descr_file_name : str, default=None
        Name of rst file to be loaded from `descr_module/descr_file_name`.
        For example `'wine_data.rst'`. See also :func:`load_descr`.
        If not None, also returns the corresponding description of
        the dataset.
    descr_module : str or module, default='sklearn.datasets.descr'
        Module where `descr_file_name` lives. See also :func:`load_descr`.
        The default is `'sklearn.datasets.descr'`.
    encoding : str, default="utf-8"
        Name of the encoding that the gzip-decompressed file will be
        decoded with. The default is 'utf-8'.
    **kwargs : dict, optional
        Keyword arguments to be passed to `np.loadtxt`;
        e.g. delimiter=','.

    Returns
    -------
    data : ndarray of shape (n_samples, n_features)
        A 2D array with each row representing one sample and each column
        representing the features and/or target of a given sample.
    descr : str, optional
        Description of the dataset (the content of `descr_file_name`).
        Only returned if `descr_file_name` is not None.
    """
    with resources.open_binary(data_module, data_file_name) as compressed_file:
        # Use `with` so the gzip wrapper is closed deterministically instead
        # of being left to garbage collection.
        with gzip.open(compressed_file, mode="rt", encoding=encoding) as text_file:
            data = np.loadtxt(text_file, **kwargs)
    if descr_file_name is None:
        return data
    else:
        assert descr_module is not None
        descr = load_descr(descr_module=descr_module, descr_file_name=descr_file_name)
        return data, descr
def load_descr(descr_file_name, *, descr_module=DESCR_MODULE):
    """Load `descr_file_name` from `descr_module` with `importlib.resources`.

    Parameters
    ----------
    descr_file_name : str, default=None
        Name of rst file to be loaded from `descr_module/descr_file_name`.
        For example `'wine_data.rst'`. See also :func:`load_descr`.
        If not None, also returns the corresponding description of
        the dataset.
    descr_module : str or module, default='sklearn.datasets.descr'
        Module where `descr_file_name` lives. See also :func:`load_descr`.
        The default is `'sklearn.datasets.descr'`.

    Returns
    -------
    fdescr : str
        Content of `descr_file_name`.
    """
    # The description files are plain-text rst resources bundled with the
    # package; read and return the whole file as one string.
    return resources.read_text(descr_module, descr_file_name)
def load_wine(*, return_X_y=False, as_frame=False):
    """Load and return the wine dataset (classification).
    .. versionadded:: 0.18
    The wine dataset is a classic and very easy multi-class classification
    dataset.
    =================   ==============
    Classes                          3
    Samples per class        [59,71,48]
    Samples total                  178
    Dimensionality                  13
    Features            real, positive
    =================   ==============
    Read more in the :ref:`User Guide <wine_dataset>`.
    Parameters
    ----------
    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object.
        See below for more information about the `data` and `target` object.
    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric). The target is
        a pandas DataFrame or Series depending on the number of target columns.
        If `return_X_y` is True, then (`data`, `target`) will be pandas
        DataFrames or Series as described below.
        .. versionadded:: 0.23
    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.
        data : {ndarray, dataframe} of shape (178, 13)
            The data matrix. If `as_frame=True`, `data` will be a pandas
            DataFrame.
        target: {ndarray, Series} of shape (178,)
            The classification target. If `as_frame=True`, `target` will be
            a pandas Series.
        feature_names: list
            The names of the dataset columns.
        target_names: list
            The names of target classes.
        frame: DataFrame of shape (178, 14)
            Only present when `as_frame=True`. DataFrame with `data` and
            `target`.
            .. versionadded:: 0.23
        DESCR: str
            The full description of the dataset.
    (data, target) : tuple if ``return_X_y`` is True
    The copy of UCI ML Wine Data Set dataset is downloaded and modified to fit
    standard format from:
    https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data
    Examples
    --------
    Let's say you are interested in the samples 10, 80, and 140, and want to
    know their class name.
    >>> from sklearn.datasets import load_wine
    >>> data = load_wine()
    >>> data.target[[10, 80, 140]]
    array([0, 1, 2])
    >>> list(data.target_names)
    ['class_0', 'class_1', 'class_2']
    """
    # CSV and rst description ship inside the package (see load_csv_data).
    data, target, target_names, fdescr = load_csv_data(
        data_file_name="wine_data.csv", descr_file_name="wine_data.rst"
    )
    # Column names in the same order as the CSV's feature columns.
    feature_names = [
        "alcohol",
        "malic_acid",
        "ash",
        "alcalinity_of_ash",
        "magnesium",
        "total_phenols",
        "flavanoids",
        "nonflavanoid_phenols",
        "proanthocyanins",
        "color_intensity",
        "hue",
        "od280/od315_of_diluted_wines",
        "proline",
    ]
    frame = None
    target_columns = [
        "target",
    ]
    if as_frame:
        # Re-wrap the numpy arrays as pandas objects (requires pandas).
        frame, data, target = _convert_data_dataframe(
            "load_wine", data, target, feature_names, target_columns
        )
    if return_X_y:
        return data, target
    return Bunch(
        data=data,
        target=target,
        frame=frame,
        target_names=target_names,
        DESCR=fdescr,
        feature_names=feature_names,
    )
def load_iris(*, return_X_y=False, as_frame=False):
    """Load and return the iris dataset (classification).
    The iris dataset is a classic and very easy multi-class classification
    dataset.
    =================   ==============
    Classes                          3
    Samples per class               50
    Samples total                  150
    Dimensionality                   4
    Features            real, positive
    =================   ==============
    Read more in the :ref:`User Guide <iris_dataset>`.
    Parameters
    ----------
    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object. See
        below for more information about the `data` and `target` object.
        .. versionadded:: 0.18
    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric). The target is
        a pandas DataFrame or Series depending on the number of target columns.
        If `return_X_y` is True, then (`data`, `target`) will be pandas
        DataFrames or Series as described below.
        .. versionadded:: 0.23
    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.
        data : {ndarray, dataframe} of shape (150, 4)
            The data matrix. If `as_frame=True`, `data` will be a pandas
            DataFrame.
        target: {ndarray, Series} of shape (150,)
            The classification target. If `as_frame=True`, `target` will be
            a pandas Series.
        feature_names: list
            The names of the dataset columns.
        target_names: list
            The names of target classes.
        frame: DataFrame of shape (150, 5)
            Only present when `as_frame=True`. DataFrame with `data` and
            `target`.
            .. versionadded:: 0.23
        DESCR: str
            The full description of the dataset.
        filename: str
            The path to the location of the data.
            .. versionadded:: 0.20
    (data, target) : tuple if ``return_X_y`` is True
        .. versionadded:: 0.18
    Notes
    -----
        .. versionchanged:: 0.20
            Fixed two wrong data points according to Fisher's paper.
            The new version is the same as in R, but not as in the UCI
            Machine Learning Repository.
    Examples
    --------
    Let's say you are interested in the samples 10, 25, and 50, and want to
    know their class name.
    >>> from sklearn.datasets import load_iris
    >>> data = load_iris()
    >>> data.target[[10, 25, 50]]
    array([0, 0, 1])
    >>> list(data.target_names)
    ['setosa', 'versicolor', 'virginica']
    """
    # CSV and rst description ship inside the package (see load_csv_data).
    data_file_name = "iris.csv"
    data, target, target_names, fdescr = load_csv_data(
        data_file_name=data_file_name, descr_file_name="iris.rst"
    )
    # Column names in the same order as the CSV's feature columns.
    feature_names = [
        "sepal length (cm)",
        "sepal width (cm)",
        "petal length (cm)",
        "petal width (cm)",
    ]
    frame = None
    target_columns = [
        "target",
    ]
    if as_frame:
        # Re-wrap the numpy arrays as pandas objects (requires pandas).
        frame, data, target = _convert_data_dataframe(
            "load_iris", data, target, feature_names, target_columns
        )
    if return_X_y:
        return data, target
    return Bunch(
        data=data,
        target=target,
        frame=frame,
        target_names=target_names,
        DESCR=fdescr,
        feature_names=feature_names,
        filename=data_file_name,
        data_module=DATA_MODULE,
    )
def load_breast_cancer(*, return_X_y=False, as_frame=False):
    """Load and return the breast cancer wisconsin dataset (classification).
    The breast cancer dataset is a classic and very easy binary classification
    dataset.
    =================   ==============
    Classes                          2
    Samples per class    212(M),357(B)
    Samples total                  569
    Dimensionality                  30
    Features            real, positive
    =================   ==============
    Read more in the :ref:`User Guide <breast_cancer_dataset>`.
    Parameters
    ----------
    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object.
        See below for more information about the `data` and `target` object.
        .. versionadded:: 0.18
    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric). The target is
        a pandas DataFrame or Series depending on the number of target columns.
        If `return_X_y` is True, then (`data`, `target`) will be pandas
        DataFrames or Series as described below.
        .. versionadded:: 0.23
    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.
        data : {ndarray, dataframe} of shape (569, 30)
            The data matrix. If `as_frame=True`, `data` will be a pandas
            DataFrame.
        target: {ndarray, Series} of shape (569,)
            The classification target. If `as_frame=True`, `target` will be
            a pandas Series.
        feature_names: list
            The names of the dataset columns.
        target_names: list
            The names of target classes.
        frame: DataFrame of shape (569, 31)
            Only present when `as_frame=True`. DataFrame with `data` and
            `target`.
            .. versionadded:: 0.23
        DESCR: str
            The full description of the dataset.
        filename: str
            The path to the location of the data.
            .. versionadded:: 0.20
    (data, target) : tuple if ``return_X_y`` is True
        .. versionadded:: 0.18
    The copy of UCI ML Breast Cancer Wisconsin (Diagnostic) dataset is
    downloaded from:
    https://goo.gl/U2Uwz2
    Examples
    --------
    Let's say you are interested in the samples 10, 50, and 85, and want to
    know their class name.
    >>> from sklearn.datasets import load_breast_cancer
    >>> data = load_breast_cancer()
    >>> data.target[[10, 50, 85]]
    array([0, 1, 0])
    >>> list(data.target_names)
    ['malignant', 'benign']
    """
    # CSV and rst description ship inside the package (see load_csv_data).
    data_file_name = "breast_cancer.csv"
    data, target, target_names, fdescr = load_csv_data(
        data_file_name=data_file_name, descr_file_name="breast_cancer.rst"
    )
    # Column names in the same order as the CSV's feature columns.
    feature_names = np.array(
        [
            "mean radius",
            "mean texture",
            "mean perimeter",
            "mean area",
            "mean smoothness",
            "mean compactness",
            "mean concavity",
            "mean concave points",
            "mean symmetry",
            "mean fractal dimension",
            "radius error",
            "texture error",
            "perimeter error",
            "area error",
            "smoothness error",
            "compactness error",
            "concavity error",
            "concave points error",
            "symmetry error",
            "fractal dimension error",
            "worst radius",
            "worst texture",
            "worst perimeter",
            "worst area",
            "worst smoothness",
            "worst compactness",
            "worst concavity",
            "worst concave points",
            "worst symmetry",
            "worst fractal dimension",
        ]
    )
    frame = None
    target_columns = [
        "target",
    ]
    if as_frame:
        # Re-wrap the numpy arrays as pandas objects (requires pandas).
        frame, data, target = _convert_data_dataframe(
            "load_breast_cancer", data, target, feature_names, target_columns
        )
    if return_X_y:
        return data, target
    return Bunch(
        data=data,
        target=target,
        frame=frame,
        target_names=target_names,
        DESCR=fdescr,
        feature_names=feature_names,
        filename=data_file_name,
        data_module=DATA_MODULE,
    )
def load_digits(*, n_class=10, return_X_y=False, as_frame=False):
    """Load and return the digits dataset (classification).
    Each datapoint is a 8x8 image of a digit.
    =================   ==============
    Classes                         10
    Samples per class             ~180
    Samples total                 1797
    Dimensionality                  64
    Features             integers 0-16
    =================   ==============
    Read more in the :ref:`User Guide <digits_dataset>`.
    Parameters
    ----------
    n_class : int, default=10
        The number of classes to return. Between 0 and 10.
    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object.
        See below for more information about the `data` and `target` object.
        .. versionadded:: 0.18
    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric). The target is
        a pandas DataFrame or Series depending on the number of target columns.
        If `return_X_y` is True, then (`data`, `target`) will be pandas
        DataFrames or Series as described below.
        .. versionadded:: 0.23
    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.
        data : {ndarray, dataframe} of shape (1797, 64)
            The flattened data matrix. If `as_frame=True`, `data` will be
            a pandas DataFrame.
        target: {ndarray, Series} of shape (1797,)
            The classification target. If `as_frame=True`, `target` will be
            a pandas Series.
        feature_names: list
            The names of the dataset columns.
        target_names: list
            The names of target classes.
            .. versionadded:: 0.20
        frame: DataFrame of shape (1797, 65)
            Only present when `as_frame=True`. DataFrame with `data` and
            `target`.
            .. versionadded:: 0.23
        images: {ndarray} of shape (1797, 8, 8)
            The raw image data.
        DESCR: str
            The full description of the dataset.
    (data, target) : tuple if ``return_X_y`` is True
        .. versionadded:: 0.18
    This is a copy of the test set of the UCI ML hand-written digits datasets
    https://archive.ics.uci.edu/ml/datasets/Optical+Recognition+of+Handwritten+Digits
    Examples
    --------
    To load the data and visualize the images::
        >>> from sklearn.datasets import load_digits
        >>> digits = load_digits()
        >>> print(digits.data.shape)
        (1797, 64)
        >>> import matplotlib.pyplot as plt
        >>> plt.gray()
        >>> plt.matshow(digits.images[0])
        <...>
        >>> plt.show()
    """
    # The gzipped CSV stores each sample as 64 pixel values plus the label.
    data, fdescr = load_gzip_compressed_csv_data(
        data_file_name="digits.csv.gz", descr_file_name="digits.rst", delimiter=","
    )
    # Last column is the class label; the rest are the flattened pixels.
    target = data[:, -1].astype(int, copy=False)
    flat_data = data[:, :-1]
    # `images` is a reshaped *view*: it shares memory with `flat_data`.
    images = flat_data.view()
    images.shape = (-1, 8, 8)
    if n_class < 10:
        # Keep only samples whose label is below n_class.
        idx = target < n_class
        flat_data, target = flat_data[idx], target[idx]
        images = images[idx]
    feature_names = [
        "pixel_{}_{}".format(row_idx, col_idx)
        for row_idx in range(8)
        for col_idx in range(8)
    ]
    frame = None
    target_columns = [
        "target",
    ]
    if as_frame:
        # Re-wrap the numpy arrays as pandas objects (requires pandas).
        frame, flat_data, target = _convert_data_dataframe(
            "load_digits", flat_data, target, feature_names, target_columns
        )
    if return_X_y:
        return flat_data, target
    return Bunch(
        data=flat_data,
        target=target,
        frame=frame,
        feature_names=feature_names,
        target_names=np.arange(10),
        images=images,
        DESCR=fdescr,
    )
def load_diabetes(*, return_X_y=False, as_frame=False):
    """Load and return the diabetes dataset (regression).
    ==============   ==================
    Samples total    442
    Dimensionality   10
    Features         real, -.2 < x < .2
    Targets          integer 25 - 346
    ==============   ==================
    .. note::
       The meaning of each feature (i.e. `feature_names`) might be unclear
       (especially for `ltg`) as the documentation of the original dataset is
       not explicit. We provide information that seems correct in regard with
       the scientific literature in this field of research.
    Read more in the :ref:`User Guide <diabetes_dataset>`.
    Parameters
    ----------
    return_X_y : bool, default=False.
        If True, returns ``(data, target)`` instead of a Bunch object.
        See below for more information about the `data` and `target` object.
        .. versionadded:: 0.18
    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric). The target is
        a pandas DataFrame or Series depending on the number of target columns.
        If `return_X_y` is True, then (`data`, `target`) will be pandas
        DataFrames or Series as described below.
        .. versionadded:: 0.23
    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.
        data : {ndarray, dataframe} of shape (442, 10)
            The data matrix. If `as_frame=True`, `data` will be a pandas
            DataFrame.
        target: {ndarray, Series} of shape (442,)
            The regression target. If `as_frame=True`, `target` will be
            a pandas Series.
        feature_names: list
            The names of the dataset columns.
        frame: DataFrame of shape (442, 11)
            Only present when `as_frame=True`. DataFrame with `data` and
            `target`.
            .. versionadded:: 0.23
        DESCR: str
            The full description of the dataset.
        data_filename: str
            The path to the location of the data.
        target_filename: str
            The path to the location of the target.
    (data, target) : tuple if ``return_X_y`` is True
        .. versionadded:: 0.18
    """
    # Data and target live in two separate gzipped CSV files.
    data_filename = "diabetes_data.csv.gz"
    target_filename = "diabetes_target.csv.gz"
    data = load_gzip_compressed_csv_data(data_filename)
    target = load_gzip_compressed_csv_data(target_filename)
    fdescr = load_descr("diabetes.rst")
    feature_names = ["age", "sex", "bmi", "bp", "s1", "s2", "s3", "s4", "s5", "s6"]
    frame = None
    target_columns = [
        "target",
    ]
    if as_frame:
        # Re-wrap the numpy arrays as pandas objects (requires pandas).
        frame, data, target = _convert_data_dataframe(
            "load_diabetes", data, target, feature_names, target_columns
        )
    if return_X_y:
        return data, target
    return Bunch(
        data=data,
        target=target,
        frame=frame,
        DESCR=fdescr,
        feature_names=feature_names,
        data_filename=data_filename,
        target_filename=target_filename,
        data_module=DATA_MODULE,
    )
def load_linnerud(*, return_X_y=False, as_frame=False):
    """Load and return the physical exercise Linnerud dataset.

    This dataset is suitable for multi-output regression tasks.

    ==============   ============================
    Samples total    20
    Dimensionality   3 (for both data and target)
    Features         integer
    Targets          integer
    ==============   ============================

    Read more in the :ref:`User Guide <linnerrud_dataset>`.

    Parameters
    ----------
    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object.
        See below for more information about the `data` and `target` object.
        .. versionadded:: 0.18
    as_frame : bool, default=False
        If True, the data is a pandas DataFrame including columns with
        appropriate dtypes (numeric, string or categorical). The target is
        a pandas DataFrame or Series depending on the number of target columns.
        If `return_X_y` is True, then (`data`, `target`) will be pandas
        DataFrames or Series as described below.
        .. versionadded:: 0.23

    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.
        data : {ndarray, dataframe} of shape (20, 3)
            The data matrix. If `as_frame=True`, `data` will be a pandas
            DataFrame.
        target: {ndarray, dataframe} of shape (20, 3)
            The regression targets. If `as_frame=True`, `target` will be
            a pandas DataFrame.
        feature_names: list
            The names of the dataset columns.
        target_names: list
            The names of the target columns.
        frame: DataFrame of shape (20, 6)
            Only present when `as_frame=True`. DataFrame with `data` and
            `target`.
            .. versionadded:: 0.23
        DESCR: str
            The full description of the dataset.
        data_filename: str
            The path to the location of the data.
        target_filename: str
            The path to the location of the target.
            .. versionadded:: 0.20
    (data, target) : tuple if ``return_X_y`` is True
        .. versionadded:: 0.18
    """
    data_filename = "linnerud_exercise.csv"
    target_filename = "linnerud_physiological.csv"
    # Both CSVs carry a one-line header of column names; read it first, then
    # rewind and let np.loadtxt skip it to parse the numeric body.
    with resources.open_text(DATA_MODULE, data_filename) as f:
        header_exercise = f.readline().split()
        f.seek(0)  # reset file obj
        data_exercise = np.loadtxt(f, skiprows=1)
    with resources.open_text(DATA_MODULE, target_filename) as f:
        header_physiological = f.readline().split()
        f.seek(0)  # reset file obj
        data_physiological = np.loadtxt(f, skiprows=1)
    fdescr = load_descr("linnerud.rst")
    frame = None
    if as_frame:
        # Re-wrap the numpy arrays as pandas objects (requires pandas).
        (frame, data_exercise, data_physiological) = _convert_data_dataframe(
            "load_linnerud",
            data_exercise,
            data_physiological,
            header_exercise,
            header_physiological,
        )
    if return_X_y:
        return data_exercise, data_physiological
    return Bunch(
        data=data_exercise,
        feature_names=header_exercise,
        target=data_physiological,
        target_names=header_physiological,
        frame=frame,
        DESCR=fdescr,
        data_filename=data_filename,
        target_filename=target_filename,
        data_module=DATA_MODULE,
    )
def load_boston(*, return_X_y=False):
    """Load and return the boston house-prices dataset (regression).
    ==============   ==============
    Samples total               506
    Dimensionality               13
    Features         real, positive
    Targets           real 5. - 50.
    ==============   ==============
    Read more in the :ref:`User Guide <boston_dataset>`.
    Parameters
    ----------
    return_X_y : bool, default=False
        If True, returns ``(data, target)`` instead of a Bunch object.
        See below for more information about the `data` and `target` object.
        .. versionadded:: 0.18
    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.
        data : ndarray of shape (506, 13)
            The data matrix.
        target : ndarray of shape (506, )
            The regression target.
        filename : str
            The physical location of boston csv dataset.
            .. versionadded:: 0.20
        DESCR : str
            The full description of the dataset.
        feature_names : ndarray
            The names of features
    (data, target) : tuple if ``return_X_y`` is True
        .. versionadded:: 0.18
    Notes
    -----
        .. versionchanged:: 0.20
            Fixed a wrong data point at [445, 0].
    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> X, y = load_boston(return_X_y=True)
    >>> print(X.shape)
    (506, 13)
    """
    descr_text = load_descr("boston_house_prices.rst")
    data_file_name = "boston_house_prices.csv"
    with resources.open_text(DATA_MODULE, data_file_name) as f:
        data_file = csv.reader(f)
        # First header row: sample and feature counts.
        temp = next(data_file)
        n_samples = int(temp[0])
        n_features = int(temp[1])
        data = np.empty((n_samples, n_features))
        target = np.empty((n_samples,))
        # Second header row: the column names (features + target).
        temp = next(data_file)  # names of features
        feature_names = np.array(temp)
        for i, d in enumerate(data_file):
            data[i] = np.asarray(d[:-1], dtype=np.float64)
            target[i] = np.asarray(d[-1], dtype=np.float64)
    if return_X_y:
        return data, target
    return Bunch(
        data=data,
        target=target,
        # last column is target value
        feature_names=feature_names[:-1],
        DESCR=descr_text,
        filename=data_file_name,
        data_module=DATA_MODULE,
    )
def load_sample_images():
    """Load sample images for image manipulation.
    Loads both, ``china`` and ``flower``.
    Read more in the :ref:`User Guide <sample_images>`.
    Returns
    -------
    data : :class:`~sklearn.utils.Bunch`
        Dictionary-like object, with the following attributes.
        images : list of ndarray of shape (427, 640, 3)
            The two sample image.
        filenames : list
            The filenames for the images.
        DESCR : str
            The full description of the dataset.
    Examples
    --------
    To load the data and visualize the images:
    >>> from sklearn.datasets import load_sample_images
    >>> dataset = load_sample_images()     #doctest: +SKIP
    >>> len(dataset.images)                #doctest: +SKIP
    2
    >>> first_img_data = dataset.images[0] #doctest: +SKIP
    >>> first_img_data.shape               #doctest: +SKIP
    (427, 640, 3)
    >>> first_img_data.dtype               #doctest: +SKIP
    dtype('uint8')
    """
    # import PIL only when needed
    from ..externals._pilutil import imread

    descr = load_descr("README.txt", descr_module=IMAGES_MODULE)
    filenames, images = [], []
    # Load every jpg bundled in the images module, in sorted order.
    for filename in sorted(resources.contents(IMAGES_MODULE)):
        if filename.endswith(".jpg"):
            filenames.append(filename)
            with resources.open_binary(IMAGES_MODULE, filename) as image_file:
                image = imread(image_file)
            images.append(image)
    return Bunch(images=images, filenames=filenames, DESCR=descr)
def load_sample_image(image_name):
    """Load the numpy array of a single sample image.

    Read more in the :ref:`User Guide <sample_images>`.

    Parameters
    ----------
    image_name : {`china.jpg`, `flower.jpg`}
        The name of the sample image loaded.

    Returns
    -------
    img : 3D array
        The image as a numpy array: height x width x color.

    Examples
    --------

    >>> from sklearn.datasets import load_sample_image
    >>> china = load_sample_image('china.jpg')   # doctest: +SKIP
    >>> china.dtype                              # doctest: +SKIP
    dtype('uint8')
    >>> china.shape                              # doctest: +SKIP
    (427, 640, 3)
    >>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
    >>> flower.dtype                             # doctest: +SKIP
    dtype('uint8')
    >>> flower.shape                             # doctest: +SKIP
    (427, 640, 3)
    """
    dataset = load_sample_images()
    # Return the first image whose filename ends with the requested name.
    for image, filename in zip(dataset.images, dataset.filenames):
        if filename.endswith(image_name):
            return image
    # NOTE(review): AttributeError is an odd choice for a failed lookup, but
    # it is kept unchanged for backward compatibility with existing callers.
    raise AttributeError("Cannot find sample image: %s" % image_name)
def _pkl_filepath(*args, **kwargs):
"""Return filename for Python 3 pickles
args[-1] is expected to be the ".pkl" filename. For compatibility with
older scikit-learn versions, a suffix is inserted before the extension.
_pkl_filepath('/path/to/folder', 'filename.pkl') returns
'/path/to/folder/filename_py3.pkl'
"""
py3_suffix = kwargs.get("py3_suffix", "_py3")
basename, ext = splitext(args[-1])
basename += py3_suffix
new_args = args[:-1] + (basename + ext,)
return join(*new_args)
def _sha256(path):
"""Calculate the sha256 hash of the file at path."""
sha256hash = hashlib.sha256()
chunk_size = 8192
with open(path, "rb") as f:
while True:
buffer = f.read(chunk_size)
if not buffer:
break
sha256hash.update(buffer)
return sha256hash.hexdigest()
def _fetch_remote(remote, dirname=None):
    """Download a remote dataset file and verify its SHA256 checksum.

    Fetch the dataset pointed to by remote's url, save it using remote's
    filename, and ensure its integrity by comparing the SHA256 checksum of
    the downloaded file against the expected one.

    Parameters
    ----------
    remote : RemoteFileMetadata
        Named tuple containing remote dataset meta information: url, filename
        and checksum.
    dirname : str
        Directory to save the file to; the current directory when None.

    Returns
    -------
    file_path: str
        Full path of the created file.
    """
    if dirname is None:
        file_path = remote.filename
    else:
        file_path = join(dirname, remote.filename)
    urlretrieve(remote.url, file_path)
    actual = _sha256(file_path)
    # A checksum mismatch means a truncated or tampered download.
    if actual != remote.checksum:
        raise IOError(
            "{} has an SHA256 checksum ({}) "
            "differing from expected ({}), "
            "file may be corrupted.".format(file_path, actual, remote.checksum)
        )
    return file_path
| |
# Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
from __future__ import division
import unittest
from itertools import groupby
import itertools
from dimod import ising_to_qubo, qubo_to_ising, ising_energy, qubo_energy
class TestIsingEnergy(unittest.TestCase):
    """Tests for the ising_energy utility."""

    def test_trivial(self):
        # The energy of an empty Ising problem is zero.
        self.assertEqual(ising_energy({}, {}, {}), 0)

    def test_typical(self):
        # AND gate encoded as an Ising problem; variable 3 is ancillary.
        h = {0: -.5, 1: 0, 2: 1, 3: -.5}
        J = {(0, 2): -1, (1, 2): -1, (0, 3): .5, (1, 3): -1}

        def ground(s0, s1, s2):
            # Best energy over both states of the ancillary spin.
            return min(
                ising_energy({0: s0, 1: s1, 2: s2, 3: anc}, h, J)
                for anc in (-1, +1))

        # All four valid AND truth-table rows must reach the same energy.
        reference = ground(-1, -1, -1)
        self.assertEqual(reference, ground(+1, -1, -1))
        self.assertEqual(reference, ground(-1, +1, -1))
        self.assertEqual(reference, ground(+1, +1, +1))
class TestQuboEnergy(unittest.TestCase):
    """Tests for the qubo_energy utility."""

    def test_trivial(self):
        # The energy of an empty QUBO is zero.
        self.assertEqual(qubo_energy({}, {}), 0)

    def test_typical(self):
        # AND gate: convert the Ising form to QUBO and check the energies of
        # the valid truth-table rows all coincide.
        h = {0: -.5, 1: 0, 2: 1, 3: -.5}
        J = {(0, 2): -1, (1, 2): -1, (0, 3): .5, (1, 3): -1}
        Q, __ = ising_to_qubo(h, J)

        def ground(b0, b1, b2):
            # Best energy over both states of the ancillary bit 3.
            return min(
                qubo_energy({0: b0, 1: b1, 2: b2, 3: anc}, Q)
                for anc in (0, 1))

        reference = ground(0, 0, 0)
        self.assertEqual(reference, ground(1, 0, 0))
        self.assertEqual(reference, ground(0, 1, 0))
        self.assertEqual(reference, ground(1, 1, 1))
class TestIsingToQubo(unittest.TestCase):
    """Tests for the ising_to_qubo conversion utility."""

    def test_trivial(self):
        # An empty problem converts to an empty QUBO with zero offset.
        q, offset = ising_to_qubo({}, {})
        self.assertEqual(q, {})
        self.assertEqual(offset, 0)

    def test_no_zeros(self):
        # Zero linear biases still produce diagonal QUBO entries; zero
        # couplings are dropped.
        # FIX: the original literal was {0: 0, 0: 0, 0: 0} — duplicate dict
        # keys collapse at evaluation time, so {0: 0} is the equivalent,
        # unambiguous form.
        q, offset = ising_to_qubo({0: 0}, {(0, 0): 0, (4, 5): 0})
        self.assertEqual(q, {(0, 0): 0.0})
        self.assertEqual(offset, 0)

    def test_j_diag(self):
        # Diagonal J entries (s*s == 1) contribute only to the offset.
        q, offset = ising_to_qubo({}, {(0, 0): 1, (300, 300): 99})
        self.assertEqual(q, {(0, 0): 0.0, (300, 300): 0.0})
        self.assertEqual(offset, 100)

    def test_typical(self):
        # Hand-computed expected QUBO for a dense-ish Ising problem; compared
        # through normalized_matrix so (u, v)/(v, u) entries are merged.
        h = {i: v for i, v in enumerate([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4])}
        j = {(0, 5): 2, (0, 8): 4, (1, 4): -5, (1, 7): 1, (2, 0): 5,
             (2, 1): 4, (3, 0): -1, (3, 6): -3, (3, 8): 3, (4, 0): 2, (4, 7): 3,
             (4, 9): 3, (5, 1): 3, (6, 5): -4, (6, 7): -4, (7, 1): -4,
             (7, 8): 3, (8, 2): -4, (8, 3): -3, (8, 6): -5, (8, 7): -4, (9, 0): 4,
             (9, 1): -1, (9, 4): -5, (9, 7): 3}
        q, offset = ising_to_qubo(h, j)
        ans = {(0, 0): -42, (0, 2): 20, (0, 3): -4, (0, 4): 8,
               (0, 5): 8, (0, 8): 16, (0, 9): 16, (1, 1): -4,
               (1, 2): 16, (1, 4): -20, (1, 5): 12, (1, 7): -12,
               (1, 9): -4, (2, 2): -16, (2, 8): -16, (3, 3): 4,
               (3, 6): -12, (4, 4): 2, (4, 7): 12, (4, 9): -8,
               (5, 5): -2, (5, 6): -16, (6, 6): 34, (6, 7): -16,
               (6, 8): -20, (7, 7): 8, (7, 8): -4, (7, 9): 12,
               (8, 8): 18}
        for (u, v), bias in normalized_matrix(q).items():
            self.assertIn((u, v), ans)
            self.assertEqual(bias, ans[(u, v)])
        self.assertEqual(offset, 2)

    def test_energy(self):
        # Ising and QUBO energies must agree up to the conversion offset for
        # corresponding spin/binary samples.
        h = {v: v for v in range(0, 100, 2)}
        h.update({v: -(1 / v) for v in range(1, 100, 2)})
        J = {(u, v): 2 * (u / 3) + v ** .5 for (u, v) in itertools.combinations(range(100), 2)}

        spin_sample = {v: 1 if v % 2 else -1 for v in h}
        bin_sample = {v: 1 if v % 2 else 0 for v in h}

        Q, off = ising_to_qubo(h, J)

        ising_en = ising_energy(spin_sample, h, J)
        qubo_en = qubo_energy(bin_sample, Q)

        self.assertAlmostEqual(ising_en, qubo_en + off)

    def test_offset_propogation(self):
        # A user-supplied offset is added on top of the conversion offset.
        h = {v: 1 / (v + 1) for v in range(10)}
        J = {(u, v): 2 * (u / 3) + v ** .5 for (u, v) in itertools.combinations(range(10), 2)}
        Q, offset = ising_to_qubo(h, J)
        Q, offset2 = ising_to_qubo(h, J, offset=3)
        self.assertAlmostEqual(offset + 3, offset2)
class TestQuboToIsing(unittest.TestCase):
    # Tests for the qubo_to_ising conversion utility.

    def test_trivial(self):
        # An empty QUBO converts to empty h/J with zero offset.
        h, j, offset = qubo_to_ising({})
        self.assertEqual(h, {})
        self.assertEqual(j, {})
        self.assertEqual(offset, 0)

    def test_no_zeros(self):
        # Zero-bias entries create linear terms for every variable mentioned,
        # but no couplings.
        h, j, offset = qubo_to_ising({(0, 0): 0, (4, 5): 0})
        self.assertEqual(h, {0: 0, 4: 0, 5: 0})
        self.assertEqual(j, {})
        self.assertEqual(offset, 0)

    def test_typical(self):
        # Hand-computed expected h/J for a dense-ish QUBO. J is compared via
        # normalized_matrix so that (u, v) and (v, u) couplings are merged
        # before comparison.
        q = {(0, 0): 4, (0, 3): 5, (0, 5): 4, (1, 1): 5, (1, 6): 1, (1, 7): -2,
             (1, 9): -3, (3, 0): -2, (3, 1): 2, (4, 5): 4, (4, 8): 2, (4, 9): -1,
             (5, 1): 2, (5, 6): -5, (5, 8): -4, (6, 0): 1, (6, 5): 2, (6, 6): -4,
             (6, 7): -2, (7, 0): -2, (7, 5): -3, (7, 6): -5, (7, 7): -3, (7, 8): 1,
             (8, 0): 2, (8, 5): 1, (9, 7): -3}
        h, j, offset = qubo_to_ising(q)
        self.assertEqual(h, {0: 4.0, 1: 2.5, 3: 1.25, 4: 1.25, 5: 0.25,
                             6: -4.0, 7: -5.5, 8: 0.5, 9: -1.75})
        norm_j = normalized_matrix(j)
        self.assertEqual(norm_j, {(0, 3): 0.75, (0, 5): 1, (0, 6): 0.25, (0, 7): -0.5,
                                  (0, 8): 0.5, (1, 3): 0.5, (1, 5): 0.5, (1, 6): 0.25,
                                  (1, 7): -0.5, (1, 9): -0.75, (4, 5): 1, (4, 8): 0.5,
                                  (4, 9): -0.25, (5, 6): -0.75, (5, 7): -0.75,
                                  (5, 8): -0.75, (6, 7): -1.75, (7, 8): 0.25,
                                  (7, 9): -0.75})
        self.assertEqual(offset, -0.25)
class TestUtilitiesIntegration(unittest.TestCase):
    """Round-trip checks between the Ising and QUBO representations."""

    def test_start_from_binary(self):
        # Convert Ising -> QUBO, then check the energies agree on the
        # all-ones assignment (spin +1 corresponds to binary 1).
        h = {i: v for i, v in enumerate([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4])}
        j = {(0, 5): 2, (0, 8): 4, (1, 4): -5, (1, 7): 1, (2, 0): 5,
             (2, 1): 4, (3, 0): -1, (3, 6): -3, (3, 8): 3, (4, 0): 2, (4, 7): 3,
             (4, 9): 3, (5, 1): 3, (6, 5): -4, (6, 7): -4, (7, 1): -4,
             (7, 8): 3, (8, 2): -4, (8, 3): -3, (8, 6): -5, (8, 7): -4, (9, 0): 4,
             (9, 1): -1, (9, 4): -5, (9, 7): 3}
        ioff = 1.7
        q, qoff = ising_to_qubo(h, j, ioff)

        ising_sample = {v: 1 for v in h}
        bin_sample = {v: 1 for v in h}
        self.assertAlmostEqual(ising_energy(ising_sample, h, j, ioff),
                               qubo_energy(bin_sample, q, qoff))

    def test_start_from_spin(self):
        # Convert QUBO -> Ising, then check the energies agree on the
        # all-zeros assignment (binary 0 corresponds to spin -1).
        Q = {(0, 0): 4, (0, 3): 5, (0, 5): 4, (1, 1): 5, (1, 6): 1, (1, 7): -2,
             (1, 9): -3, (3, 0): -2, (3, 1): 2, (4, 5): 4, (4, 8): 2, (4, 9): -1,
             (5, 1): 2, (5, 6): -5, (5, 8): -4, (6, 0): 1, (6, 5): 2, (6, 6): -4,
             (6, 7): -2, (7, 0): -2, (7, 5): -3, (7, 6): -5, (7, 7): -3, (7, 8): 1,
             (8, 0): 2, (8, 5): 1, (9, 7): -3}
        qoff = 1.3
        h, J, ioff = qubo_to_ising(Q, qoff)

        bin_sample = {v: 0 for v in h}
        ising_sample = {v: -1 for v in h}
        self.assertAlmostEqual(ising_energy(ising_sample, h, J, ioff),
                               qubo_energy(bin_sample, Q, qoff))
def normalized_matrix(mat):
    """Normalize a bias matrix given as a pair -> bias dict.

    Each variable pair is put into sorted order, biases of duplicate pairs
    (e.g. both (u, v) and (v, u)) are summed together, and entries whose
    accumulated bias is zero are dropped.

    Parameters
    ----------
    mat : dict[tuple, number]
        Mapping from (possibly unordered) variable pairs to biases.

    Returns
    -------
    dict[tuple, number]
        Mapping from sorted variable pairs to nonzero summed biases.
    """
    # Single-pass accumulation replaces the original sort + groupby pipeline:
    # same result, no sorting of the whole matrix required.
    totals = {}
    for pair, bias in mat.items():
        key = tuple(sorted(pair))
        totals[key] = totals.get(key, 0) + bias
    return {key: total for key, total in totals.items() if total != 0}
| |
################################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
Unit tests for `categorical` module.
"""
import warnings
warnings.simplefilter("error")
import warnings
import numpy as np
import scipy
from bayespy.nodes import (Categorical,
Dirichlet,
Mixture,
Gamma)
from bayespy.utils import random
from bayespy.utils.misc import TestCase
class TestCategorical(TestCase):
    """
    Unit tests for Categorical node
    """

    def test_init(self):
        """
        Test the creation of categorical nodes.
        """
        # Some simple initializations
        X = Categorical([0.1, 0.3, 0.6])
        X = Categorical(Dirichlet([5,4,3]))
        # Check that plates are correct
        X = Categorical([0.1, 0.3, 0.6], plates=(3,4))
        self.assertEqual(X.plates,
                         (3,4))
        # The leading axes of the probability array become plates.
        X = Categorical(0.25*np.ones((2,3,4)))
        self.assertEqual(X.plates,
                         (2,3))
        X = Categorical(Dirichlet([2,1,9], plates=(3,4)))
        self.assertEqual(X.plates,
                         (3,4))
        # Probabilities not a vector
        self.assertRaises(ValueError,
                          Categorical,
                          0.5)
        # Invalid probability (negative entry)
        self.assertRaises(ValueError,
                          Categorical,
                          [-0.5, 1.5],
                          n=10)
        # Invalid probability (does not sum to one)
        self.assertRaises(ValueError,
                          Categorical,
                          [0.5, 1.5],
                          n=10)
        # Inconsistent plates
        self.assertRaises(ValueError,
                          Categorical,
                          0.25*np.ones((2,4)),
                          plates=(3,),
                          n=10)
        # Explicit plates too small
        self.assertRaises(ValueError,
                          Categorical,
                          0.25*np.ones((2,4)),
                          plates=(1,),
                          n=10)
        pass

    def test_moments(self):
        """
        Test the moments of categorical nodes.
        """
        # Simple test: the child message carries the class probabilities.
        X = Categorical([0.7,0.2,0.1])
        u = X._message_to_child()
        self.assertEqual(len(u), 1)
        self.assertAllClose(u[0],
                            [0.7,0.2,0.1])

        # Test plates in p
        p = np.random.dirichlet([1,1], size=3)
        X = Categorical(p)
        u = X._message_to_child()
        self.assertAllClose(u[0],
                            p)

        # Test with Dirichlet prior: moments come from the prior's expected
        # log-probabilities, renormalized via softmax.
        P = Dirichlet([7, 3])
        logp = P._message_to_child()[0]
        p0 = np.exp(logp[0]) / (np.exp(logp[0]) + np.exp(logp[1]))
        p1 = np.exp(logp[1]) / (np.exp(logp[0]) + np.exp(logp[1]))
        X = Categorical(P)
        u = X._message_to_child()
        p = np.array([p0, p1])
        self.assertAllClose(u[0],
                            p)

        # Test with broadcasted plates
        P = Dirichlet([7, 3], plates=(10,))
        X = Categorical(P)
        u = X._message_to_child()
        self.assertAllClose(u[0] * np.ones(X.get_shape(0)),
                            p*np.ones((10,1)))

        pass

    def test_observed(self):
        """
        Test observed categorical nodes
        """
        # Single observation: moments become a one-hot indicator.
        X = Categorical([0.7,0.2,0.1])
        X.observe(2)
        u = X._message_to_child()
        self.assertAllClose(u[0],
                            [0,0,1])

        # One plate axis
        X = Categorical([0.7,0.2,0.1], plates=(2,))
        X.observe([2,1])
        u = X._message_to_child()
        self.assertAllClose(u[0],
                            [[0,0,1],
                             [0,1,0]])

        # Several plate axes
        X = Categorical([0.7,0.1,0.1,0.1], plates=(2,3,))
        X.observe([[2,1,1],
                   [0,2,3]])
        u = X._message_to_child()
        self.assertAllClose(u[0],
                            [ [[0,0,1,0],
                               [0,1,0,0],
                               [0,1,0,0]],
                              [[1,0,0,0],
                               [0,0,1,0],
                               [0,0,0,1]] ])

        # Check invalid observations: out of range or non-integer.
        X = Categorical([0.7,0.2,0.1])
        self.assertRaises(ValueError,
                          X.observe,
                          -1)
        self.assertRaises(ValueError,
                          X.observe,
                          3)
        self.assertRaises(ValueError,
                          X.observe,
                          1.5)

        pass

    def test_constant(self):
        """
        Test constant categorical nodes
        """
        # Basic test: constant index 2 selects the third Gamma component
        # (presumably mean alpha/beta = 3/1 — matches the asserted value).
        Y = Mixture(2, Gamma, [1, 2, 3], [1, 1, 1])
        u = Y._message_to_child()
        self.assertAllClose(u[0],
                            3/1)

        # Test with one plate axis
        alpha = [[1, 2, 3],
                 [4, 5, 6]]
        Y = Mixture([2, 1], Gamma, alpha, 1)
        u = Y._message_to_child()
        self.assertAllClose(u[0],
                            [3, 5])

        # Test with two plate axes
        alpha = [ [[1, 2, 3],
                   [4, 5, 6]],
                  [[7, 8, 9],
                   [10, 11, 12]] ]
        Y = Mixture([[2, 1], [0, 2]], Gamma, alpha, 1)
        u = Y._message_to_child()
        self.assertAllClose(u[0],
                            [[3, 5],
                             [7, 12]])

        pass

    def test_initialization(self):
        """
        Test initialization of categorical nodes
        """
        # Test initialization from random: with degenerate (0/1) parent
        # probabilities the random draw is deterministic.  The module-level
        # "error" warnings filter is relaxed locally for the RuntimeWarning
        # raised by log(0).
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RuntimeWarning)
            Z = Categorical([[0.0, 1.0, 0.0],
                             [0.0, 0.0, 1.0]])
            Z.initialize_from_random()
        u = Z._message_to_child()
        self.assertAllClose(u[0],
                            [[0, 1, 0],
                             [0, 0, 1]])
        pass
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dense_features."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import test_util as tf_test_utils # pylint: disable=g-direct-tensorflow-import
from keras.testing_infra import test_combinations
from keras.feature_column import dense_features as df
def _initialized_session(config=None):
  """Create a tf.compat.v1 Session with variables and tables initialized."""
  session = tf.compat.v1.Session(config=config)
  # Run both initializers before handing the session to the caller.
  for initializer in (tf.compat.v1.global_variables_initializer(),
                      tf.compat.v1.tables_initializer()):
    session.run(initializer)
  return session
class DenseFeaturesTest(test_combinations.TestCase):
@test_combinations.generate(
test_combinations.combine(mode=['graph', 'eager']))
def test_retrieving_input(self):
features = {'a': [0.]}
dense_features = df.DenseFeatures(tf.feature_column.numeric_column('a'))
inputs = self.evaluate(dense_features(features))
self.assertAllClose([[0.]], inputs)
  @test_combinations.generate(test_combinations.combine(mode=['eager']))
  def test_reuses_variables(self):
    # Calling the same DenseFeatures layer twice on identical features must
    # reuse the embedding variable rather than create a second one.
    sparse_input = tf.SparseTensor(
        indices=((0, 0), (1, 0), (2, 0)), values=(0, 1, 2), dense_shape=(3, 3))

    # Create feature columns (categorical and embedding).
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='a', num_buckets=3)
    embedding_dimension = 2

    def _embedding_column_initializer(shape, dtype, partition_info=None):
      del shape  # unused
      del dtype  # unused
      del partition_info  # unused
      # Deterministic embedding table so the outputs can be asserted exactly.
      embedding_values = (
          (1, 0),  # id 0
          (0, 1),  # id 1
          (1, 1))  # id 2
      return embedding_values

    embedding_column = tf.feature_column.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_embedding_column_initializer)

    dense_features = df.DenseFeatures([embedding_column])
    features = {'a': sparse_input}

    inputs = dense_features(features)
    variables = dense_features.variables

    # Sanity check: test that the inputs are correct.
    self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)

    # Check that only one variable was created.
    self.assertEqual(1, len(variables))

    # Check that invoking dense_features on the same features does not create
    # additional variables
    _ = dense_features(features)
    self.assertEqual(1, len(variables))
    self.assertIs(variables[0], dense_features.variables[0])
  @test_combinations.generate(test_combinations.combine(mode=['eager']))
  def test_dense_feature_with_partitioner(self):
    # With a 2-way fixed-size partitioner the embedding table is split into
    # two variables, and the initializer runs once per partition.
    sparse_input = tf.SparseTensor(
        indices=((0, 0), (1, 0), (2, 0), (3, 0)),
        values=(0, 1, 3, 2),
        dense_shape=(4, 4))

    # Create feature columns (categorical and embedding).
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='a', num_buckets=4)
    embedding_dimension = 2

    def _embedding_column_initializer(shape, dtype, partition_info=None):
      # partition_info tells us which half of the table to fill.
      offset = partition_info._var_offset[0]
      del shape  # unused
      del dtype  # unused
      if offset == 0:
        embedding_values = (
            (1, 0),  # id 0
            (0, 1))  # id 1
      else:
        embedding_values = (
            (1, 1),  # id 2
            (2, 2))  # id 3
      return embedding_values

    embedding_column = tf.feature_column.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_embedding_column_initializer)

    dense_features = df.DenseFeatures(
        [embedding_column], partitioner=tf.compat.v1.fixed_size_partitioner(2))
    features = {'a': sparse_input}

    inputs = dense_features(features)
    variables = dense_features.variables

    # Sanity check: test that the inputs are correct.
    self.assertAllEqual([[1, 0], [0, 1], [2, 2], [1, 1]], inputs)

    # Check that two variables were created (one per partition).
    self.assertEqual(2, len(variables))

    # Check that invoking dense_features on the same features does not create
    # additional variables
    _ = dense_features(features)
    self.assertEqual(2, len(variables))
    self.assertIs(variables[0], dense_features.variables[0])
    self.assertIs(variables[1], dense_features.variables[1])
  @test_combinations.generate(test_combinations.combine(mode=['eager']))
  def test_feature_column_dense_features_gradient(self):
    # Gradients must flow through DenseFeatures back to the embedding
    # variable as IndexedSlices covering the looked-up ids.
    sparse_input = tf.SparseTensor(
        indices=((0, 0), (1, 0), (2, 0)), values=(0, 1, 2), dense_shape=(3, 3))

    # Create feature columns (categorical and embedding).
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='a', num_buckets=3)
    embedding_dimension = 2

    def _embedding_column_initializer(shape, dtype, partition_info=None):
      del shape  # unused
      del dtype  # unused
      del partition_info  # unused
      # Deterministic embedding table so the outputs can be asserted exactly.
      embedding_values = (
          (1, 0),  # id 0
          (0, 1),  # id 1
          (1, 1))  # id 2
      return embedding_values

    embedding_column = tf.feature_column.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_embedding_column_initializer)

    dense_features = df.DenseFeatures([embedding_column])
    features = {'a': sparse_input}

    def scale_matrix():
      matrix = dense_features(features)
      return 2 * matrix

    # Sanity check: Verify that scale_matrix returns the correct output.
    self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())

    # Check that the returned gradient is correct.
    grad_function = backprop.implicit_grad(scale_matrix)
    grads_and_vars = grad_function()
    indexed_slice = grads_and_vars[0][0]
    gradient = grads_and_vars[0][0].values

    # d(2x)/dx == 2 for every looked-up embedding row.
    self.assertAllEqual([0, 1, 2], indexed_slice.indices)
    self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)
def test_raises_if_empty_feature_columns(self):
with self.assertRaisesRegex(ValueError,
'feature_columns must not be empty'):
df.DenseFeatures(feature_columns=[])(features={})
def test_should_be_dense_column(self):
with self.assertRaisesRegex(ValueError, 'must be a .*DenseColumn'):
df.DenseFeatures(feature_columns=[
tf.feature_column.categorical_column_with_hash_bucket('wire_cast', 4)
])(
features={
'a': [[0]]
})
def test_does_not_support_dict_columns(self):
with self.assertRaisesRegex(
ValueError, 'Expected feature_columns to be iterable, found dict.'):
df.DenseFeatures(
feature_columns={'a': tf.feature_column.numeric_column('a')})(
features={
'a': [[0]]
})
def test_bare_column(self):
with tf.Graph().as_default():
features = features = {'a': [0.]}
net = df.DenseFeatures(tf.feature_column.numeric_column('a'))(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[0.]], self.evaluate(net))
def test_column_generator(self):
with tf.Graph().as_default():
features = features = {'a': [0.], 'b': [1.]}
columns = (tf.feature_column.numeric_column(key) for key in features)
net = df.DenseFeatures(columns)(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[0., 1.]], self.evaluate(net))
def test_raises_if_duplicate_name(self):
with self.assertRaisesRegex(
ValueError, 'Duplicate feature column name found for columns'):
df.DenseFeatures(feature_columns=[
tf.feature_column.numeric_column('a'),
tf.feature_column.numeric_column('a')
])(
features={
'a': [[0]]
})
def test_one_column(self):
price = tf.feature_column.numeric_column('price')
with tf.Graph().as_default():
features = {'price': [[1.], [5.]]}
net = df.DenseFeatures([price])(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[1.], [5.]], self.evaluate(net))
def test_multi_dimension(self):
price = tf.feature_column.numeric_column('price', shape=2)
with tf.Graph().as_default():
features = {'price': [[1., 2.], [5., 6.]]}
net = df.DenseFeatures([price])(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))
  def test_compute_output_shape(self):
    # Output width is the sum of the column shapes (2 + 4 = 6).
    price1 = tf.feature_column.numeric_column('price1', shape=2)
    price2 = tf.feature_column.numeric_column('price2', shape=4)
    with tf.Graph().as_default():
      features = {
          'price1': [[1., 2.], [5., 6.]],
          'price2': [[3., 4., 5., 6.], [7., 8., 9., 10.]]
      }
      dense_features = df.DenseFeatures([price1, price2])
      self.assertEqual((None, 6), dense_features.compute_output_shape((None,)))
      net = dense_features(features)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 2., 3., 4., 5., 6.], [5., 6., 7., 8., 9., 10.]],
                          self.evaluate(net))
def test_raises_if_shape_mismatch(self):
price = tf.feature_column.numeric_column('price', shape=2)
with tf.Graph().as_default():
features = {'price': [[1.], [5.]]}
with self.assertRaisesRegex(
Exception,
r'Cannot reshape a tensor with 2 elements to shape \[2,2\]'):
df.DenseFeatures([price])(features)
def test_reshaping(self):
price = tf.feature_column.numeric_column('price', shape=[1, 2])
with tf.Graph().as_default():
features = {'price': [[[1., 2.]], [[5., 6.]]]}
net = df.DenseFeatures([price])(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[1., 2.], [5., 6.]], self.evaluate(net))
def test_multi_column(self):
price1 = tf.feature_column.numeric_column('price1', shape=2)
price2 = tf.feature_column.numeric_column('price2')
with tf.Graph().as_default():
features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
net = df.DenseFeatures([price1, price2])(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))
  def test_cols_to_output_tensors(self):
    # Passing a dict as the second argument exposes the per-column
    # output tensors in addition to the concatenated result.
    price1 = tf.feature_column.numeric_column('price1', shape=2)
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      cols_dict = {}
      features = {'price1': [[1., 2.], [5., 6.]], 'price2': [[3.], [4.]]}
      dense_features = df.DenseFeatures([price1, price2])
      net = dense_features(features, cols_dict)

      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())

      self.assertAllClose([[1., 2.], [5., 6.]],
                          self.evaluate(cols_dict[price1]))
      self.assertAllClose([[3.], [4.]], self.evaluate(cols_dict[price2]))
      self.assertAllClose([[1., 2., 3.], [5., 6., 4.]], self.evaluate(net))
def test_column_order(self):
price_a = tf.feature_column.numeric_column('price_a')
price_b = tf.feature_column.numeric_column('price_b')
with tf.Graph().as_default():
features = {
'price_a': [[1.]],
'price_b': [[3.]],
}
net1 = df.DenseFeatures([price_a, price_b])(features)
net2 = df.DenseFeatures([price_b, price_a])(features)
self.evaluate(tf.compat.v1.global_variables_initializer())
self.evaluate(tf.compat.v1.tables_initializer())
self.assertAllClose([[1., 3.]], self.evaluate(net1))
self.assertAllClose([[1., 3.]], self.evaluate(net2))
def test_fails_for_categorical_column(self):
animal = tf.feature_column.categorical_column_with_identity(
'animal', num_buckets=4)
with tf.Graph().as_default():
features = {
'animal':
tf.SparseTensor(
indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
}
with self.assertRaisesRegex(Exception, 'must be a .*DenseColumn'):
df.DenseFeatures([animal])(features)
  def test_static_batch_size_mismatch(self):
    # Features with different static batch sizes are rejected at build time.
    price1 = tf.feature_column.numeric_column('price1')
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      features = {
          'price1': [[1.], [5.], [7.]],  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      with self.assertRaisesRegex(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        df.DenseFeatures([price1, price2])(features)
  def test_subset_of_static_batch_size_mismatch(self):
    # Even when one feature has an unknown batch size (placeholder), the
    # remaining static batch sizes must still agree with each other.
    price1 = tf.feature_column.numeric_column('price1')
    price2 = tf.feature_column.numeric_column('price2')
    price3 = tf.feature_column.numeric_column('price3')
    with tf.Graph().as_default():
      features = {
          'price1': tf.compat.v1.placeholder(dtype=tf.int64),  # batchsize = 3
          'price2': [[3.], [4.]],  # batchsize = 2
          'price3': [[3.], [4.], [5.]]  # batchsize = 3
      }
      with self.assertRaisesRegex(
          ValueError,
          r'Batch size \(first dimension\) of each feature must be same.'):  # pylint: disable=anomalous-backslash-in-string
        df.DenseFeatures([price1, price2, price3])(features)
  def test_runtime_batch_size_mismatch(self):
    # A batch-size mismatch only visible at run time (through a placeholder
    # feed) surfaces as an op error rather than a build-time ValueError.
    price1 = tf.feature_column.numeric_column('price1')
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      features = {
          'price1': tf.compat.v1.placeholder(dtype=tf.int64),  # batchsize = 3
          'price2': [[3.], [4.]]  # batchsize = 2
      }
      net = df.DenseFeatures([price1, price2])(features)
      with _initialized_session() as sess:
        with self.assertRaisesRegex(tf.errors.OpError,
                                    'Dimension 0 in both shapes must be equal|'
                                    'Dimensions of inputs should match'):
          sess.run(net, feed_dict={features['price1']: [[1.], [5.], [7.]]})
  def test_runtime_batch_size_matches(self):
    # Matching run-time batch sizes evaluate without error.
    price1 = tf.feature_column.numeric_column('price1')
    price2 = tf.feature_column.numeric_column('price2')
    with tf.Graph().as_default():
      features = {
          'price1': tf.compat.v1.placeholder(dtype=tf.int64),  # batchsize = 2
          'price2': tf.compat.v1.placeholder(dtype=tf.int64),  # batchsize = 2
      }
      net = df.DenseFeatures([price1, price2])(features)
      with _initialized_session() as sess:
        sess.run(
            net,
            feed_dict={
                features['price1']: [[1.], [5.]],
                features['price2']: [[1.], [5.]],
            })
  def test_multiple_layers_with_same_embedding_column(self):
    # Two separate DenseFeatures layers do NOT share state: each creates its
    # own embedding variable even for the same embedding column.
    some_sparse_column = tf.feature_column.categorical_column_with_hash_bucket(
        'sparse_feature', hash_bucket_size=5)
    some_embedding_column = tf.feature_column.embedding_column(
        some_sparse_column, dimension=10)
    with tf.Graph().as_default():
      features = {
          'sparse_feature': [['a'], ['x']],
      }
      all_cols = [some_embedding_column]
      df.DenseFeatures(all_cols)(features)
      df.DenseFeatures(all_cols)(features)
      # Make sure that 2 variables get created in this case.
      self.assertEqual(
          2,
          len(
              tf.compat.v1.get_collection(
                  tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)))
      expected_var_names = [
          'dense_features/sparse_feature_embedding/embedding_weights:0',
          'dense_features_1/sparse_feature_embedding/embedding_weights:0'
      ]
      self.assertCountEqual(expected_var_names, [
          v.name for v in tf.compat.v1.get_collection(
              tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
      ])
  @tf_test_utils.run_deprecated_v1
  def test_multiple_layers_with_same_shared_embedding_column(self):
    # Shared-embedding columns DO share their variable across layers: two
    # DenseFeatures calls still create only one global variable.
    categorical_column_a = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = tf.feature_column.categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    embedding_column_b, embedding_column_a = tf.feature_column.shared_embeddings(
        [categorical_column_b, categorical_column_a],
        dimension=embedding_dimension)
    with tf.Graph().as_default():
      features = {
          'aaa':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      all_cols = [embedding_column_a, embedding_column_b]
      df.DenseFeatures(all_cols)(features)
      df.DenseFeatures(all_cols)(features)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(
          1,
          len(
              tf.compat.v1.get_collection(
                  tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)))
      self.assertCountEqual(['aaa_bbb_shared_embedding:0'], [
          v.name for v in tf.compat.v1.get_collection(
              tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
      ])
  @tf_test_utils.run_deprecated_v1
  def test_multiple_layers_with_same_shared_embedding_column_diff_graphs(self):
    # The shared-embedding variable is per-graph: each new graph creates its
    # own single copy of the shared variable.
    categorical_column_a = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    categorical_column_b = tf.feature_column.categorical_column_with_identity(
        key='bbb', num_buckets=3)
    embedding_dimension = 2
    embedding_column_b, embedding_column_a = tf.feature_column.shared_embeddings(
        [categorical_column_b, categorical_column_a],
        dimension=embedding_dimension)
    all_cols = [embedding_column_a, embedding_column_b]
    with tf.Graph().as_default():
      features = {
          'aaa':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      df.DenseFeatures(all_cols)(features)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(
          1,
          len(
              tf.compat.v1.get_collection(
                  tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)))
    with tf.Graph().as_default():
      features1 = {
          'aaa':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(0, 1, 0),
                  dense_shape=(2, 2)),
          'bbb':
              tf.SparseTensor(
                  indices=((0, 0), (1, 0), (1, 1)),
                  values=(1, 2, 1),
                  dense_shape=(2, 2)),
      }
      df.DenseFeatures(all_cols)(features1)
      # Make sure that only 1 variable gets created in this case.
      self.assertEqual(
          1,
          len(
              tf.compat.v1.get_collection(
                  tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)))
      self.assertCountEqual(['aaa_bbb_shared_embedding:0'], [
          v.name for v in tf.compat.v1.get_collection(
              tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
      ])
  @tf_test_utils.run_deprecated_v1
  def test_with_1d_sparse_tensor(self):
    # 1-D dense and sparse feature tensors are accepted and expanded to
    # rank 2 internally.
    embedding_values = (
        (1., 2., 3., 4., 5.),  # id 0
        (6., 7., 8., 9., 10.),  # id 1
        (11., 12., 13., 14., 15.)  # id 2
    )

    def _initializer(shape, dtype, partition_info=None):
      del shape, dtype, partition_info
      return embedding_values

    # price has 1 dimension in dense_features
    price = tf.feature_column.numeric_column('price')

    # one_hot_body_style has 3 dims in dense_features.
    body_style = tf.feature_column.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = tf.feature_column.indicator_column(body_style)

    # embedded_country has 5 dims in dense_features.
    country = tf.feature_column.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = tf.feature_column.embedding_column(
        country, dimension=5, initializer=_initializer)

    # Provides 1-dim tensor and dense tensor.
    features = {
        'price':
            tf.constant([
                11.,
                12.,
            ]),
        'body-style':
            tf.SparseTensor(
                indices=((0,), (1,)),
                values=('sedan', 'hardtop'),
                dense_shape=(2,)),
        # This is dense tensor for the categorical_column.
        'country':
            tf.constant(['CA', 'US']),
    }
    self.assertEqual(1, features['price'].shape.ndims)
    self.assertEqual(1, features['body-style'].dense_shape.get_shape()[0])
    self.assertEqual(1, features['country'].shape.ndims)

    net = df.DenseFeatures([price, one_hot_body_style, embedded_country])(
        features)
    self.assertEqual(1 + 3 + 5, net.shape[1])
    with _initialized_session() as sess:
      # Each row concatenates `one_hot_body_style` (3 dims),
      # `embedded_country` (5 dims) and `price` (1 dim), in sorted
      # column-name order.
      self.assertAllEqual([[0., 0., 1., 11., 12., 13., 14., 15., 11.],
                           [1., 0., 0., 1., 2., 3., 4., 5., 12.]],
                          sess.run(net))
  @tf_test_utils.run_deprecated_v1
  def test_with_1d_unknown_shape_sparse_tensor(self):
    """DenseFeatures with fully-unknown-shape placeholder inputs.

    Same feature mix as test_with_1d_sparse_tensor, but every input is a
    placeholder with unknown rank; values are supplied through feed_dict at
    session run time. Verifies the static output width (1 + 3 + 2) and exact
    concatenated values.
    """
    embedding_values = (
        (1., 2.),  # id 0
        (6., 7.),  # id 1
        (11., 12.)  # id 2
    )
    def _initializer(shape, dtype, partition_info=None):
      # Deterministic initializer so the lookup values can be asserted exactly.
      del shape, dtype, partition_info
      return embedding_values
    # price has 1 dimension in dense_features
    price = tf.feature_column.numeric_column('price')
    # one_hot_body_style has 3 dims in dense_features.
    body_style = tf.feature_column.categorical_column_with_vocabulary_list(
        'body-style', vocabulary_list=['hardtop', 'wagon', 'sedan'])
    one_hot_body_style = tf.feature_column.indicator_column(body_style)
    # embedded_body_style has 5 dims in dense_features.
    country = tf.feature_column.categorical_column_with_vocabulary_list(
        'country', vocabulary_list=['US', 'JP', 'CA'])
    embedded_country = tf.feature_column.embedding_column(
        country, dimension=2, initializer=_initializer)
    # Provides 1-dim tensor and dense tensor.
    features = {
        'price': tf.compat.v1.placeholder(tf.float32),
        'body-style': tf.compat.v1.sparse_placeholder(tf.string),
        # This is dense tensor for the categorical_column.
        'country': tf.compat.v1.placeholder(tf.string),
    }
    # All placeholder ranks are unknown before feeding.
    self.assertIsNone(features['price'].shape.ndims)
    self.assertIsNone(features['body-style'].get_shape().ndims)
    self.assertIsNone(features['country'].shape.ndims)
    price_data = np.array([11., 12.])
    body_style_data = tf.compat.v1.SparseTensorValue(
        indices=((0,), (1,)), values=('sedan', 'hardtop'), dense_shape=(2,))
    country_data = np.array([['US'], ['CA']])
    net = df.DenseFeatures([price, one_hot_body_style, embedded_country])(
        features)
    self.assertEqual(1 + 3 + 2, net.shape[1])
    with _initialized_session() as sess:
      # Each row is formed by concatenating `embedded_body_style`,
      # `one_hot_body_style`, and `price` in order.
      self.assertAllEqual(
          [[0., 0., 1., 1., 2., 11.], [1., 0., 0., 11., 12., 12.]],
          sess.run(
              net,
              feed_dict={
                  features['price']: price_data,
                  features['body-style']: body_style_data,
                  features['country']: country_data
              }))
  @tf_test_utils.run_deprecated_v1
  def test_with_rank_0_feature(self):
    """Rank-0 (scalar) features must be rejected, statically and dynamically."""
    # price has 1 dimension in dense_features
    price = tf.feature_column.numeric_column('price')
    features = {
        'price': tf.constant(0),
    }
    self.assertEqual(0, features['price'].shape.ndims)
    # Static rank 0 should fail
    with self.assertRaisesRegex(ValueError, 'Feature .* cannot have rank 0'):
      df.DenseFeatures([price])(features)
    # Dynamic rank 0 should fail
    features = {
        'price': tf.compat.v1.placeholder(tf.float32),
    }
    net = df.DenseFeatures([price])(features)
    self.assertEqual(1, net.shape[1])
    with _initialized_session() as sess:
      # Graph builds fine with an unknown-rank placeholder; the rank check
      # only fires at run time when a scalar is actually fed.
      with self.assertRaisesOpError('Feature .* cannot have rank 0'):
        sess.run(net, feed_dict={features['price']: np.array(1)})
class IndicatorColumnTest(tf.test.TestCase):
  """Tests DenseFeatures with an indicator (multi-hot) column."""

  @tf_test_utils.run_deprecated_v1
  def test_dense_features(self):
    """A sparse input with ids [1, 2] yields the multi-hot row [0, 1, 1, 0]."""
    animal = tf.feature_column.indicator_column(
        tf.feature_column.categorical_column_with_identity(
            'animal', num_buckets=4))
    with tf.Graph().as_default():
      features = {
          'animal':
              tf.SparseTensor(
                  indices=[[0, 0], [0, 1]], values=[1, 2], dense_shape=[1, 2])
      }
      net = df.DenseFeatures([animal])(features)
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.evaluate(tf.compat.v1.tables_initializer())
      self.assertAllClose([[0., 1., 1., 0.]], self.evaluate(net))
class EmbeddingColumnTest(tf.test.TestCase, parameterized.TestCase):
  """Tests DenseFeatures with (optionally partitioned) embedding columns."""

  @parameterized.named_parameters(
      {
          'testcase_name': 'use_safe_embedding_lookup',
          'use_safe_embedding_lookup': True,
          'partition_variables': False,
      }, {
          'testcase_name': 'dont_use_safe_embedding_lookup',
          'use_safe_embedding_lookup': False,
          'partition_variables': False,
      }, {
          'testcase_name': 'use_safe_embedding_lookup_partitioned',
          'use_safe_embedding_lookup': True,
          'partition_variables': True,
      }, {
          'testcase_name': 'dont_use_safe_embedding_lookup_partitioned',
          'use_safe_embedding_lookup': False,
          'partition_variables': True,
      })
  @tf_test_utils.run_deprecated_v1
  def test_dense_features(self, use_safe_embedding_lookup, partition_variables):
    """End-to-end embedding lookup through DenseFeatures.

    Covers the cross-product of safe/unsafe embedding lookup and
    partitioned/unpartitioned embedding variables. Verifies variable names in
    the GLOBAL and TRAINABLE collections, the initialized table values, the
    mean-combined lookup results, and whether a SparseFillEmptyRows op (the
    marker of the safe lookup path) appears in the graph.
    """
    # Inputs.
    vocabulary_size = 4
    sparse_input = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))
    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.),  # id 2
        (9., 13.)  # id 3
    )
    def _initializer(shape, dtype, partition_info=None):
      # When partitioned across 2 shards, each shard initializer call sees
      # half of the (4, 2) table; otherwise it sees the full shape.
      if partition_variables:
        self.assertEqual([vocabulary_size, embedding_dimension],
                         partition_info.full_shape)
        self.assertAllEqual((2, embedding_dimension), shape)
      else:
        self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
        self.assertIsNone(partition_info)
      self.assertEqual(tf.float32, dtype)
      return embedding_values
    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )
    # Build columns.
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    partitioner = None
    if partition_variables:
      partitioner = tf.compat.v1.fixed_size_partitioner(2, axis=0)
    with tf.compat.v1.variable_scope('vars', partitioner=partitioner):
      embedding_column = tf.feature_column.embedding_column(
          categorical_column,
          dimension=embedding_dimension,
          initializer=_initializer,
          use_safe_embedding_lookup=use_safe_embedding_lookup)
      # Provide sparse input and get dense result.
      l = df.DenseFeatures((embedding_column,))
      dense_features = l({'aaa': sparse_input})
    # Assert expected embedding variable and lookups.
    global_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
    if partition_variables:
      self.assertCountEqual(
          ('vars/dense_features/aaa_embedding/embedding_weights/part_0:0',
           'vars/dense_features/aaa_embedding/embedding_weights/part_1:0'),
          tuple([v.name for v in global_vars]))
    else:
      self.assertCountEqual(
          ('vars/dense_features/aaa_embedding/embedding_weights:0',),
          tuple([v.name for v in global_vars]))
    for v in global_vars:
      self.assertIsInstance(v, tf.Variable)
    trainable_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
    if partition_variables:
      self.assertCountEqual(
          ('vars/dense_features/aaa_embedding/embedding_weights/part_0:0',
           'vars/dense_features/aaa_embedding/embedding_weights/part_1:0'),
          tuple([v.name for v in trainable_vars]))
    else:
      self.assertCountEqual(
          ('vars/dense_features/aaa_embedding/embedding_weights:0',),
          tuple([v.name for v in trainable_vars]))
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.evaluate(tf.compat.v1.tables_initializer())
    self.assertAllEqual(embedding_values, self.evaluate(trainable_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(dense_features))
    # SparseFillEmptyRows is only emitted by the safe lookup path.
    if use_safe_embedding_lookup:
      self.assertIn(
          'SparseFillEmptyRows',
          [x.type for x in tf.compat.v1.get_default_graph().get_operations()])
    else:
      self.assertNotIn(
          'SparseFillEmptyRows',
          [x.type for x in tf.compat.v1.get_default_graph().get_operations()])

  @tf_test_utils.run_deprecated_v1
  def test_dense_features_not_trainable(self):
    """trainable=False keeps the embedding variable out of TRAINABLE_VARIABLES."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 4), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 5))
    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )
    def _initializer(shape, dtype, partition_info=None):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(tf.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values
    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )
    # Build columns.
    categorical_column = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column = tf.feature_column.embedding_column(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer,
        trainable=False)
    # Provide sparse input and get dense result.
    dense_features = df.DenseFeatures((embedding_column,))({
        'aaa': sparse_input
    })
    # Assert expected embedding variable and lookups.
    global_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
    self.assertCountEqual(('dense_features/aaa_embedding/embedding_weights:0',),
                          tuple([v.name for v in global_vars]))
    self.assertCountEqual([],
                          tf.compat.v1.get_collection(
                              tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES))
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.evaluate(tf.compat.v1.tables_initializer())
    self.assertAllEqual(embedding_values, self.evaluate(global_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(dense_features))
class SharedEmbeddingColumnTest(tf.test.TestCase, parameterized.TestCase):
  """Tests DenseFeatures with two pairs of shared-embedding columns."""

  def _test_dense_features(self, trainable=True):
    """Shared test body for trainable and non-trainable shared embeddings.

    Builds two shared-embedding pairs (aaa/bbb and ccc/ddd) over the same
    deterministic 3x2 table, runs all four columns through one DenseFeatures
    layer, and checks variable names/collections and the exact mean-combined
    lookup values.
    """
    # Inputs.
    vocabulary_size = 3
    sparse_input_a = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 0), (1, 0), (1, 4)),
        values=(2, 0, 1),
        dense_shape=(2, 5))
    sparse_input_b = tf.compat.v1.SparseTensorValue(
        # example 0, ids [0]
        # example 1, ids []
        indices=((0, 0),),
        values=(0,),
        dense_shape=(2, 5))
    sparse_input_c = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 1), (1, 1), (1, 3)),
        values=(2, 0, 1),
        dense_shape=(2, 5))
    sparse_input_d = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids []
        indices=((0, 1),),
        values=(2,),
        dense_shape=(2, 5))
    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )
    def _initializer(shape, dtype, partition_info=None):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(tf.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values
    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0:
        # A ids [2], embedding = [7, 11]
        # B ids [0], embedding = [1, 2]
        # C ids [2], embedding = [7, 11]
        # D ids [2], embedding = [7, 11]
        (7., 11., 1., 2., 7., 11., 7., 11.),
        # example 1:
        # A ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        # B ids [], embedding = [0, 0]
        # C ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        # D ids [], embedding = [0, 0]
        (2., 3.5, 0., 0., 2., 3.5, 0., 0.),
    )
    # Build columns.
    categorical_column_a = tf.feature_column.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    categorical_column_b = tf.feature_column.categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    categorical_column_c = tf.feature_column.categorical_column_with_identity(
        key='ccc', num_buckets=vocabulary_size)
    categorical_column_d = tf.feature_column.categorical_column_with_identity(
        key='ddd', num_buckets=vocabulary_size)
    embedding_column_a, embedding_column_b = tf.feature_column.shared_embeddings(
        [categorical_column_a, categorical_column_b],
        dimension=embedding_dimension,
        initializer=_initializer,
        trainable=trainable)
    embedding_column_c, embedding_column_d = tf.feature_column.shared_embeddings(
        [categorical_column_c, categorical_column_d],
        dimension=embedding_dimension,
        initializer=_initializer,
        trainable=trainable)
    features = {
        'aaa': sparse_input_a,
        'bbb': sparse_input_b,
        'ccc': sparse_input_c,
        'ddd': sparse_input_d
    }
    # Provide sparse input and get dense result. DenseFeatures orders its
    # output by column name, so the column order here does not matter.
    dense_features = df.DenseFeatures(
        feature_columns=(embedding_column_b, embedding_column_a,
                         embedding_column_c, embedding_column_d))(
                             features)
    # Assert expected embedding variable and lookups.
    global_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.GLOBAL_VARIABLES)
    self.assertCountEqual(
        ['aaa_bbb_shared_embedding:0', 'ccc_ddd_shared_embedding:0'],
        tuple([v.name for v in global_vars]))
    for v in global_vars:
      self.assertIsInstance(v, tf.Variable)
    trainable_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
    if trainable:
      self.assertCountEqual(
          ['aaa_bbb_shared_embedding:0', 'ccc_ddd_shared_embedding:0'],
          tuple([v.name for v in trainable_vars]))
    else:
      self.assertCountEqual([], tuple([v.name for v in trainable_vars]))
    shared_embedding_vars = global_vars
    self.evaluate(tf.compat.v1.global_variables_initializer())
    self.evaluate(tf.compat.v1.tables_initializer())
    self.assertAllEqual(embedding_values,
                        self.evaluate(shared_embedding_vars[0]))
    self.assertAllEqual(expected_lookups, self.evaluate(dense_features))

  @tf_test_utils.run_deprecated_v1
  def test_dense_features(self):
    """Trainable shared embeddings (default)."""
    self._test_dense_features()

  @tf_test_utils.run_deprecated_v1
  def test_dense_features_no_trainable(self):
    """Non-trainable shared embeddings."""
    self._test_dense_features(trainable=False)
@test_combinations.generate(test_combinations.combine(mode=['graph', 'eager']))
class DenseFeaturesSerializationTest(tf.test.TestCase, parameterized.TestCase):
  """Tests get_config/from_config round-tripping of DenseFeatures layers."""

  @parameterized.named_parameters(('trainable', True, 'trainable'),
                                  ('not_trainable', False, 'frozen'))
  def test_get_config(self, trainable, name):
    """get_config captures name, trainable flag, and serialized columns."""
    cols = [
        tf.feature_column.numeric_column('a'),
        tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_identity(
                key='b', num_buckets=3),
            dimension=2)
    ]
    orig_layer = df.DenseFeatures(cols, trainable=trainable, name=name)
    config = orig_layer.get_config()
    self.assertEqual(config['name'], orig_layer.name)
    self.assertEqual(config['trainable'], trainable)
    self.assertLen(config['feature_columns'], 2)
    self.assertEqual(config['feature_columns'][0]['class_name'],
                     'NumericColumn')
    self.assertEqual(config['feature_columns'][0]['config']['shape'], (1,))
    self.assertEqual(config['feature_columns'][1]['class_name'],
                     'EmbeddingColumn')

  @parameterized.named_parameters(('trainable', True, 'trainable'),
                                  ('not_trainable', False, 'frozen'))
  def test_from_config(self, trainable, name):
    """from_config(get_config()) reconstructs equivalent feature columns."""
    cols = [
        tf.feature_column.numeric_column('a'),
        tf.feature_column.embedding_column(
            tf.feature_column.categorical_column_with_vocabulary_list(
                'b', vocabulary_list=['1', '2', '3']),
            dimension=2),
        tf.feature_column.indicator_column(
            tf.feature_column.categorical_column_with_hash_bucket(
                key='c', hash_bucket_size=3))
    ]
    orig_layer = df.DenseFeatures(cols, trainable=trainable, name=name)
    config = orig_layer.get_config()
    new_layer = df.DenseFeatures.from_config(config)
    self.assertEqual(new_layer.name, orig_layer.name)
    self.assertEqual(new_layer.trainable, trainable)
    self.assertLen(new_layer._feature_columns, 3)
    self.assertEqual(new_layer._feature_columns[0].name, 'a')
    self.assertEqual(new_layer._feature_columns[1].initializer.mean, 0.0)
    self.assertEqual(new_layer._feature_columns[1].categorical_column.name, 'b')
    self.assertIsInstance(new_layer._feature_columns[0], cols[0].__class__)
    self.assertIsInstance(new_layer._feature_columns[1], cols[1].__class__)
    self.assertIsInstance(new_layer._feature_columns[2], cols[2].__class__)

  def test_crossed_column(self):
    """A crossed-column-based indicator column survives the config round trip."""
    a = tf.feature_column.categorical_column_with_vocabulary_list(
        'a', vocabulary_list=['1', '2', '3'])
    b = tf.feature_column.categorical_column_with_vocabulary_list(
        'b', vocabulary_list=['1', '2', '3'])
    ab = tf.feature_column.crossed_column([a, b], hash_bucket_size=2)
    cols = [tf.feature_column.indicator_column(ab)]
    orig_layer = df.DenseFeatures(cols)
    config = orig_layer.get_config()
    new_layer = df.DenseFeatures.from_config(config)
    self.assertLen(new_layer._feature_columns, 1)
    self.assertEqual(new_layer._feature_columns[0].name, 'a_X_b_indicator')
@test_combinations.generate(test_combinations.combine(mode=['graph', 'eager']))
class SequenceFeatureColumnsTest(tf.test.TestCase):
  """Tests DenseFeatures with sequence feature columns."""

  def test_embedding_column(self):
    """Tests that error is raised for sequence embedding column."""
    vocabulary_size = 3
    sparse_input = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 0), (1, 0), (1, 1)),
        values=(2, 0, 1),
        dense_shape=(2, 2))
    categorical_column_a = tf.feature_column.sequence_categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    embedding_column_a = tf.feature_column.embedding_column(
        categorical_column_a, dimension=2)
    input_layer = df.DenseFeatures([embedding_column_a])
    # DenseFeatures is for non-sequence columns only; sequence columns must
    # go through the sequence input layer instead.
    with self.assertRaisesRegex(
        ValueError,
        r'In embedding_column: aaa_embedding\. categorical_column must not be '
        r'of type SequenceCategoricalColumn\.'):
      _ = input_layer({'aaa': sparse_input})

  def test_indicator_column(self):
    """Tests that error is raised for sequence indicator column."""
    vocabulary_size = 3
    sparse_input = tf.compat.v1.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 0), (1, 0), (1, 1)),
        values=(2, 0, 1),
        dense_shape=(2, 2))
    categorical_column_a = tf.feature_column.sequence_categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    indicator_column_a = tf.feature_column.indicator_column(
        categorical_column_a)
    input_layer = df.DenseFeatures([indicator_column_a])
    # Same restriction as above, for indicator columns.
    with self.assertRaisesRegex(
        ValueError,
        r'In indicator_column: aaa_indicator\. categorical_column must not be '
        r'of type SequenceCategoricalColumn\.'):
      _ = input_layer({'aaa': sparse_input})
# Standard TensorFlow test entry point: discovers and runs all tests above.
if __name__ == '__main__':
  tf.test.main()
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the private `_AutoShardDataset` transformation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.experimental.ops import distribute_options
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import readers
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.experimental.ops import unique
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.kernel_tests import tf_record_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import readers as core_readers
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.platform import test
def chunk(l, n):
  """Yield successive slices of `l`, each containing at most `n` items."""
  start = 0
  total = len(l)
  while start < total:
    yield l[start:start + n]
    start += n
class AutoShardDatasetTest(tf_record_test_base.TFRecordTestBase,
parameterized.TestCase):
  def setUp(self):
    """Create 10 TFRecord files of 10 records each for the sharding tests."""
    super(AutoShardDatasetTest, self).setUp()
    self._num_files = 10
    self._num_records = 10
    # _createFiles comes from TFRecordTestBase; records read back as
    # b"Record <r> of file <f>".
    self._filenames = self._createFiles()
def getAllDatasetElements(self, dataset):
actual = []
next_fn = self.getNext(dataset)
while True:
try:
actual.append(self.evaluate(next_fn()))
except errors.OutOfRangeError:
break
return actual
  def assertDatasetProducesWithShuffle(self, dataset, expected, batch,
                                       num_examples, shuffle):
    """Assert dataset output, tolerating arbitrary order when shuffled.

    When `shuffle` is True, pulls `num_examples` batches, flattens them, and
    compares as a multiset against `expected`, then checks the dataset is
    exhausted. When False, expects exactly `expected` split into `batch`-sized
    chunks, in order.
    """
    if shuffle:
      actual = []
      next_fn = self.getNext(dataset)
      for _ in range(num_examples):
        elem = self.evaluate(next_fn())
        # Batches may come out as tuples (zipped datasets) or ndarrays.
        if isinstance(elem, tuple):
          actual.extend(elem)
        else:
          actual.extend(elem.tolist())
      self.assertCountEqual(actual, expected)
      with self.assertRaises(errors.OutOfRangeError):
        self.evaluate(next_fn())
    else:
      self.assertDatasetProduces(dataset, list(chunk(expected, batch)))
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(shuffle=[True, False])))
  def testFlatMapReaderPipeline(self, shuffle):
    """Sharding a list_files -> flat_map(TFRecordDataset) pipeline by file.

    With 5 workers and index 3, worker 3 should receive files 3 and 8.
    """
    dataset = dataset_ops.Dataset.list_files(
        self._filenames, shuffle=shuffle)
    dataset = dataset.flat_map(core_readers.TFRecordDataset)
    dataset = dataset.batch(5)
    dataset = distribute._AutoShardDataset(dataset, 5, 3)
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in (3, 8)
        for r in range(0, 10)
    ]
    self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(batch_size=[1, 3, 10])))
  def testDatasetOfReaderDatasetsPipeline(self, batch_size):
    """Sharding a dataset built from a dataset of reader datasets."""
    # This tests a scenario where a list_files main return multiple files
    # due to the glob containing wildcards.
    def batch(iterator, n):
      # Local helper: split `iterator` (a sequence) into lists of length <= n.
      l = len(iterator)
      for i in range(0, l, n):
        yield iterator[i:min(i + n, l)]
    datasets = []
    for files in batch(self._filenames, batch_size):
      datasets.append(
          dataset_ops.Dataset.list_files(files, shuffle=False).map(
              core_readers.TFRecordDataset))
    dataset = dataset_ops.Dataset.from_tensor_slices(datasets)
    dataset = dataset.flat_map(lambda x: x)
    # Simulate additional ops in between flat_map and interleave. This should be
    # a no-op since if ShardDataset is placed right after flat_map, we will only
    # have two datasets left at this point.
    dataset = dataset.prefetch(1)
    dataset = dataset.prefetch(1)
    dataset = dataset.interleave(
        lambda x: x, cycle_length=1, num_parallel_calls=1)
    dataset = distribute._AutoShardDataset(dataset, 5, 0)
    # Worker 0 of 5 gets files 0 and 5.
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in (0, 5)
        for r in range(0, 10)
    ]
    self.assertDatasetProduces(dataset, expected)
  @combinations.generate(test_base.default_test_combinations())
  def testZipReaderPipeline(self):
    """Sharding a zip of two identical reader pipelines.

    Both sides of the zip are sharded consistently, so each output tuple
    pairs matching records from files 3 and 8 (worker 3 of 5).
    """
    dataset1 = dataset_ops.Dataset.list_files(
        self._filenames, shuffle=False)
    dataset1 = dataset1.apply(
        interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
    dataset2 = dataset_ops.Dataset.list_files(
        self._filenames, shuffle=False)
    dataset2 = dataset2.apply(
        interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
    dataset = dataset_ops.Dataset.zip((dataset1, dataset2))
    dataset = distribute._AutoShardDataset(dataset, 5, 3)
    expected = [
        (b"Record %d of file %d" % (r, f), b"Record %d of file %d" % (r, f))  # pylint:disable=g-complex-comprehension
        for r in range(0, 10)
        for f in (3, 8)
    ]
    self.assertDatasetProduces(dataset, expected)
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(shuffle=[True, False])))
  def testConcatenateReaderPipeline(self, shuffle):
    """Sharding a concatenation of two reader pipelines.

    Each half is sharded to files 3 and 8 (worker 3 of 5), so the expected
    output is that record set twice.
    """
    dataset1 = dataset_ops.Dataset.list_files(
        self._filenames, shuffle=shuffle)
    dataset1 = dataset1.apply(
        interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
    dataset1 = dataset1.batch(5)
    dataset2 = dataset_ops.Dataset.list_files(
        self._filenames, shuffle=shuffle)
    dataset2 = dataset2.apply(
        interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
    dataset2 = dataset2.batch(5)
    dataset = dataset1.concatenate(dataset2)
    dataset = distribute._AutoShardDataset(dataset, 5, 3)
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for r in range(0, 10)
        for f in (3, 8)
    ]
    expected += expected
    self.assertDatasetProducesWithShuffle(dataset, expected, 5, 8, shuffle)
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(shuffle=[True, False])))
  def testPipelineWithMap(self, shuffle):
    """Sharding still applies when a map transformation follows the readers."""
    dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
    dataset = dataset.apply(
        interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
    # substr strips the leading "Re" from each record.
    dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000))
    dataset = dataset.batch(5)
    dataset = distribute._AutoShardDataset(dataset, 5, 3)
    expected = [
        b"cord %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for r in range(0, 10)
        for f in (3, 8)
    ]
    self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
  @combinations.generate(test_base.default_test_combinations())
  def testDirectFilenameTFRecordReaderPipeline(self):
    """File-level sharding of a TFRecordDataset built directly from filenames."""
    dataset = core_readers.TFRecordDataset(self._filenames)
    dataset = distribute._AutoShardDataset(dataset, 5, 0)
    # Worker 0 of 5 gets files 0 and 5.
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in (0, 5)
        for r in range(0, 10)
    ]
    self.assertDatasetProduces(dataset, expected)
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(shuffle=[True, False])))
  def testValidPipelineWithRangeDataset(self, shuffle):
    """Sharding works when filenames are synthesized from a range dataset."""
    dataset = dataset_ops.Dataset.range(self._num_files)
    # Rebuild the filenames that _createFiles wrote into the temp dir.
    dataset = dataset.map(lambda n: string_ops.string_join(  # pylint:disable=g-long-lambda
        [self.get_temp_dir(),
         string_ops.string_format("/tf_record.{}.txt", [n])]))
    dataset = dataset.apply(
        interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
    dataset = dataset.map(lambda x: string_ops.substr_v2(x, 2, 1000))
    dataset = dataset.batch(5)
    dataset = distribute._AutoShardDataset(dataset, 5, 3)
    expected = [
        b"cord %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for r in range(0, 10)
        for f in (3, 8)
    ]
    self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(params=[(1, 0, 10, 10), (2, 1, 20, 5),
                                       (10, 1, 1, 10)])))
  def testStandardReaderPipeline(self, params):
    """Sharding the canonical make_tf_record_dataset pipeline.

    `params` is (num_epochs, worker_index, batch_size, parallel_reads); the
    dataset is split across 2 workers and the records received by
    `worker_index` are verified via the base class helper.
    """
    num_epochs, index, batch_size, parallel_reads = params
    dataset = readers.make_tf_record_dataset(
        file_pattern=self._filenames,
        num_epochs=num_epochs,
        batch_size=batch_size,
        parser_fn=None,
        num_parallel_reads=parallel_reads,
        drop_final_batch=True,
        shuffle=False)
    dataset = distribute._AutoShardDataset(dataset, 2, index)
    outputs = self.getNext(dataset)
    self._verify_records(
        outputs,
        batch_size=batch_size,
        file_index=[i for i in range(index, self._num_records, 2)],
        num_epochs=num_epochs,
        interleave_cycle_length=parallel_reads,
        drop_final_batch=True,
        use_parser_fn=None)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(outputs())
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(shuffle=[True, False])))
  def testSampleResNetPipeline(self, shuffle):
    """Sharding a ResNet-style list_files -> parallel_interleave pipeline."""
    dataset = dataset_ops.Dataset.list_files(
        self._filenames, shuffle=shuffle)
    dataset = dataset.apply(
        interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
    dataset = dataset.batch(5)
    dataset = distribute._AutoShardDataset(dataset, 5, 3)
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for r in range(0, 10)
        for f in (3, 8)
    ]
    self.assertDatasetProducesWithShuffle(dataset, expected, 5, 4, shuffle)
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(sharding_policy=[
              distribute_options.AutoShardPolicy.DATA,
              distribute_options.AutoShardPolicy.AUTO
          ])))
  def testShardByDataBeforePrefetch(self, sharding_policy):
    """Data sharding inserts the Shard op before the trailing Prefetch.

    assert_next pins the expected op order; worker 0 of 2 over range(4)
    receives the even elements.
    """
    dataset = dataset_ops.Dataset.range(4)
    dataset = dataset.apply(testing.assert_next(["Shard", "Prefetch"]))
    dataset = dataset.prefetch(1)
    options = dataset_ops.Options()
    options.experimental_distribute.auto_shard_policy = sharding_policy
    dataset = dataset.with_options(options)
    dataset = distribute._AutoShardDataset(dataset, 2, 0)
    self.assertDatasetProduces(dataset, [0, 2])
  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.times(combinations.combine(
              sharding_policy=[distribute_options.AutoShardPolicy.DATA,
                               distribute_options.AutoShardPolicy.FILE]),
              combinations.combine(shuffle=[True, False]))))
  def testReplicateAndShardProduceDisjointData(self, shuffle, sharding_policy):
    """Two replicas sharded at indices 0 and 1 must see disjoint elements.

    Replicates the pipeline by round-tripping its serialized graph into two
    _RemoteDataset copies, shards each with a different worker index, and
    asserts their element sets do not intersect.
    """
    dataset = dataset_ops.Dataset.list_files(self._filenames,
                                             shuffle=shuffle)
    dataset = dataset.flat_map(core_readers.TFRecordDataset)
    graph_def = dataset._as_serialized_graph(
        strip_device_assignment=True,
        external_state_policy=distribute_options.ExternalStatePolicy.WARN)
    options = dataset_ops.Options()
    options.experimental_distribute.auto_shard_policy = sharding_policy
    ds1 = distribute._RemoteDataset(graph_def, "/device:CPU:0",
                                    dataset.element_spec)
    ds2 = distribute._RemoteDataset(graph_def, "/device:CPU:0",
                                    dataset.element_spec)
    ds1 = ds1.with_options(options)
    ds2 = ds2.with_options(options)
    ds1 = distribute._AutoShardDataset(ds1, 2, 0)
    ds2 = distribute._AutoShardDataset(ds2, 2, 1)
    elems1 = set(self.getAllDatasetElements(ds1))
    elems2 = set(self.getAllDatasetElements(ds2))
    self.assertEmpty(elems1.intersection(elems2))
  @combinations.generate(test_base.default_test_combinations())
  def testWorkersGreaterThanNumFilesWithDataSharding(self):
    """DATA policy shards by element, so every file still contributes records."""
    options = dataset_ops.Options()
    options.experimental_distribute.auto_shard_policy = (
        distribute_options.AutoShardPolicy.DATA)
    dataset = core_readers._TFRecordDataset(self._filenames)
    dataset = dataset.with_options(options)
    dataset = distribute._AutoShardDataset(dataset, 5, 0)
    # Should return "Record (0,5) of file (0 --> 9)" since we are sharding by
    # individual elements, we should be able to get some data from all files.
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in (0, 5)
    ]
    self.assertDatasetProduces(dataset, expected)
  @combinations.generate(test_base.default_test_combinations())
  def testAutoshardPolicyOff(self):
    """OFF policy makes _AutoShardDataset a no-op: all records pass through."""
    options = dataset_ops.Options()
    options.experimental_distribute.auto_shard_policy = (
        distribute_options.AutoShardPolicy.OFF)
    dataset = core_readers._TFRecordDataset(self._filenames)
    dataset = dataset.with_options(options)
    dataset = distribute._AutoShardDataset(dataset, 5, 0)
    # Should return every record in every file since autosharding is turned off.
    expected = [
        b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
        for f in range(0, 10)
        for r in range(0, 10)
    ]
    self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testFileShardingWithoutReaderDatasetOp(self):
  """FILE policy on a pipeline with no file reader raises NotFoundError."""
  options = dataset_ops.Options()
  options.experimental_distribute.auto_shard_policy = (
      distribute_options.AutoShardPolicy.FILE)
  dataset = dataset_ops.Dataset.range(1024)
  dataset = dataset.with_options(options)
  # We are specifying that we want a file sharding policy, and this pipeline
  # doesn't start with file reading, so we should error out.
  with self.assertRaises(errors.NotFoundError):
    dataset = distribute._AutoShardDataset(dataset, 10, 0)
    self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testWorkersGreaterThanNumFiles(self):
  """With far more workers (500) than files, the last worker gets nothing."""
  dataset = dataset_ops.Dataset.list_files(self._filenames)
  dataset = dataset.apply(
      interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
  dataset = dataset.batch(5)
  # Worker 499 of 500 is expected to produce an empty dataset.
  dataset = distribute._AutoShardDataset(dataset, 500, 499)
  self.assertDatasetProduces(dataset, [])
@combinations.generate(test_base.default_test_combinations())
def testTFRecordReaderWithDirectFileNames(self):
  """A raw reader op (no file-name dataset) is sharded at record level."""
  # Using `_TFRecordDataset` creates a raw op rather than wrapping it around
  # a flat_map automatically.
  dataset = core_readers._TFRecordDataset(self._filenames)
  dataset = distribute._AutoShardDataset(dataset, 5, 0)
  # Worker 0 of 5 receives records 0 and 5 from every file.
  expected = [
      b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
      for f in range(0, 10)
      for r in (0, 5)
  ]
  self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testTFRecordReaderWithDirectFileNamesAndShapes(self):
  """Sharding applied after batch() selects whole batches per worker."""
  # Using `_TFRecordDataset` creates a raw op rather than wrapping it around
  # a flat_map automatically.
  dataset = core_readers._TFRecordDataset(self._filenames)
  # BatchDataset contains `output_types` and `output_shapes`
  dataset = dataset.batch(5)
  dataset = distribute._AutoShardDataset(dataset, 2, 0)
  # Worker 0 of 2 gets the even-numbered batches of 5 records, i.e. records
  # 0-4 of every file.
  expected = [
      b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
      for f in range(0, 10)
      for r in range(0, 5)
  ]
  self.assertDatasetProduces(dataset, list(chunk(expected, 5)))
@combinations.generate(test_base.default_test_combinations())
def testShardOutOfRange(self):
  """Sharding 10 ways over only 5 elements raises InvalidArgumentError."""
  dataset = dataset_ops.Dataset.range(5)
  with self.assertRaises(errors.InvalidArgumentError):
    dataset = distribute._AutoShardDataset(dataset, 10, 0)
    self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testShardOutOfRangeEmptyDataset(self):
  """Sharding an empty dataset raises OutOfRangeError on iteration."""
  dataset = dataset_ops.Dataset.range(0)
  with self.assertRaises(errors.OutOfRangeError):
    dataset = distribute._AutoShardDataset(dataset, 10, 0)
    self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testNoReaderPipelines(self):
  """A pipeline without a file reader is sharded element-wise."""
  ds = dataset_ops.Dataset.range(1024)
  ds = distribute._AutoShardDataset(ds, 2, 0)
  # Worker 0 of 2 receives exactly the even elements.
  self.assertDatasetProduces(ds, list(range(0, 1024, 2)))
@combinations.generate(test_base.default_test_combinations())
def testUnknownOpInPipelineStillShardsAtTheEnd(self):
  """An op unknown to the rewrite (unique) still yields record-level sharding."""
  dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
  dataset = dataset.flat_map(core_readers.TFRecordDataset)
  dataset = dataset.apply(unique.unique())
  dataset = distribute._AutoShardDataset(dataset, 5, 0)
  # Worker 0 of 5 receives records 0 and 5 of every file.
  expected = [
      b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
      for f in range(0, 10)
      for r in (0, 5)
  ]
  self.assertDatasetProduces(dataset, expected)
@combinations.generate(test_base.default_test_combinations())
def testInvalidWorkerIndex(self):
  """A worker index equal to the worker count (2 of 2) is rejected."""
  dataset = dataset_ops.Dataset.list_files(self._filenames)
  dataset = dataset.flat_map(core_readers.TFRecordDataset)
  dataset = dataset.batch(5)
  with self.assertRaises(errors.InvalidArgumentError):
    dataset = distribute._AutoShardDataset(dataset, 2, 2)
    self.evaluate(self.getNext(dataset)())
@combinations.generate(test_base.default_test_combinations())
def testAssertCardinality(self):
  """assert_cardinality does not prevent file-level sharding."""
  dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
  dataset = dataset.flat_map(core_readers.TFRecordDataset)
  dataset = dataset.batch(5)
  dataset = dataset.apply(cardinality.assert_cardinality(42))
  dataset = distribute._AutoShardDataset(dataset, 5, 0)
  # Worker 0 of 5 receives files 0 and 5 in full.
  expected = [
      b"Record %d of file %d" % (r, f)  # pylint:disable=g-complex-comprehension
      for f in (0, 5)
      for r in range(0, 10)
  ]
  self.assertDatasetProduces(dataset, list(chunk(expected, 5)))
@combinations.generate(test_base.default_test_combinations())
def testMakeBatchedFeaturesDataset(self):
  """make_batched_features_dataset pipelines are sharded at the file level."""
  files = 2
  records_per_file = 5

  def make_record(file_index):
    # Serialized Example whose "file" feature records the source file index.
    example = example_pb2.Example(
        features=feature_pb2.Features(
            feature={
                "file":
                    feature_pb2.Feature(
                        int64_list=feature_pb2.Int64List(value=[file_index])),
            }))
    return example.SerializeToString()

  # Write `files` TFRecord files with `records_per_file` records each.
  filenames = []
  for file_index in range(files):
    filename = os.path.join(self.get_temp_dir(),
                            "tf_record.%d.txt" % file_index)
    filenames.append(filename)
    writer = python_io.TFRecordWriter(filename)
    for _ in range(records_per_file):
      writer.write(make_record(file_index))
    writer.close()

  dataset = readers.make_batched_features_dataset(
      file_pattern=filenames,
      batch_size=records_per_file,
      features={
          "file": parsing_ops.FixedLenFeature([], dtypes.int64),
      },
      reader=core_readers.TFRecordDataset,
      num_epochs=1)
  # We should shard at the file level, so that all records come from file 0.
  dataset = distribute._AutoShardDataset(dataset, 2, 0)
  dataset = dataset.unbatch()
  output = self.getDatasetOutput(dataset)
  files = [elem["file"] for elem in output]
  self.assertEqual(files, [0] * records_per_file)
@combinations.generate(test_base.default_test_combinations())
def testHintShardingValidPattern(self):
  """Under HINT policy a shard(SHARD_HINT, ...) placeholder takes effect.

  Worker 0 of 10 ends up with every 10th element of range(100).
  """
  ds = dataset_ops.Dataset.range(100).shard(distribute.SHARD_HINT, 0)
  opts = dataset_ops.Options()
  opts.experimental_distribute.auto_shard_policy = (
      distribute_options.AutoShardPolicy.HINT)
  ds = ds.with_options(opts)
  ds = distribute._AutoShardDataset(ds, 10, 0)
  self.assertDatasetProduces(ds, list(range(0, 100, 10)))
@combinations.generate(test_base.default_test_combinations())
def testHintShardingInvalidPattern(self):
  """Under HINT policy a plain shard(1, 0) adds no extra sharding.

  All 100 elements are produced despite requesting 10 workers.
  """
  ds = dataset_ops.Dataset.range(100).shard(1, 0)
  opts = dataset_ops.Options()
  opts.experimental_distribute.auto_shard_policy = (
      distribute_options.AutoShardPolicy.HINT)
  ds = ds.with_options(opts)
  ds = distribute._AutoShardDataset(ds, 10, 0)
  self.assertDatasetProduces(ds, list(range(100)))
class AutoShardWithRebatchDatasetTest(tf_record_test_base.TFRecordTestBase,
                                      parameterized.TestCase):
  """Tests for how _AutoShardDataset interacts with the rebatch ops."""

  def _setUpFiles(self, num_files, num_records_per_file):
    # Create the TFRecord fixture files; the attribute names are those
    # consumed by TFRecordTestBase._createFiles().
    self._num_files = num_files
    self._num_records = num_records_per_file
    self._filenames = self._createFiles()

  @combinations.generate(test_base.default_test_combinations())
  def testFileShardingWithLegacyRebatch(self):
    """File-level sharding is applied below a _LegacyRebatchDataset."""
    # Tests that RebatchDatasetV1 is a passthrough op.
    self._setUpFiles(num_files=5, num_records_per_file=10)
    dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
    # assert_next pins the rewritten op order: the Shard lands before the
    # reader's FlatMap, with Rebatch left in place after Batch.
    dataset = dataset.apply(
        testing.assert_next(["Shard", "FlatMap", "Batch", "Rebatch"]))
    dataset = dataset.flat_map(core_readers.TFRecordDataset)
    dataset = dataset.batch(5)
    dataset = distribute._LegacyRebatchDataset(dataset, num_replicas=5)
    dataset = distribute._AutoShardDataset(dataset, 5, 3)
    # Worker 3 of 5 sees only file 3; each batch of 5 is rebatched into
    # single-element batches.
    expected = [[self._record(3, i)] for i in range(10)]
    self.assertDatasetProduces(dataset, expected)

  @combinations.generate(test_base.default_test_combinations())
  def testFileShardingWithRebatch(self):
    """File-level sharding is applied below a _RebatchDataset (V2)."""
    # Tests that RebatchDatasetV2 is a passthrough op.
    self._setUpFiles(num_files=3, num_records_per_file=5)
    dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
    dataset = dataset.apply(
        testing.assert_next(["Shard", "FlatMap", "Batch", "Rebatch"]))
    dataset = dataset.flat_map(core_readers.TFRecordDataset)
    dataset = dataset.batch(5)
    dataset = distribute._RebatchDataset(dataset, batch_sizes=[2, 1, 2])
    dataset = distribute._AutoShardDataset(dataset, 3, 1)
    # Worker 1 of 3 sees only file 1, rebatched into sizes 2/1/2.
    expected = [[self._record(1, 0), self._record(1, 1)], [self._record(1, 2)],
                [self._record(1, 3), self._record(1, 4)]]
    self.assertDatasetProduces(dataset, expected)

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.times(
              combinations.combine(sharding_policy=[
                  distribute_options.AutoShardPolicy.DATA,
                  distribute_options.AutoShardPolicy.AUTO
              ]), combinations.combine(with_prefetch=[True, False]))))
  def testUseLegacyRebatchWithDataSharding(self, sharding_policy,
                                           with_prefetch):
    """Data sharding rewrites RebatchDatasetV2 to the legacy rebatch op."""
    # This test simulates a distributed environment with 3 workers, each with
    # 1 replica.
    dataset = dataset_ops.Dataset.range(8)
    dataset = dataset.batch(4)
    options = dataset_ops.Options()
    options.experimental_distribute.auto_shard_policy = sharding_policy
    dataset = dataset.with_options(options)
    # We expect the auto-shard rewrite to rewrite RebatchDatasetV2 to
    # RebatchDataset(V1) for correctness reasons. This will modify the output
    # of the dataset.
    worker_a_dataset = distribute._RebatchDataset(
        dataset, batch_sizes=[2, 1, 1])
    if with_prefetch:
      worker_a_dataset = worker_a_dataset.prefetch(1)
    worker_a_dataset = distribute._AutoShardDataset(
        worker_a_dataset, 3, 0, num_replicas=3)
    expected = [[0, 1], [4, 5]]
    self.assertDatasetProduces(worker_a_dataset, expected)
    worker_b_dataset = distribute._RebatchDataset(
        dataset, batch_sizes=[1, 1, 2])
    if with_prefetch:
      worker_b_dataset = worker_b_dataset.prefetch(1)
    worker_b_dataset = distribute._AutoShardDataset(
        worker_b_dataset, 3, 1, num_replicas=3)
    expected = [[2, 3], [6, 7]]
    self.assertDatasetProduces(worker_b_dataset, expected)
    worker_c_dataset = distribute._RebatchDataset(
        dataset, batch_sizes=[1, 2, 1])
    if with_prefetch:
      worker_c_dataset = worker_c_dataset.prefetch(1)
    worker_c_dataset = distribute._AutoShardDataset(
        worker_c_dataset, 3, 2, num_replicas=3)
    # The third worker receives only empty batches.
    expected = [[], []]
    self.assertDatasetProduces(worker_c_dataset, expected)
class AutoShardDatasetCheckpointTest(tf_record_test_base.TFRecordTestBase,
                                     checkpoint_test_base.CheckpointTestBase,
                                     parameterized.TestCase):
  """Checkpoint/restore coverage for _AutoShardDataset."""

  def setUp(self):
    super(AutoShardDatasetCheckpointTest, self).setUp()
    # 10 files x 10 records, created by TFRecordTestBase._createFiles().
    self._num_files = 10
    self._num_records = 10
    self._filenames = self._createFiles()

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         checkpoint_test_base.default_test_combinations()))
  def test(self, verify_fn):

    def build_dataset():
      dataset = dataset_ops.Dataset.list_files(self._filenames, shuffle=False)
      dataset = dataset.apply(
          interleave_ops.parallel_interleave(core_readers.TFRecordDataset, 10))
      dataset = distribute._AutoShardDataset(dataset, 5, 3)
      return dataset

    # Worker 3 of 5 receives 2 of the 10 files, i.e. 20 records.
    verify_fn(self, build_dataset, num_outputs=20)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  test.main()
| |
# -*- test-case-name: foolscap.test.test_reconnector -*-
from twisted.trial import unittest
from foolscap.api import Tub, eventually, flushEventualQueue
from foolscap.test.common import HelperTarget, MakeTubsMixin
from twisted.internet import defer, reactor, error
from foolscap import negotiate
class AlwaysFailNegotiation(negotiate.Negotiation):
    """Negotiation subclass that rejects every offer.

    Installed on a listener (see test_negotiate_fails_and_retry) to simulate
    a server whose negotiation always fails.
    """
    def evaluateHello(self, offer):
        raise negotiate.NegotiationError("I always fail")
class Reconnector(MakeTubsMixin, unittest.TestCase):
    """Exercises the Reconnector returned by Tub.connectTo()."""

    def setUp(self):
        # makeTubs() (from MakeTubsMixin) also populates self.services and
        # self.tub_ports, which tearDown and the tests below rely on.
        self.tubA, self.tubB = self.makeTubs(2)

    def tearDown(self):
        # Stop every running Tub, then drain foolscap's eventual-send queue.
        d = defer.DeferredList([s.stopService() for s in self.services])
        d.addCallback(flushEventualQueue)
        return d

    def test_try(self):
        """Connect, force a disconnect, and verify we reconnect exactly once."""
        self.count = 0
        self.attached = False
        self.done = defer.Deferred()
        target = HelperTarget("bob")
        url = self.tubB.registerReference(target)
        rc = self.tubA.connectTo(url, self._got_ref, "arg", kw="kwarg")
        # at least make sure the stopConnecting method is present, even if we
        # don't have a real test for it yet
        self.failUnless(rc.stopConnecting)
        return self.done

    def _got_ref(self, rref, arg, kw):
        """Connect callback for test_try; fired on each (re)connection."""
        self.failUnlessEqual(self.attached, False)
        self.attached = True
        # The extra positional/keyword args given to connectTo() are passed
        # through to the callback.
        self.failUnlessEqual(arg, "arg")
        self.failUnlessEqual(kw, "kwarg")
        self.count += 1
        rref.notifyOnDisconnect(self._disconnected, self.count)
        if self.count < 2:
            # forcibly disconnect it
            eventually(rref.tracker.broker.transport.loseConnection)
        else:
            self.done.callback("done")

    def _disconnected(self, count):
        # Disconnect notification for test_try: checks we were attached and
        # that the notification carries the count registered at connect time.
        self.failUnlessEqual(self.attached, True)
        self.failUnlessEqual(count, self.count)
        self.attached = False

    def _connected(self, ref, notifiers, accumulate):
        # Generic connect callback for the retry tests: record the reference
        # and fire the next waiting Deferred, if any.
        accumulate.append(ref)
        if notifiers:
            notifiers.pop(0).callback(ref)

    def stall(self, timeout, res=None):
        """Return a Deferred that fires with `res` after `timeout` seconds."""
        d = defer.Deferred()
        reactor.callLater(timeout, d.callback, res)
        return d

    def test_retry(self):
        """A Reconnector keeps retrying until a listener appears again."""
        # tubC shares tubB's certificate, so it can satisfy tubB's FURL.
        tubC = Tub(certData=self.tubB.getCertData())
        connects = []
        target = HelperTarget("bob")
        url = self.tubB.registerReference(target, "target")
        portb = self.tub_ports[1]
        d1 = defer.Deferred()
        notifiers = [d1]
        self.services.remove(self.tubB)
        d = self.tubB.stopService()
        def _start_connecting(res):
            # this will fail, since tubB is not listening anymore
            self.rc = self.tubA.connectTo(url, self._connected,
                                          notifiers, connects)
            # give it a few tries, then start tubC listening on the same port
            # that tubB used to, which should allow the connection to
            # complete (since they both use the same certData)
            return self.stall(2)
        d.addCallback(_start_connecting)
        def _start_tubC(res):
            self.failUnlessEqual(len(connects), 0)
            self.services.append(tubC)
            tubC.startService()
            tubC.listenOn("tcp:%d:interface=127.0.0.1" % portb)
            tubC.setLocation("127.0.0.1:%d" % portb)
            url2 = tubC.registerReference(target, "target")
            assert url2 == url
            return d1
        d.addCallback(_start_tubC)
        def _connected(res):
            self.failUnlessEqual(len(connects), 1)
            self.rc.stopConnecting()
        d.addCallback(_connected)
        return d

    def test_negotiate_fails_and_retry(self):
        """Negotiation failures are retried; fixing the listener lets the
        next attempt succeed."""
        connects = []
        target = HelperTarget("bob")
        url = self.tubB.registerReference(target, "target")
        l = self.tubB.getListeners()[0]
        # Make every negotiation attempt on this listener fail.
        l._negotiationClass = AlwaysFailNegotiation
        portb = self.tub_ports[1]
        d1 = defer.Deferred()
        notifiers = [d1]
        self.rc = self.tubA.connectTo(url, self._connected,
                                      notifiers, connects)
        d = self.stall(2)
        def _failed_a_few_times(res):
            # the reconnector should have failed once or twice, since the
            # negotiation would always fail.
            self.failUnlessEqual(len(connects), 0)
            # Now we fix tubB. We only touched the Listener, so re-doing the
            # listenOn should clear it.
            return self.tubB.stopListeningOn(l)
        d.addCallback(_failed_a_few_times)
        def _stopped(res):
            self.tubB.listenOn("tcp:%d:interface=127.0.0.1" % portb)
            # the next time the reconnector tries, it should succeed
            return d1
        d.addCallback(_stopped)
        def _connected(res):
            self.failUnlessEqual(len(connects), 1)
            self.rc.stopConnecting()
        d.addCallback(_connected)
        return d

    def test_lose_and_retry(self):
        """Losing an established connection triggers a reconnect."""
        tubC = Tub(self.tubB.getCertData())
        connects = []
        d1 = defer.Deferred()
        d2 = defer.Deferred()
        notifiers = [d1, d2]
        target = HelperTarget("bob")
        url = self.tubB.registerReference(target, "target")
        portb = self.tub_ports[1]
        self.rc = self.tubA.connectTo(url, self._connected,
                                      notifiers, connects)
        def _connected_first(res):
            # we are now connected to tubB. Shut it down to force a
            # disconnect.
            self.services.remove(self.tubB)
            d = self.tubB.stopService()
            return d
        d1.addCallback(_connected_first)
        def _wait(res):
            # wait a few seconds to give the Reconnector a chance to try and
            # fail a few times
            return self.stall(2)
        d1.addCallback(_wait)
        def _start_tubC(res):
            # now start tubC listening on the same port that tubB used to,
            # which should allow the connection to complete (since they both
            # use the same certData)
            self.services.append(tubC)
            tubC.startService()
            tubC.listenOn("tcp:%d:interface=127.0.0.1" % portb)
            tubC.setLocation("127.0.0.1:%d" % portb)
            url2 = tubC.registerReference(target, "target")
            assert url2 == url
            # this will fire when the second connection has been made
            return d2
        d1.addCallback(_start_tubC)
        def _connected(res):
            self.failUnlessEqual(len(connects), 2)
            self.rc.stopConnecting()
        d1.addCallback(_connected)
        return d1

    def test_stop_trying(self):
        """stopConnecting() while the retry timer is active cancels retries."""
        connects = []
        target = HelperTarget("bob")
        url = self.tubB.registerReference(target, "target")
        d1 = defer.Deferred()
        self.services.remove(self.tubB)
        d = self.tubB.stopService()
        def _start_connecting(res):
            # this will fail, since tubB is not listening anymore
            # NOTE(review): d1 is passed where _connected expects a list of
            # notifiers; harmless here because the connection never succeeds,
            # but worth confirming if this test is ever extended.
            self.rc = self.tubA.connectTo(url, self._connected, d1, connects)
            self.rc.verbose = True  # get better code coverage
            # give it a few tries, then tell it to stop trying
            return self.stall(2)
        d.addCallback(_start_connecting)
        def _stop_trying(res):
            self.failUnlessEqual(len(connects), 0)
            f = self.rc.getLastFailure()
            self.failUnless(f.check(error.ConnectionRefusedError))
            delay = self.rc.getDelayUntilNextAttempt()
            self.failUnless(delay > 0, delay)
            self.failUnless(delay < 60, delay)
            # reset() restarts the exponential backoff from its minimum.
            self.rc.reset()
            delay = self.rc.getDelayUntilNextAttempt()
            self.failUnless(delay < 2)
            # this stopConnecting occurs while the reconnector's timer is
            # active
            self.rc.stopConnecting()
            self.failUnlessEqual(self.rc.getDelayUntilNextAttempt(), None)
        d.addCallback(_stop_trying)
        # if it keeps trying, we'll see a dirty reactor
        return d
# another test: determine the target url early, but don't actually register
# the reference yet. Start the reconnector, let it fail once, then register
# the reference and make sure the retry succeeds. This will distinguish
# between connection/negotiation failures and object-lookup failures, both of
# which ought to be handled by Reconnector. I suspect the object-lookup
# failures are not yet handled.
# test that Tub shutdown really stops all Reconnectors
| |
#!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import os
import subprocess
from test_framework.blocktools import (
create_block,
create_coinbase,
TIME_GENESIS_BLOCK,
)
from test_framework.messages import (
CBlockHeader,
FromHex,
msg_block,
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
get_datadir_path,
)
from test_framework.wallet import MiniWallet
class BlockchainTest(BitcoinTestFramework):
    """Functional tests for the blockchain-state RPCs (see module docstring)."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.supports_cli = False

    def run_test(self):
        """Mine a deterministic 200-block chain, then exercise each RPC."""
        self.mine_chain()
        self.restart_node(0, extra_args=['-stopatheight=207', '-prune=1'])  # Set extra args with pruning after rescan is complete

        self._test_getblockchaininfo()
        self._test_getchaintxstats()
        self._test_gettxoutsetinfo()
        self._test_getblockheader()
        self._test_getdifficulty()
        self._test_getnetworkhashps()
        self._test_stopatheight()
        self._test_waitforblockheight()
        self._test_getblock()
        assert self.nodes[0].verifychain(4, 0)

    def mine_chain(self):
        """Mine 200 blocks, one per ten-minute mocktime step from genesis."""
        self.log.info('Create some old blocks')
        address = self.nodes[0].get_deterministic_priv_key().address
        for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600):
            # ten-minute steps from genesis block time
            self.nodes[0].setmocktime(t)
            self.nodes[0].generatetoaddress(1, address)
        assert_equal(self.nodes[0].getblockchaininfo()['blocks'], 200)

    def _test_getblockchaininfo(self):
        """Check getblockchaininfo keys and pruning/softfork fields."""
        self.log.info("Test getblockchaininfo")

        keys = [
            'bestblockhash',
            'blocks',
            'chain',
            'chainwork',
            'difficulty',
            'headers',
            'initialblockdownload',
            'mediantime',
            'pruned',
            'size_on_disk',
            'softforks',
            'verificationprogress',
            'warnings',
        ]
        res = self.nodes[0].getblockchaininfo()

        # result should have these additional pruning keys if manual pruning is enabled
        assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))

        # size_on_disk should be > 0
        assert_greater_than(res['size_on_disk'], 0)

        # pruneheight should be greater or equal to 0
        assert_greater_than_or_equal(res['pruneheight'], 0)

        # check other pruning fields given that prune=1
        assert res['pruned']
        assert not res['automatic_pruning']

        self.restart_node(0, ['-stopatheight=207'])
        res = self.nodes[0].getblockchaininfo()
        # should have exact keys
        assert_equal(sorted(res.keys()), keys)

        self.restart_node(0, ['-stopatheight=207', '-prune=550'])
        res = self.nodes[0].getblockchaininfo()
        # result should have these additional pruning keys if prune=550
        assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys))

        # check related fields
        assert res['pruned']
        assert_equal(res['pruneheight'], 0)
        assert res['automatic_pruning']
        assert_equal(res['prune_target_size'], 576716800)
        assert_greater_than(res['size_on_disk'], 0)

        assert_equal(res['softforks'], {
            'bip34': {'type': 'buried', 'active': False, 'height': 500},
            'bip66': {'type': 'buried', 'active': False, 'height': 1251},
            'bip65': {'type': 'buried', 'active': False, 'height': 1351},
            'csv': {'type': 'buried', 'active': False, 'height': 432},
            'segwit': {'type': 'buried', 'active': True, 'height': 0},
            'testdummy': {
                'type': 'bip9',
                'bip9': {
                    'status': 'started',
                    'bit': 28,
                    'start_time': 0,
                    'timeout': 0x7fffffffffffffff,  # testdummy does not have a timeout so is set to the max int64 value
                    'since': 144,
                    'statistics': {
                        'period': 144,
                        'threshold': 108,
                        'elapsed': 57,
                        'count': 57,
                        'possible': True,
                    },
                },
                'active': False
            },
            'taproot': {
                'type': 'bip9',
                'bip9': {
                    'status': 'active',
                    'start_time': -1,
                    'timeout': 9223372036854775807,
                    'since': 0
                },
                'height': 0,
                'active': True
            }
        })

    def _test_getchaintxstats(self):
        """Check getchaintxstats argument validation and window statistics."""
        self.log.info("Test getchaintxstats")

        # Test `getchaintxstats` invalid extra parameters
        assert_raises_rpc_error(-1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0)

        # Test `getchaintxstats` invalid `nblocks`
        assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].getchaintxstats, '')
        assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1)
        assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, self.nodes[0].getblockcount())

        # Test `getchaintxstats` invalid `blockhash`
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getchaintxstats, blockhash=0)
        assert_raises_rpc_error(-8, "blockhash must be of length 64 (not 1, for '0')", self.nodes[0].getchaintxstats, blockhash='0')
        assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getchaintxstats, blockhash='ZZZ0000000000000000000000000000000000000000000000000000000000000')
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0000000000000000000000000000000000000000000000000000000000000000')
        blockhash = self.nodes[0].getblockhash(200)
        self.nodes[0].invalidateblock(blockhash)
        assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash)
        self.nodes[0].reconsiderblock(blockhash)

        chaintxstats = self.nodes[0].getchaintxstats(nblocks=1)
        # 200 txs plus genesis tx
        assert_equal(chaintxstats['txcount'], 201)
        # tx rate should be 1 per 10 minutes, or 1/600
        # we have to round because of binary math
        assert_equal(round(chaintxstats['txrate'] * 600, 10), Decimal(1))

        b1_hash = self.nodes[0].getblockhash(1)
        b1 = self.nodes[0].getblock(b1_hash)
        b200_hash = self.nodes[0].getblockhash(200)
        b200 = self.nodes[0].getblock(b200_hash)
        time_diff = b200['mediantime'] - b1['mediantime']

        chaintxstats = self.nodes[0].getchaintxstats()
        assert_equal(chaintxstats['time'], b200['time'])
        assert_equal(chaintxstats['txcount'], 201)
        assert_equal(chaintxstats['window_final_block_hash'], b200_hash)
        assert_equal(chaintxstats['window_final_block_height'], 200)
        assert_equal(chaintxstats['window_block_count'], 199)
        assert_equal(chaintxstats['window_tx_count'], 199)
        assert_equal(chaintxstats['window_interval'], time_diff)
        assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199))

        chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)
        assert_equal(chaintxstats['time'], b1['time'])
        assert_equal(chaintxstats['txcount'], 2)
        assert_equal(chaintxstats['window_final_block_hash'], b1_hash)
        assert_equal(chaintxstats['window_final_block_height'], 1)
        assert_equal(chaintxstats['window_block_count'], 0)
        # A zero-block window has no rate statistics.
        assert 'window_tx_count' not in chaintxstats
        assert 'window_interval' not in chaintxstats
        assert 'txrate' not in chaintxstats

    def _test_gettxoutsetinfo(self):
        """Check UTXO-set totals, invalidate/reconsider behavior and hash_type."""
        node = self.nodes[0]
        res = node.gettxoutsetinfo()

        assert_equal(res['total_amount'], Decimal('8725.00000000'))
        assert_equal(res['transactions'], 200)
        assert_equal(res['height'], 200)
        assert_equal(res['txouts'], 200)
        assert_equal(res['bogosize'], 15000)
        assert_equal(res['bestblock'], node.getblockhash(200))
        size = res['disk_size']
        assert size > 6400
        assert size < 64000
        assert_equal(len(res['bestblock']), 64)
        assert_equal(len(res['hash_serialized_2']), 64)

        self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
        b1hash = node.getblockhash(1)
        node.invalidateblock(b1hash)

        res2 = node.gettxoutsetinfo()
        assert_equal(res2['transactions'], 0)
        assert_equal(res2['total_amount'], Decimal('0'))
        assert_equal(res2['height'], 0)
        assert_equal(res2['txouts'], 0)
        assert_equal(res2['bogosize'], 0)
        assert_equal(res2['bestblock'], node.getblockhash(0))
        assert_equal(len(res2['hash_serialized_2']), 64)

        self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
        node.reconsiderblock(b1hash)

        res3 = node.gettxoutsetinfo()
        # The field 'disk_size' is non-deterministic and can thus not be
        # compared between res and res3. Everything else should be the same.
        del res['disk_size'], res3['disk_size']
        assert_equal(res, res3)

        self.log.info("Test hash_type option for gettxoutsetinfo()")
        # Adding hash_type 'hash_serialized_2', which is the default, should
        # not change the result.
        res4 = node.gettxoutsetinfo(hash_type='hash_serialized_2')
        del res4['disk_size']
        assert_equal(res, res4)

        # hash_type none should not return a UTXO set hash.
        res5 = node.gettxoutsetinfo(hash_type='none')
        assert 'hash_serialized_2' not in res5

    def _test_getblockheader(self):
        """Check getblockheader validation, verbose fields and hex output."""
        node = self.nodes[0]

        assert_raises_rpc_error(-8, "hash must be of length 64 (not 8, for 'nonsense')", node.getblockheader, "nonsense")
        assert_raises_rpc_error(-8, "hash must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", node.getblockheader, "ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")
        assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "0cf7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844")

        besthash = node.getbestblockhash()
        secondbesthash = node.getblockhash(199)
        header = node.getblockheader(blockhash=besthash)

        assert_equal(header['hash'], besthash)
        assert_equal(header['height'], 200)
        assert_equal(header['confirmations'], 1)
        assert_equal(header['previousblockhash'], secondbesthash)
        assert_is_hex_string(header['chainwork'])
        assert_equal(header['nTx'], 1)
        assert_is_hash_string(header['hash'])
        assert_is_hash_string(header['previousblockhash'])
        assert_is_hash_string(header['merkleroot'])
        assert_is_hash_string(header['bits'], length=None)
        assert isinstance(header['time'], int)
        assert isinstance(header['mediantime'], int)
        assert isinstance(header['nonce'], int)
        assert isinstance(header['version'], int)
        assert isinstance(int(header['versionHex'], 16), int)
        assert isinstance(header['difficulty'], Decimal)

        # Test with verbose=False, which should return the header as hex.
        header_hex = node.getblockheader(blockhash=besthash, verbose=False)
        assert_is_hex_string(header_hex)

        header = FromHex(CBlockHeader(), header_hex)
        header.calc_sha256()
        assert_equal(header.hash, besthash)

    def _test_getdifficulty(self):
        """Check getdifficulty against the regtest target."""
        difficulty = self.nodes[0].getdifficulty()
        # 1 hash in 2 should be valid, so difficulty should be 1/2**31
        # binary => decimal => binary math is why we do this check
        assert abs(difficulty * 2**31 - 1) < 0.0001

    def _test_getnetworkhashps(self):
        """Check getnetworkhashps for the mocktime-mined chain."""
        hashes_per_second = self.nodes[0].getnetworkhashps()
        # This should be 2 hashes every 10 minutes or 1/300
        assert abs(hashes_per_second * 300 - 1) < 0.0001

    def _test_stopatheight(self):
        """Check that -stopatheight shuts the node down at height 207."""
        assert_equal(self.nodes[0].getblockcount(), 200)
        self.nodes[0].generatetoaddress(6, self.nodes[0].get_deterministic_priv_key().address)
        assert_equal(self.nodes[0].getblockcount(), 206)
        self.log.debug('Node should not stop at this height')
        assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
        try:
            self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)
        except (ConnectionError, http.client.BadStatusLine):
            pass  # The node already shut down before response
        self.log.debug('Node should stop at this height...')
        self.nodes[0].wait_until_stopped()
        self.start_node(0)
        assert_equal(self.nodes[0].getblockcount(), 207)

    def _test_waitforblockheight(self):
        """Check waitforblockheight after an invalidated fork tip."""
        self.log.info("Test waitforblockheight")
        node = self.nodes[0]
        peer = node.add_p2p_connection(P2PInterface())

        current_height = node.getblock(node.getbestblockhash())['height']

        # Create a fork somewhere below our current height, invalidate the tip
        # of that fork, and then ensure that waitforblockheight still
        # works as expected.
        #
        # (Previously this was broken based on setting
        # `rpc/blockchain.cpp:latestblock` incorrectly.)
        #
        b20hash = node.getblockhash(20)
        b20 = node.getblock(b20hash)

        def solve_and_send_block(prevhash, height, time):
            b = create_block(prevhash, create_coinbase(height), time)
            b.solve()
            peer.send_and_ping(msg_block(b))
            return b

        b21f = solve_and_send_block(int(b20hash, 16), 21, b20['time'] + 1)
        b22f = solve_and_send_block(b21f.sha256, 22, b21f.nTime + 1)

        node.invalidateblock(b22f.hash)

        def assert_waitforheight(height, timeout=2):
            assert_equal(
                node.waitforblockheight(height=height, timeout=timeout)['height'],
                current_height)

        assert_waitforheight(0)
        assert_waitforheight(current_height - 1)
        assert_waitforheight(current_height)
        assert_waitforheight(current_height + 1)

    def _test_getblock(self):
        """Check getblock verbosity levels and fee reporting with pruned undo."""
        node = self.nodes[0]

        miniwallet = MiniWallet(node)
        miniwallet.generate(5)
        node.generate(100)

        fee_per_byte = Decimal('0.00000010')
        fee_per_kb = 1000 * fee_per_byte

        miniwallet.send_self_transfer(fee_rate=fee_per_kb, from_node=node)
        blockhash = node.generate(1)[0]

        self.log.info("Test that getblock with verbosity 1 doesn't include fee")
        block = node.getblock(blockhash, 1)
        assert 'fee' not in block['tx'][1]

        self.log.info('Test that getblock with verbosity 2 includes expected fee')
        block = node.getblock(blockhash, 2)
        tx = block['tx'][1]
        assert 'fee' in tx
        assert_equal(tx['fee'], tx['vsize'] * fee_per_byte)

        self.log.info("Test that getblock with verbosity 2 still works with pruned Undo data")
        datadir = get_datadir_path(self.options.tmpdir, 0)

        def move_block_file(old, new):
            old_path = os.path.join(datadir, self.chain, 'blocks', old)
            new_path = os.path.join(datadir, self.chain, 'blocks', new)
            os.rename(old_path, new_path)

        # Move instead of deleting so we can restore chain state afterwards
        move_block_file('rev00000.dat', 'rev_wrong')

        block = node.getblock(blockhash, 2)
        assert 'fee' not in block['tx'][1]

        # Restore chain state
        move_block_file('rev_wrong', 'rev00000.dat')
# Run the functional test when executed directly.
if __name__ == '__main__':
    BlockchainTest().main()
| |
"""Snipppets of potentially reusable code that don't deserve their
own library."""
import bisect
import contextlib
import inspect
import os
import shutil
import signal
import sys
import tempfile
import unittest
from functools import wraps
class Location:
    """A position in a named text: file/source name, line, and column."""

    def __init__(self, name, line, col):
        self.name, self.line, self.col = name, line, col

    def __str__(self):
        # Rendered as "name:line.col", e.g. "foo.py:12.3".
        return f"{self.name}:{self.line}.{self.col}"
class Locator:
    """Locator provides a way to convert an absolute offset in a
    string into a Location object.

    The constructor records the starting offset of every line;
    locate() then maps an offset to a 1-based line and 0-based column.
    """

    def __init__(self, data, name='<string>'):
        self.name = name
        # line_offsets[i] is the absolute offset at which line i+1 begins;
        # a final sentinel entry holds the total length of the data.
        offsets = [0]
        running = 0
        for line in data.splitlines(True):
            running += len(line)
            offsets.append(running)
        self.line_offsets = offsets

    def locate(self, offset):
        """Return a Location() object for the given offset."""
        line_no = bisect.bisect_right(self.line_offsets, offset)
        column = offset - self.line_offsets[line_no - 1]
        return Location(self.name, line_no, column)
def import_from_dir(module_name, dir_name):
    """Import a module from a specific directory.

    Sometimes you might want to load a package from a specific
    directory. For example, you may be loading a plugin of some
    description.

    This function ensures that only modules from the specified
    directory are loaded, to avoid any chance of loading a module of
    the same name from somewhere else: sys.path is temporarily
    replaced with [dir_name] for the duration of the import.

    After loading, the module is removed from sys.modules to avoid
    other namespace clashes (any previously imported module of the
    same name is restored).

    Arguments:
        module_name -- name of the module to import
        dir_name -- directory to import it from

    Returns:
        The imported module object.
    """
    saved_sys_path = sys.path
    saved_module = sys.modules.get(module_name)
    # BUG FIX: dir_name was previously unused, so the import did not
    # actually come from the requested directory.
    sys.path = [dir_name]
    try:
        return __import__(module_name)
    finally:
        sys.path = saved_sys_path
        if saved_module is not None:
            sys.modules[module_name] = saved_module
        else:
            # pop() rather than del: if the import itself failed the
            # module may never have been registered.
            sys.modules.pop(module_name, None)
def dict_inverse(dct, exact=False):
    """Build a new dictionary indexed by the values of `dct`.

    Several keys of the original may map to the same value, so each
    value in the result maps to a list of the original keys (in
    undefined order).

    Example:

    > dict_inverse({1: 'a', 2: 'a', 3: 'c'})
    { 'a': [1, 2], 'c': [3]

    If `dct` has an exact inverse mapping, pass `exact=True` and the
    result maps each value directly to its single key (not a list).

    Example:

    > dict_inverse({1: 'a', 2: 'b', 3: 'c'}, exact=True)
    { 'a': 1, 'b': 2, 'c': 3}

    Note: no checking is done when exact is True; if several keys share
    a value it is undefined which key the value ends up mapped to.
    """
    if exact:
        return {v: k for k, v in dct.items()}
    inverted = {}
    for original_key, original_value in dct.items():
        if original_value not in inverted:
            inverted[original_value] = []
        inverted[original_value].append(original_key)
    return inverted
class _GeneratorSimpleContextManager(contextlib._GeneratorContextManager):
    """Helper for @simplecontextmanager decorator.

    Unlike contextlib's version, the generator is always resumed with
    next() (never gen.throw()), so the code after ``yield`` runs as
    plain cleanup and exceptions raised in the ``with`` body are never
    suppressed.
    """

    def __exit__(self, type, value, traceback):
        if type is None:
            # Normal exit: run the generator's cleanup code (after the yield).
            try:
                next(self.gen)
            except StopIteration:
                return
            else:
                # The generator yielded a second time.
                raise RuntimeError("generator didn't stop")
        else:
            # The with-body raised: still resume the generator so its
            # cleanup code runs, then re-raise the body's exception.
            if value is None:
                # Need to force instantiation so we can reliably
                # tell if we get the same exception back
                value = type()
            try:
                next(self.gen)
            except StopIteration as exc:
                # Suppress the exception *unless* it's the same exception that
                # was passed to throw(). This prevents a StopIteration
                # raised inside the "with" statement from being suppressed
                return exc is not value
            else:
                raise RuntimeError("generator didn't stop")
            finally:
                # NOTE(review): this 'return False' overrides both the
                # 'return' in the except clause and the RuntimeError above,
                # so the body's exception always propagates and any
                # exception raised by the cleanup code itself is discarded
                # in this branch -- presumably intentional (the module's
                # own tests rely on the body exception winning); confirm.
                return False
def simplecontextmanager(func):
    """@simplecontextmanager decorator.

    Typical usage:

        @simplecontextmanager
        def some_generator(<arguments>):
            <setup>
            yield <value>
            <cleanup>

    This makes this:

        with some_generator(<arguments>) as <variable>:
            <body>

    equivalent to this:

        <setup>
        try:
            <variable> = <value>
            <body>
        finally:
            <cleanup>

    Unlike contextlib.contextmanager, an exception raised in the body is
    never suppressed; the generator's <cleanup> code still runs (see
    _GeneratorSimpleContextManager.__exit__).
    """
    @wraps(func)
    def helper(*args, **kwds):
        return _GeneratorSimpleContextManager(func, *args, **kwds)
    return helper
@simplecontextmanager
def chdir(path):
    """Current-working directory context manager.

    Makes the current working directory the specified `path` for the
    duration of the context.

    Example:

    with chdir("newdir"):
        # Do stuff in the new directory
        pass
    """
    cwd = os.getcwd()  # remember where we were so we can restore on exit
    os.chdir(path)
    yield
    # simplecontextmanager resumes the generator even if the body raised,
    # so the original directory is always restored.
    os.chdir(cwd)
@simplecontextmanager
def umask(new_mask):
    """umask context manager.

    Makes `new_mask` the current umask, and restores the previous umask
    after the context closes.
    """
    cur_mask = os.umask(new_mask)  # os.umask returns the previous mask
    yield
    os.umask(cur_mask)
@simplecontextmanager
def update_env(env):
    """os.environ context manager.

    Updates os.environ with the specified `env` for the duration of the
    context; previous values (or absence) are restored on exit.
    """
    old_env = {}
    for key in env:
        # Record None for variables that did not previously exist so we
        # know to delete them again on exit.
        old_env[key] = os.environ.get(key)
        os.environ[key] = env[key]
    yield
    for key in old_env:
        if old_env[key] is None:
            del os.environ[key]
        else:
            os.environ[key] = old_env[key]
@simplecontextmanager
def tempdir():
    """Yield a freshly created temporary directory; remove it (and its
    contents) when the context closes, even if the body raised."""
    tmpdir = tempfile.mkdtemp()
    yield tmpdir
    shutil.rmtree(tmpdir)
def touch(path):
    """Create an empty file, like the unix ``touch`` command (an
    existing file is truncated to zero length)."""
    with open(path, 'w'):
        pass
def file_list(root, full_path=False, sort=True):
    """Yield every file under `root` (recursively).

    Arguments:
        root -- directory to walk
        full_path -- if True, yield paths including `root`; otherwise
                     yield paths relative to `root`
        sort -- if True, visit directories and files in sorted order
    """
    prefix = root if root.endswith('/') else root + '/'
    for base, dirs, files in os.walk(prefix):
        if sort:
            # Sorting dirs in place makes os.walk descend in order too.
            dirs.sort()
            files.sort()
        rel_base = base if full_path else base[len(prefix):]
        for name in files:
            yield os.path.join(rel_base, name)
# Map signal numbers back to their SIG* names (e.g. 9 -> 'SIGKILL').
SIG_NAMES = {num: name for name, num in signal.__dict__.items()
             if name.startswith('SIG')}


def show_exit(exit_code):
    """Render an os.system()/wait()-style exit code as a readable string."""
    signum = exit_code & 0xff
    status = exit_code >> 8
    if signum:
        name = SIG_NAMES.get(signum, 'Unknown signal {}'.format(signum))
        return "signal: {}".format(name)
    return "exit: {}".format(status)
class TestUtil(unittest.TestCase):
    """Unit tests for the helpers in this module."""

    def test_chdir(self):
        """chdir() switches directory inside the context, restores after."""
        cur = os.getcwd()
        with chdir('/'):
            assert os.getcwd() == '/'
        assert os.getcwd() == cur

    def test_dict_inverse(self):
        """dict_inverse() groups keys by value; exact=True gives 1:1 inverse."""
        assert dict_inverse({1: 'a', 2: 'a', 3: 'c'}) == {'a': [1, 2], 'c': [3]}
        assert dict_inverse({1: 'a', 2: 'b', 3: 'c'}, True) == {'a': 1, 'b': 2, 'c': 3}

    def test_simplecontextmanager(self):
        """Cleanup after the yield runs on both normal and exception exit."""
        before = None
        after = None

        @simplecontextmanager
        def foo():
            nonlocal before
            nonlocal after
            after = None
            before = True
            yield 1
            after = True

        with foo() as x:
            assert x == 1
            assert before
            assert after is None
        assert before
        assert after
        try:
            with foo() as x:
                assert x == 1
                assert before
                assert after is None
                raise Exception('check')
        except Exception as exc:
            # The body's exception propagates, and the cleanup still ran.
            assert exc.args == ('check', )
            assert before
            assert after
        else:
            assert False

    def test_simplecontextmanager_double_yield(self):
        """A generator that yields twice triggers "generator didn't stop"."""
        before = None
        after = None

        @simplecontextmanager
        def foo():
            nonlocal before
            nonlocal after
            after = None
            before = True
            yield 1
            yield 2
            after = True

        try:
            with foo() as x:
                assert x == 1
                assert before
                assert after is None
        except RuntimeError as exc:
            assert exc.args == ("generator didn't stop", )
        else:
            assert False

    def test_simplecontextmanager_raise(self):
        """Exceptions raised in cleanup or in the body propagate correctly."""
        before = None
        after = None

        @simplecontextmanager
        def foo():
            nonlocal before
            nonlocal after
            after = None
            before = True
            yield 1
            raise Exception("with")

        try:
            with foo() as x:
                assert x == 1
                assert before
                assert after is None
        except Exception as exc:
            # Normal exit: the cleanup's own exception propagates.
            assert exc.args == ('with', )
            assert before
            assert after is None
        else:
            assert False
        try:
            with foo() as x:
                assert x == 1
                assert before
                assert after is None
                raise Exception("check")
        except Exception as exc:
            # Body raised: the body's exception wins over the cleanup's.
            assert exc.args == ('check', )
            assert before
            assert after is None
        else:
            assert False

    def test_tempdir(self):
        """tempdir() removes the directory on normal and exception exit."""
        tempdir_name = None
        with tempdir() as t:
            tempdir_name = t
            assert os.path.exists(tempdir_name)
        assert not os.path.exists(tempdir_name)
        try:
            with tempdir() as t:
                tempdir_name = t
                assert os.path.exists(tempdir_name)
                raise Exception('tempdir_with_fail')
        except Exception as exc:
            assert exc.args == ('tempdir_with_fail', )
        else:
            assert False
        assert not os.path.exists(tempdir_name)

    def test_file_list(self):
        """file_list() yields relative paths, sorted within each directory."""
        with tempdir() as t:
            touch(os.path.join(t, 'b'))
            touch(os.path.join(t, 'a'))
            os.mkdir(os.path.join(t, 'c'))
            touch(os.path.join(t, 'c', '1'))
            assert list(file_list(t)) == ['a', 'b', 'c/1']

    def test_show_exit(self):
        """show_exit() decodes os.system()-style exit codes."""
        assert show_exit(os.system("exit 1")) == "exit: 1"
        assert show_exit(os.system("exit 2")) == "exit: 2"
        assert show_exit(os.system("kill -9 $$")) == "signal: SIGKILL"
def get_gcc_headers(cpp="arm-none-eabi-cpp"):
    """Return the preprocessor's built-in ``#include <...>`` search paths.

    Runs the given preprocessor with ``-Wp,-v`` and parses the header
    search list out of its diagnostic output.

    Arguments:
        cpp -- preprocessor executable to query
               (default: "arm-none-eabi-cpp", preserving old behaviour)

    Returns:
        List of include path strings, in search order.

    Raises:
        subprocess.CalledProcessError -- if the preprocessor exits non-zero.
    """
    # BUG FIX: 'subprocess' was used here without being imported anywhere
    # in this module, so this function raised NameError when called.
    output = subprocess.check_output(
        [cpp, "-Wp,-v"], stderr=subprocess.STDOUT,
        stdin=subprocess.PIPE).decode().splitlines()
    include_paths = []
    in_search_list = False
    for line in output:
        if not in_search_list:
            # Skip preamble until the search-list marker appears.
            if line == '#include <...> search starts here:':
                in_search_list = True
        elif line == 'End of search list.':
            break
        else:
            include_paths.append(line.strip())
    return include_paths
def range1(n):
    """Yield the integers 1 through `n` inclusive (1-based range)."""
    current = 1
    while current <= n:
        yield current
        current += 1
def debug(msg):
    """Print `msg` formatted with the caller's local variables.

    Example:  x = 1; debug("x is {x}")  prints "x is 1".
    """
    # inspect.stack()[1][0] is the caller's frame; its f_locals supply
    # the values for the {name} placeholders in msg.
    print(msg.format(**inspect.stack()[1][0].f_locals))
class SysExit(Exception):
    """Exception used with script() to exit with a status and optional message.

    Raised by a main() function run via script(); script() prints `msg`
    (if set) to stderr and calls sys.exit(code).
    """
    def __init__(self, code, msg=None):
        super().__init__()
        # code -- process exit status to use
        # msg -- optional message printed to stderr before exiting
        self.code = code
        self.msg = msg
def script():
    """Run the calling module's main() if it is being executed as a script.

    Intended to be called at module top level in place of the usual
    ``if __name__ == '__main__': main()`` boilerplate. Inspects the
    caller's frame; when its __name__ is '__main__', calls its main()
    and exits with the returned value, translating SysExit into
    sys.exit() with an optional stderr message.
    """
    # Frame of the module that called script().
    s = inspect.stack()[1][0]
    caller_name = s.f_locals['__name__']
    if caller_name != '__main__':
        return
    caller_main = s.f_locals.get('main')
    if caller_main is None:
        print("main() not found.", file=sys.stderr)
        sys.exit(1)
    try:
        sys.exit(caller_main())
    except SysExit as e:
        if e.msg:
            print(e.msg, file=sys.stderr)
        sys.exit(e.code)
    except KeyboardInterrupt:
        # FIXME: This could probably be handled
        # better to match the Ctrl-C signal exit
        # code
        sys.exit(1)
def camelify(name):
    """Convert snake_case to CamelCase: foo_bar => FooBar."""
    return ''.join(part.capitalize() for part in name.split('_'))
def yield_until_exception(exception, fn):
    """Generate values by calling fn() repeatedly until it raises `exception`.

    The exception ends the generator; any other exception propagates.
    """
    while True:
        try:
            yield fn()
        except exception:
            return
def attr_dict(itr, attr):
    """Index the items of `itr` by their `attr` attribute.

    Later items silently overwrite earlier ones with the same key.
    """
    indexed = {}
    for item in itr:
        indexed[getattr(item, attr)] = item
    return indexed
def attr_dict_grouped(itr, attr):
    """Group the items of `itr` into lists keyed by their `attr` attribute."""
    grouped = {}
    for item in itr:
        group_key = getattr(item, attr)
        if group_key not in grouped:
            grouped[group_key] = []
        grouped[group_key].append(item)
    return grouped
def attr_dict_frozen(itr, attr):
    """Given an iterable create a dictionary of the values indexed by a named attribute.

    Same as attr_dict(), but the result is an immutable, hashable
    frozendict.
    """
    return frozendict(attr_dict(itr, attr))
class frozendict(dict):
    """Inspired from: http://code.activestate.com/recipes/414283-frozen-dictionaries/

    An immutable, hashable dict: every mutating method is replaced by a
    descriptor that raises AttributeError on access, and the hash is
    computed once at construction (so all values must be hashable).
    """
    # NOTE(review): ('_hash') is a plain string, not a 1-tuple; __slots__
    # accepts a single string, so this still declares exactly one slot.
    __slots__ = ('_hash')

    @property
    def _blocked_attribute(obj):
        # Merely *accessing* any of the aliased names below triggers this
        # property getter, so mutation attempts fail immediately.
        raise AttributeError("A frozendict cannot be modified.")

    __delitem__ = __setitem__ = clear = _blocked_attribute
    pop = popitem = setdefault = update = _blocked_attribute

    def __new__(cls, *args):
        # Populate via dict.__init__ here; __init__ below is a no-op so
        # the contents cannot be replaced after construction.
        new = dict.__new__(cls)
        dict.__init__(new, *args)
        new._hash = hash(frozenset(new.items()))
        return new

    def __init__(self, *args):
        pass

    def __hash__(self):
        return self._hash

    def __repr__(self):
        return "frozendict(%s)" % dict.__repr__(self)
def split_inclusive(lst, condition):
    """Split `lst` into consecutive chunks, starting a new chunk at every
    element for which `condition` is true.

    Each matching element begins its own chunk (it is included at the
    front of the new chunk, not the end of the previous one). The first
    chunk may be empty if lst[0] matches.
    """
    chunk_start = 0
    for position, element in enumerate(lst):
        if condition(element):
            yield lst[chunk_start:position]
            chunk_start = position
    yield lst[chunk_start:]
def tuple_gen(obj, name_x, name_y):
    """Yield (x, y) pairs by iterating obj.<name_x>, then each x.<name_y>."""
    for outer in getattr(obj, name_x):
        inner_items = getattr(outer, name_y)
        yield from ((outer, inner) for inner in inner_items)
def dict_grouped_by_key(itr, key):
    """Group the items of `itr` into lists keyed by key(item)."""
    groups = {}
    for item in itr:
        group_key = key(item)
        if group_key not in groups:
            groups[group_key] = []
        groups[group_key].append(item)
    return groups
def find_duplicates_by_key(itr, key):
    """Return {key: [items]} for every key shared by more than one item."""
    # Group items by key(item), then keep only the multi-member groups.
    grouped = {}
    for item in itr:
        grouped.setdefault(key(item), []).append(item)
    return {k: items for k, items in grouped.items() if len(items) > 1}
def remove_duplicates_by_key(itr, key):
    """Yield items from `itr`, skipping any whose key(item) was already seen.

    The first item with each key wins; original order is preserved.
    """
    seen_keys = set()
    for item in itr:
        item_key = key(item)
        if item_key in seen_keys:
            continue
        seen_keys.add(item_key)
        yield item
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Violin Memory iSCSI Driver for Openstack Cinder
Uses Violin REST API via XG-Tools to manage a standard V6000 series
flash array to provide network block-storage services.
by Ryan Lucio
Senior Software Engineer
Violin Memory
---
Each allocated lun is configured as writable with a 512b blocksize.
*** Note that The fields for zero'ing the lun or performing thin
provisioning are not supported by vSHARE at this time. ***
Each new volume/lun is exported to a new iSCSI target specifically
made for it. The idea is that this allows CHAP authentication to be
managed independently on a per-volume basis. The export is then
configured to use a specific initiator group (igroup) that has been
pre-configured for use by Nova/Cinder hosts.
When an initiator has been chosen to connect to one of the available
luns (eg via 'nova volume-attach'), it will be added to the configured
igroup allowing it to see the export. It will also be given any
target location and authentication information needed to connect to
the chosen lun.
Driver support (verified for G5.5.2):
-------------------------------------
Volume Create/Delete: YES
Volume Attach/Detach: YES
Snapshot Create/Delete: NO
Create Volume from Snapshot: NO
Get Volume Stats: YES
Copy Image to Volume: YES
Copy Volume to Image: YES
Clone Volume: NO
"""
import time
import random
from oslo.config import cfg
from cinder import exception
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.volume.driver import ISCSIDriver
LOG = logging.getLogger(__name__)
try:
from vxg.core.session import XGSession
from vxg.core.node import XGNode
import vxg
except ImportError:
LOG.exception(
_("The Violin v6000 driver for Cinder requires the presence of " +
"the Violin 'XG-Tools', python libraries for facilitating " +
"communication between applications and the v6000 XML API. " +
"The libraries can be downloaded from the Violin Memory " +
"support website at http://www.violin-memory.com/support"))
raise
else:
LOG.info(_("Running with xg-tools version: %s" % vxg.__version__))
# Driver configuration options, registered below on FLAGS.
violin_opts = [
    cfg.StrOpt('gateway_vip',
               default='',
               help='IP address or hostname of the v6000 master VIP'),
    cfg.StrOpt('gateway_mga',
               default='',
               help='IP address or hostname of mg-a'),
    cfg.StrOpt('gateway_mgb',
               default='',
               help='IP address or hostname of mg-b'),
    cfg.StrOpt('gateway_user',
               default='admin',
               help='User name for connecting to the Memory Gateway'),
    # BUG FIX: the help text was a copy-paste of the user-name option;
    # mark the option secret so the password is not written to logs.
    cfg.StrOpt('gateway_password',
               default='',
               help='Password for connecting to the Memory Gateway',
               secret=True),
    cfg.IntOpt('gateway_iscsi_port',
               default=3260,
               help='IP port to use for iSCSI targets'),
    cfg.StrOpt('gateway_iscsi_target_prefix',
               default='iqn.2004-02.com.vmem:',
               help='prefix for iscsi volumes'),
    cfg.StrOpt('gateway_iscsi_igroup_name',
               default='openstack',
               help='name of igroup for initiators'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(violin_opts)
class InvalidBackendConfig(exception.CinderException):
    """Raised by check_for_setup_error() when the gateway config is unusable."""
    message = _("Volume backend config is invalid: %(reason)s")
class ViolinDriver(ISCSIDriver):
""" Executes commands relating to Violin Memory Arrays """
def __init__(self, *args, **kwargs):
super(ViolinDriver, self).__init__(*args, **kwargs)
self.session_start_time = 0
self.session_timeout = 900
self.array_info = []
self.vmem_vip=None
self.vmem_mga=None
self.vmem_mgb=None
self.container=""
self.device_id=""
self.stats={}
def do_setup(self, context):
""" Any initialization the driver does while starting """
if not FLAGS.gateway_vip:
raise exception.InvalidInput(reason=_(
'Gateway VIP is not set'))
if not FLAGS.gateway_mga:
raise exception.InvalidInput(reason=_(
'Gateway IP for mg-a is not set'))
if not FLAGS.gateway_mgb:
raise exception.InvalidInput(reason=_(
'Gateway IP for mg-b is not set'))
self.vmem_vip = vxg.open(FLAGS.gateway_vip, FLAGS.gateway_user,
FLAGS.gateway_password)
self.vmem_mga = vxg.open(FLAGS.gateway_mga, FLAGS.gateway_user,
FLAGS.gateway_password)
self.vmem_mgb = vxg.open(FLAGS.gateway_mgb, FLAGS.gateway_user,
FLAGS.gateway_password)
self.gateway_iscsi_ip_addresses_mga = self._get_active_iscsi_ips(self.vmem_mga)
for ip in self.gateway_iscsi_ip_addresses_mga:
self.array_info.append({ "node": self._get_hostname('mga'),
"addr": ip,
"conn": self.vmem_mga })
self.gateway_iscsi_ip_addresses_mgb = self._get_active_iscsi_ips(self.vmem_mgb)
for ip in self.gateway_iscsi_ip_addresses_mgb:
self.array_info.append({ "node": self._get_hostname('mgb'),
"addr": ip,
"conn": self.vmem_mgb })
vip = self.vmem_vip.basic
ret_dict = vip.get_node_values("/vshare/state/local/container/*")
if ret_dict:
self.container = ret_dict.items()[0][1]
ret_dict = vip.get_node_values(
"/media/state/array/%s/chassis/system/dev_id" % self.container)
if ret_dict:
self.device_id = ret_dict.items()[0][1]
ret_dict = vip.get_node_values("/wsm/inactivity_timeout")
if ret_dict:
self.timeout = ret_dict.items()[0][1]
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
vip = self.vmem_vip.basic
if len(self.container) == 0:
raise InvalidBackendConfig(reason=_('container is missing'))
if len(self.device_id) == 0:
raise InvalidBackendConfig(reason=_('device ID is missing'))
bn = "/vshare/config/iscsi/enable"
resp = vip.get_node_values(bn)
if resp[bn] != True:
raise InvalidBackendConfig(reason=_('iSCSI is not enabled'))
bn = "/vshare/config/igroup/%s" % FLAGS.gateway_iscsi_igroup_name
resp = vip.get_node_values(bn)
if len(resp.keys()) == 0:
raise InvalidBackendConfig(reason=_('igroup is missing'))
if len(self.gateway_iscsi_ip_addresses_mga) == 0:
raise InvalidBackendConfig(reason=_(
'no available iSCSI IPs on mga'))
if len(self.gateway_iscsi_ip_addresses_mgb) == 0:
raise InvalidBackendConfig(reason=_(
'no available iSCSI IPs on mgb'))
def create_volume(self, volume):
""" Creates a volume """
self._login()
self._create_lun(volume)
def delete_volume(self, volume):
""" Deletes a volume """
self._login()
self._delete_lun(volume)
def create_volume_from_snapshot(self, volume, snapshot):
""" Creates a volume from a snapshot """
# NYI (RDL: The V6000's 5.x.x platform does not support Data
# Management features)
#
raise NotImplementedError
def create_cloned_volume(self, volume, src_vref):
"""Creates a clone of the specified volume."""
# NYI (RDL: The V6000's 5.x.x platform does not support Data
# Management features)
#
raise NotImplementedError
def create_snapshot(self, snapshot):
""" Creates a snapshot from an existing volume """
# NYI (RDL: The V6000's 5.x.x platform does not support Data
# Management features)
#
raise NotImplementedError
def delete_snapshot(self, snapshot):
""" Deletes a snapshot """
# NYI (RDL: The V6000's 5.x.x platform does not support Data
# Management features)
#
raise NotImplementedError
def ensure_export(self, context, volume):
"""Synchronously checks and re-exports volumes at cinder start time """
pass
def create_export(self, context, volume):
""" Exports the volume """
pass
def remove_export(self, context, volume):
""" Removes an export for a logical volume """
pass
def initialize_connection(self, volume, connector):
"""Initializes the connection (target<-->initiator) """
# TODO (rdl): it appears that the backend may not have finished exporting
# by the time the client attempts to connect in some cases. We can do
# nothing and just force the user to retry or we can do some timing "mgmt"
#
self._login()
vol = self._get_short_name(volume['name'])
tgt = self._create_iscsi_target(volume)
lun = self._export_lun(volume)
iqn = "%s%s:%s" % (FLAGS.gateway_iscsi_target_prefix, tgt['node'], vol)
self._add_igroup_member(connector)
self.vmem_vip.basic.save_config()
properties = {}
properties['target_discovered'] = False
properties['target_portal'] = '%s:%s' % (tgt['addr'], '3260')
properties['target_iqn'] = iqn
properties['target_lun'] = lun
properties['volume_id'] = volume['id']
properties['auth_method'] = 'CHAP'
properties['auth_username'] = ''
properties['auth_password'] = ''
return {'driver_volume_type': 'iscsi', 'data': properties}
def terminate_connection(self, volume, connector, force=False, **kwargs):
""" Terminates the connection (target<-->initiator) """
super(ViolinDriver, self).terminate_connection(volume, connector)
self._login()
self._remove_igroup_member(connector)
self._unexport_lun(volume)
self._delete_iscsi_target(volume)
self.vmem_vip.basic.save_config()
def get_volume_stats(self, refresh=False):
""" Get volume stats """
if refresh or not self.stats:
self._update_stats()
return self.stats
def _create_lun(self, volume):
"""
Creates a new lun.
The equivalent CLI command is "lun create container
<container_name> name <lun_name> size <gb>"
Arguments:
volume -- volume object provided by the Manager
"""
v = self.vmem_vip
LOG.info(_("Creating lun %s (%d GB)"), volume['name'], volume['size'])
# using the defaults for other fields: (container, name, size,
# quantity, nozero, thin, readonly, startnum, blksize)
#
for i in range(3):
self._wait_for_lockstate()
resp = v.lun.create_lun(self.container, volume['name'],
volume['size'], 1, "0", "0", "w", 1, 512)
if resp['code'] == 0 and not 'try again later' in resp['message']:
break
if resp['code'] != 0 or 'try again later' in resp['message']:
raise exception.Error(_('Failed to create LUN: %d, %s')
% (resp['code'], resp['message']))
def _delete_lun(self, volume):
"""
Deletes a lun.
The equivalent CLI command is "no lun create container
<container_name> name <lun_name>"
Arguments:
volume -- volume object provided by the Manager
"""
v = self.vmem_vip
LOG.info(_("Deleting lun %s"), volume['name'])
for i in range(3):
self._wait_for_lockstate()
resp = v.lun.bulk_delete_luns(self.container, volume['name'])
if resp['code'] == 0 and not 'try again later' in resp['message']:
break
if resp['code'] != 0 or 'try again later' in resp['message']:
raise exception.Error(_('Failed to delete LUN: %d, %s')
% (resp['code'], resp['message']))
def _create_iscsi_target(self, volume):
"""
Creates a new target for use in exporting a lun
Openstack does not yet support multipathing. We still create
HA targets but we pick a single random target for the
Openstack infrastructure to use. This at least allows us to
evenly distribute LUN connections across the storage cluster.
The equivalent CLI commands are "iscsi target create
<target_name>" and "iscsi target bind <target_name> to
<ip_of_mg_eth_intf>".
Arguments:
volume -- volume object provided by the Manager
Returns:
reference to randomly selected target object
"""
v = self.vmem_vip
target_name = self._get_short_name(volume['name'])
LOG.info(_("Creating iscsi target %s"), target_name)
resp = v.iscsi.create_iscsi_target(target_name)
if resp['code'] != 0:
raise exception.Error(_('Failed to create iscsi target: %d, %s')
% (resp['code'], resp['message']))
resp = self.vmem_mga.iscsi.bind_ip_to_target(
target_name, self.gateway_iscsi_ip_addresses_mga)
if resp['code'] != 0:
raise exception.Error(_("Failed to bind iSCSI targets: %d, %s")
% (resp['code'], resp['message']))
resp = self.vmem_mgb.iscsi.bind_ip_to_target(
target_name, self.gateway_iscsi_ip_addresses_mgb)
if resp['code'] != 0:
raise exception.Error(_("Failed to bind iSCSI targets: %d, %s")
% (resp['code'], resp['message']))
return self.array_info[random.randint(0, len(self.array_info)-1)]
def _delete_iscsi_target(self, volume):
"""
Deletes the iscsi target for a lun
iSCSI targets must be deleted from each gateway separately.
The CLI equivalent is "no iscsi target create <target_name>".
Arguments:
volume -- volume object provided by the Manager
"""
v = self.vmem_vip
target_name = self._get_short_name(volume['name'])
# TODO (rdl): afterglow+ does not require the user to manually
# delete both iscsi target bindings before deleting the target
#
LOG.info(_("Deleting iscsi target for %s"), target_name)
resp = self.vmem_mga.iscsi.unbind_ip_from_target(
target_name, self.gateway_iscsi_ip_addresses_mga)
if resp['code'] != 0:
raise exception.Error(_("Failed to unbind iSCSI targets: %d, %s")
% (resp['code'], resp['message']))
resp = self.vmem_mgb.iscsi.unbind_ip_from_target(
target_name, self.gateway_iscsi_ip_addresses_mgb)
if resp['code'] != 0:
raise exception.Error(_("Failed to unbind iSCSI targets: %d, %s")
% (resp['code'], resp['message']))
resp = v.iscsi.delete_iscsi_target(target_name)
if resp['code'] != 0:
raise exception.Error(_('Failed to delete iSCSI target: %d, %s')
% (resp['code'], resp['message']))
def _export_lun(self, volume):
"""
Generates the export configuration for the given volume
The equivalent CLI command is "lun export container
<container_name> name <lun_name>"
Arguments:
volume -- volume object provided by the Manager
Returns:
lun_id -- the LUN ID assigned by the backend
"""
v = self.vmem_vip
target_name = self._get_short_name(volume['name'])
# TODO(rdl): new encryption code in afterglow requires that lun state nodes
# for encryption exist before running the export or else the export will fail
# on the backend (via /vshare/state/local/container/%s/lun/%s/encrypted)
#
LOG.info(_("Exporting lun %s"), volume['name'])
resp = v.lun.export_lun(self.container, volume['name'], target_name,
FLAGS.gateway_iscsi_igroup_name, -1)
if resp['code'] != 0:
raise exception.Error(_('LUN export failed: %d, %s')
% (resp['code'], resp['message']))
self._wait_for_exportstate(volume['name'], True)
lun_id = self._get_lun_id(self.container, volume['name'], target_name,
FLAGS.gateway_iscsi_igroup_name)
return lun_id
def _unexport_lun(self, volume):
"""
Removes the export configuration for the given volume.
The equivalent CLI command is "no lun export container
<container_name> name <lun_name>"
Arguments:
volume -- volume object provided by the Manager
"""
v = self.vmem_vip
LOG.info(_("Unexporting lun %s"), volume['name'])
resp = v.lun.unexport_lun(self.container, volume['name'],
"all", "all", -1)
if resp['code'] != 0:
raise exception.Error(_("LUN unexport failed: %d, %s")
% (resp['code'], resp['message']))
self._wait_for_exportstate(volume['name'], False)
def _add_igroup_member(self, connector):
"""
Add an initiator to the openstack igroup so it can see exports.
The equivalent CLI command is "igroup addto name <igroup_name>
initiators <initiator_name>"
Arguments:
connector -- connector object provided by the Manager
"""
v = self.vmem_vip
LOG.info(_("Adding initiator %s to igroup"), connector['initiator'])
resp = v.igroup.add_initiators(FLAGS.gateway_iscsi_igroup_name,
connector['initiator'])
if resp['code'] != 0:
raise exception.Error(_('Failed to add igroup member: %d, %s')
% (resp['code'], resp['message']))
def _remove_igroup_member(self, connector):
"""
Removes an initiator to the openstack igroup.
The equivalent CLI command is "no igroup addto name
<igroup_name> initiators <initiator_name>".
Arguments:
connector -- connector object passed from the manager
"""
v = self.vmem_vip
# do not remove the initiator from the igroup if it still has
# any active sessions on the backend
#
ids = v.basic.get_node_values('/vshare/state/global/*')
for i in ids:
bn = "/vshare/state/global/%d/target/iscsi/**" % ids[i]
iscsi_targets = v.basic.get_node_values(bn)
for t in iscsi_targets:
if iscsi_targets[t] == connector['initiator']:
return
LOG.info(_("Removing initiator %s from igroup"), connector['initiator'])
resp = v.igroup.delete_initiators(FLAGS.gateway_iscsi_igroup_name,
connector['initiator'])
if resp['code'] != 0 and resp['code'] != 14036:
# -code 14036, message 'Igroup <igroup> doesn't include
# initiator <initiator>'
#
raise exception.Error(_('Failed to remove igroup member: %s, %s')
% (resp['code'], resp['message']))
def _update_stats(self):
data = {}
total_gb = 'unknown'
alloc_gb = 'unknown'
free_gb = 'unknown'
backend_name = 'unknown'
vendor_name = 'Violin'
v = self.vmem_vip
bn1 = "/vshare/state/global/1/container/%s/total_bytes" % self.container
bn2 = "/vshare/state/global/1/container/%s/alloc_bytes" % self.container
bn3 = "/media/state/array/%s/chassis/system/type" % self.container
bn4 = "/hwinfo/state/system_mfr"
resp = v.basic.get_node_values([bn1, bn2, bn3, bn4])
if len(resp.keys()) == 4:
total_gb = resp[bn1] / 1024 / 1024 / 1024
alloc_gb = resp[bn2] / 1024 / 1024 / 1024
free_gb = total_gb - alloc_gb
backend_name = resp[bn3]
vendor_name = resp[bn4]
data['volume_backend_name'] = backend_name
data['vendor_name'] = vendor_name
data['driver_version'] = '1.0'
data['storage_protocol'] = 'iSCSI'
data['total_capacity_gb'] = total_gb
data['free_capacity_gb'] = free_gb
data['reserved_percentage'] = 0
data['QoS_support'] = False
self.stats = data
def _login(self, force=False):
"""
Get new api creds from the backend, only if needed.
Arguments:
force -- re-login on all sessions regardless of last login time
Returns:
True if sessions were refreshed, false otherwise.
"""
now = time.time()
if abs(now - self.session_start_time) >= self.session_timeout or \
force == True:
self.vmem_vip.basic.login()
self.vmem_mga.basic.login()
self.vmem_mgb.basic.login()
self.session_start_time = now
return True
return False
def _get_lun_id(self, container_name, volume_name, target_name, igroup_name):
"""
Queries the gateway to find the lun id for the exported volume.
Arguments:
container_name -- backend array flash container name
volume_name -- LUN to query
target_name -- iSCSI target associated with the LUN
igroup_name -- igroup associated with the LUN
Returns:
LUN ID for the exported lun as an integer.
"""
vip = self.vmem_vip.basic
bn = "/vshare/config/export/container/%s/lun/%s/target/%s/initiator/%s/lun_id" \
% (container_name, volume_name, target_name, igroup_name)
resp = vip.get_node_values(bn)
return resp[bn]
def _get_short_name(self, volume_name):
"""
Creates a vSHARE-compatible iSCSI target name.
The Folsom-style volume names are prefix(7) + uuid(36), which
is too long for vSHARE for target names. To keep things
simple we can just truncate the name to 32 chars.
Arguments:
volume_name -- name of volume/lun
Returns:
Shortened volume name as a string.
"""
return volume_name[:32]
def _iscsi_location(self, ip, port, iqn, lun=None):
"""
Create a properly formatted provider_location string.
Arguments:
ip -- iSCSI target IP address
port -- iSCSI target service port
iqn -- iSCSI target IQN
lun -- ID of the exported LUN
Returns:
provider_location as a formatted string.
"""
# the main driver.py _get_iscsi_properties() function has
# broken field parsing for the location string made here. We
# work around this by putting a blank space for the third
# field
#
return "%s:%s,%s%s %s" % (ip, port, " ", iqn, lun)
def _wait_for_exportstate(self, volume_name, state=False):
"""
Polls volume's export configuration root.
XG sets/queries following a request to create or delete a
lun export may fail on the backend if vshared is still
processing the export action. We can check whether it is
done by polling the export binding for a lun to
ensure it is created or deleted.
Arguments:
volume_name -- name of volume to be polled
state -- True to poll for existence, False for lack of
Returns:
True if the export state was eventually found, false otherwise.
"""
status = False
vip = self.vmem_vip.basic
# TODO (rdl): this implementation only waits on the master, but
# may need to additionally wait for the standby to finish the
# config sync
#
bn = "/vshare/config/export/container/%s/lun/%s" \
% (self.container, volume_name)
for i in range(30):
resp = vip.get_node_values(bn)
if state and len(resp.keys()):
status = True
break
elif (not state) and (not len(resp.keys())):
break
else:
time.sleep(1)
return status
def _wait_for_lockstate(self):
"""
Polls configured backend LVM lock.
Lun deletion will fail on the backend if vshared is still busy
deleting a lun from a previous request. We can check whether
it is 'ready' by polling the LVM lockstate for each gateway.
"""
vip = self.vmem_vip.basic
opts1 = [ XGNode('container', 'string', self.container),
XGNode('port', 'uint8', 1),
XGNode('dev_id', 'string', self.device_id) ]
opts2 = [ XGNode('container', 'string', self.container),
XGNode('port', 'uint8', 2),
XGNode('dev_id', 'string', self.device_id) ]
for i in range(30):
resp1 = vip.perform_action('/vshare/actions/vlock/lockstate', opts1)
resp2 = vip.perform_action('/vshare/actions/vlock/lockstate', opts2)
if resp1['message'][0] == '0' and resp2['message'][0]:
break
else:
time.sleep(1)
def _get_active_iscsi_ips(self, mg_conn):
"""
Get a list of gateway IP addresses that can be used for iSCSI.
Arguments:
mg_conn -- active XG connection to one of the gateways
Returns:
active_gw_iscsi_ips -- list of IP addresses
"""
active_gw_iscsi_ips = []
interfaces_to_skip = [ 'lo', 'vlan10', 'eth1', 'eth2', 'eth3' ]
bn = "/net/interface/config/*"
intf_list = mg_conn.basic.get_node_values(bn)
for i in intf_list:
do_skip = False
for s in interfaces_to_skip:
if intf_list[i] == s:
do_skip = True
break
if not do_skip:
bn1 = "/net/interface/state/%s/addr/ipv4/1/ip" % intf_list[i]
bn2 = "/net/interface/state/%s/flags/link_up" % intf_list[i]
resp = mg_conn.basic.get_node_values([bn1, bn2])
if len(resp.keys()) == 2 and resp[bn2] == True:
active_gw_iscsi_ips.append(resp[bn1])
return active_gw_iscsi_ips
    def _get_hostname(self, mg_to_query):
        """
        Get the hostname of one of the mgs (hostname is used in IQN).
        If the remote query fails then fall back to using the hostname
        provided in the cinder configuration file.
        Arguments:
            mg_to_query -- name of gateway to query 'mga' or 'mgb'
        Returns: hostname -- hostname as a string
        """
        # Default to the VIP hostname/connection; overridden below when a
        # specific gateway was requested.
        hostname = FLAGS.gateway_vip
        conn = self.vmem_vip.basic
        if mg_to_query == "mga":
            hostname = FLAGS.gateway_mga
            conn = self.vmem_mga.basic
        elif mg_to_query == "mgb":
            hostname = FLAGS.gateway_mgb
            conn = self.vmem_mgb.basic
        ret_dict = conn.get_node_values("/system/hostname")
        if ret_dict:
            # Take the value of the single returned node.
            # NOTE(review): indexing dict.items() works on Python 2 only;
            # Python 3 would need list(ret_dict.items())[0][1].
            hostname = ret_dict.items()[0][1]
        else:
            # Keep the configured fallback hostname assigned above.
            LOG.debug(_("Unable to fetch gateway hostname for %s"), mg_to_query)
        return hostname
| |
"""Implements the Astropy TestRunner which is a thin wrapper around py.test."""
import inspect
import os
import glob
import copy
import shlex
import sys
import tempfile
import warnings
import importlib
from collections import OrderedDict
from importlib.util import find_spec
from ..config.paths import set_temp_config, set_temp_cache
from ..utils import wraps, find_current_module
from ..utils.exceptions import AstropyWarning, AstropyDeprecationWarning
__all__ = ['TestRunner', 'TestRunnerBase', 'keyword']
def _has_test_dependencies(): # pragma: no cover
# Using the test runner will not work without these dependencies, but
# pytest-openfiles is optional, so it's not listed here.
required = ['pytest', 'pytest_remotedata', 'pytest_doctestplus']
for module in required:
spec = find_spec(module)
# Checking loader accounts for packages that were uninstalled
if spec is None or spec.loader is None:
return False
return True
class keyword:
    """
    A decorator to mark a method as a keyword argument for the ``TestRunner``.

    Parameters
    ----------
    default_value : `object`
        The default value for the keyword argument. (Default: `None`)
    priority : `int`
        keyword argument methods are executed in order of descending priority.
    """

    def __init__(self, default_value=None, priority=0):
        self.default_value = default_value
        self.priority = priority

    def __call__(self, func):
        def keyword(*args, **kwargs):
            return func(*args, **kwargs)

        # Attach the metadata that TestRunnerBase.__new__ consumes when it
        # builds the default kwargs for run_tests.
        keyword._default_value = self.default_value
        keyword._priority = self.priority
        # Copy __doc__ by hand rather than using functools.wraps: the
        # wrapper must keep the name 'keyword' so it can be recognised by
        # __name__ later, while still carrying the original docstring.
        keyword.__doc__ = func.__doc__
        return keyword
class TestRunnerBase:
    """
    The base class for the TestRunner.

    A test runner can be constructed by creating a subclass of this class and
    defining 'keyword' methods. These are methods that have the
    `~astropy.tests.runner.keyword` decorator, these methods are used to
    construct allowed keyword arguments to the
    `~astropy.tests.runner.TestRunnerBase.run_tests` method as a way to allow
    customization of individual keyword arguments (and associated logic)
    without having to re-implement the whole
    `~astropy.tests.runner.TestRunnerBase.run_tests` method.

    Examples
    --------
    A simple keyword method::

        class MyRunner(TestRunnerBase):
            @keyword('default_value')
            def spam(self, spam, kwargs):
                \"\"\"
                spam : `str`
                    The parameter description for the run_tests docstring.
                \"\"\"
                # Return value must be a list with a CLI parameter for pytest.
                return ['--spam={}'.format(spam)]
    """

    def __init__(self, base_path):
        # Root directory against which package/test paths are resolved.
        self.base_path = os.path.abspath(base_path)

    def __new__(cls, *args, **kwargs):
        # Before constructing the class parse all the methods that have been
        # decorated with ``keyword``.
        # The objective of this method is to construct a default set of keyword
        # arguments to the ``run_tests`` method. It does this by inspecting the
        # methods of the class for functions with the name ``keyword`` which is
        # the name of the decorator wrapping function. Once it has created this
        # dictionary, it also formats the docstring of ``run_tests`` to be
        # comprised of the docstrings for the ``keyword`` methods.
        # To add a keyword argument to the ``run_tests`` method, define a new
        # method decorated with ``@keyword`` and with the ``self, name, kwargs``
        # signature.
        # Get all 'function' members as the wrapped methods are functions
        functions = inspect.getmembers(cls, predicate=inspect.isfunction)
        # Filter out anything that's not got the name 'keyword'
        keywords = filter(lambda func: func[1].__name__ == 'keyword', functions)
        # Sort all keywords based on the priority flag.
        sorted_keywords = sorted(keywords, key=lambda x: x[1]._priority, reverse=True)
        cls.keywords = OrderedDict()
        doc_keywords = ""
        for name, func in sorted_keywords:
            # Here we test if the function has been overloaded to return
            # NotImplemented which is the way to disable arguments on
            # subclasses. If it has been disabled we need to remove it from the
            # default keywords dict. We do it in the try except block because
            # we do not have access to an instance of the class, so this is
            # going to error unless the method is just doing `return
            # NotImplemented`.
            try:
                # Second argument is False, as it is normally a bool.
                # The other two are placeholders for objects.
                if func(None, False, None) is NotImplemented:
                    continue
            except Exception:
                pass
            # Construct the default kwargs dict and docstring
            cls.keywords[name] = func._default_value
            if func.__doc__:
                doc_keywords += ' '*8
                doc_keywords += func.__doc__.strip()
                doc_keywords += '\n\n'
        # Rebuild run_tests' docstring from the collected keyword docs so
        # help(run_tests) documents every supported keyword argument.
        cls.run_tests.__doc__ = cls.RUN_TESTS_DOCSTRING.format(keywords=doc_keywords)
        return super(TestRunnerBase, cls).__new__(cls)

    def _generate_args(self, **kwargs):
        """Translate run_tests keyword arguments into a pytest CLI arg list.

        Each keyword method is invoked (in priority order) with its value
        and the full kwargs dict, and must return a list of CLI fragments.
        """
        # Update default values with passed kwargs
        # but don't modify the defaults
        keywords = copy.deepcopy(self.keywords)
        keywords.update(kwargs)
        # Iterate through the keywords (in order of priority)
        args = []
        for keyword in keywords.keys():
            func = getattr(self, keyword)
            result = func(keywords[keyword], keywords)
            # Allow disabling of options in a subclass
            if result is NotImplemented:
                raise TypeError("run_tests() got an unexpected keyword argument {}".format(keyword))
            # keyword methods must return a list
            if not isinstance(result, list):
                raise TypeError("{} keyword method must return a list".format(keyword))
            args += result
        return args

    # Template for run_tests' docstring; filled in by __new__ with the
    # docstrings of all keyword methods.  This is runtime data, not a
    # docstring literal, so it must remain a plain class attribute.
    RUN_TESTS_DOCSTRING = \
        """
        Run the tests for the package.

        Parameters
        ----------
        {keywords}
        See Also
        --------
        pytest.main : This method builds arguments for and then calls this function.
        """

    def run_tests(self, **kwargs):
        # The following option will include eggs inside a .eggs folder in
        # sys.path when running the tests. This is possible so that when
        # runnning python setup.py test, test dependencies installed via e.g.
        # tests_requires are available here. This is not an advertised option
        # since it is only for internal use
        if kwargs.pop('add_local_eggs_to_path', False):
            # Add each egg to sys.path individually
            for egg in glob.glob(os.path.join('.eggs', '*.egg')):
                sys.path.insert(0, egg)
            # We now need to force reload pkg_resources in case any pytest
            # plugins were added above, so that their entry points are picked up
            import pkg_resources
            importlib.reload(pkg_resources)
        if not _has_test_dependencies():  # pragma: no cover
            msg = "Test dependencies are missing. You should install the 'pytest-astropy' package."
            raise RuntimeError(msg)
        # The docstring for this method is defined as a class variable.
        # This allows it to be built for each subclass in __new__.
        # Don't import pytest until it's actually needed to run the tests
        import pytest
        # Raise error for undefined kwargs
        allowed_kwargs = set(self.keywords.keys())
        passed_kwargs = set(kwargs.keys())
        if not passed_kwargs.issubset(allowed_kwargs):
            wrong_kwargs = list(passed_kwargs.difference(allowed_kwargs))
            raise TypeError("run_tests() got an unexpected keyword argument {}".format(wrong_kwargs[0]))
        args = self._generate_args(**kwargs)
        if 'plugins' not in self.keywords or self.keywords['plugins'] is None:
            self.keywords['plugins'] = []
        # Make plugins available to test runner without registering them
        self.keywords['plugins'].extend([
            'astropy.tests.plugins.display',
            'astropy.tests.plugins.config'
        ])
        # override the config locations to not make a new directory nor use
        # existing cache or config
        astropy_config = tempfile.mkdtemp('astropy_config')
        astropy_cache = tempfile.mkdtemp('astropy_cache')
        # Have to use nested with statements for cross-Python support
        # Note, using these context managers here is superfluous if the
        # config_dir or cache_dir options to py.test are in use, but it's
        # also harmless to nest the contexts
        with set_temp_config(astropy_config, delete=True):
            with set_temp_cache(astropy_cache, delete=True):
                return pytest.main(args=args, plugins=self.keywords['plugins'])

    @classmethod
    def make_test_runner_in(cls, path):
        """
        Constructs a `TestRunner` to run in the given path, and returns a
        ``test()`` function which takes the same arguments as
        `TestRunner.run_tests`.

        The returned ``test()`` function will be defined in the module this
        was called from. This is used to implement the ``astropy.test()``
        function (or the equivalent for affiliated packages).
        """
        runner = cls(path)

        @wraps(runner.run_tests, ('__doc__',), exclude_args=('self',))
        def test(**kwargs):
            return runner.run_tests(**kwargs)

        module = find_current_module(2)
        if module is not None:
            test.__module__ = module.__name__

        # A somewhat unusual hack, but delete the attached __wrapped__
        # attribute--although this is normally used to tell if the function
        # was wrapped with wraps, on some version of Python this is also
        # used to determine the signature to display in help() which is
        # not useful in this case. We don't really care in this case if the
        # function was wrapped either
        if hasattr(test, '__wrapped__'):
            del test.__wrapped__

        return test
class TestRunner(TestRunnerBase):
    """
    A test runner for astropy tests.

    Each ``@keyword`` method below contributes one keyword argument to
    ``run_tests`` and translates its value into pytest CLI fragments;
    see `TestRunnerBase` for the collection mechanics.
    """

    # Increase priority so this warning is displayed first.
    @keyword(priority=1000)
    def coverage(self, coverage, kwargs):
        if coverage:
            warnings.warn(
                "The coverage option is ignored on run_tests, since it "
                "can not be made to work in that context. Use "
                "'python setup.py test --coverage' instead.",
                AstropyWarning)

        return []

    # test_path depends on self.package_path so make sure this runs before
    # test_path.
    @keyword(priority=1)
    def package(self, package, kwargs):
        """
        package : str, optional
            The name of a specific package to test, e.g. 'io.fits' or 'utils'.
            If nothing is specified all default Astropy tests are run.
        """
        if package is None:
            self.package_path = self.base_path
        else:
            self.package_path = os.path.join(self.base_path,
                                             package.replace('.', os.path.sep))

            if not os.path.isdir(self.package_path):
                raise ValueError('Package not found: {0}'.format(package))

        # Only point pytest at the package when no explicit test_path is
        # given; test_path takes precedence.
        if not kwargs['test_path']:
            return [self.package_path]

        return []

    @keyword()
    def test_path(self, test_path, kwargs):
        """
        test_path : str, optional
            Specify location to test by path. May be a single file or
            directory. Must be specified absolutely or relative to the
            calling directory.
        """
        all_args = []
        # Ensure that the package kwarg has been run.
        self.package(kwargs['package'], kwargs)
        if test_path:
            base, ext = os.path.splitext(test_path)

            # A .rst target (or extension-less path) is treated as a doc
            # file whose examples should be doctested.
            if ext in ('.rst', ''):
                if kwargs['docs_path'] is None:
                    # This shouldn't happen from "python setup.py test"
                    raise ValueError(
                        "Can not test .rst files without a docs_path "
                        "specified.")

                abs_docs_path = os.path.abspath(kwargs['docs_path'])
                abs_test_path = os.path.abspath(
                    os.path.join(abs_docs_path, os.pardir, test_path))

                common = os.path.commonprefix((abs_docs_path, abs_test_path))

                if os.path.exists(abs_test_path) and common == abs_docs_path:
                    # Turn on the doctest_rst plugin
                    all_args.append('--doctest-rst')
                    test_path = abs_test_path

            if not (os.path.isdir(test_path) or ext in ('.py', '.rst')):
                raise ValueError("Test path must be a directory or a path to "
                                 "a .py or .rst file")

            return all_args + [test_path]

        return []

    @keyword()
    def args(self, args, kwargs):
        """
        args : str, optional
            Additional arguments to be passed to ``pytest.main`` in the ``args``
            keyword argument.
        """
        if args:
            return shlex.split(args, posix=not sys.platform.startswith('win'))

        return []

    @keyword()
    def plugins(self, plugins, kwargs):
        """
        plugins : list, optional
            Plugins to be passed to ``pytest.main`` in the ``plugins`` keyword
            argument.
        """
        # Plugin objects are passed through to pytest.main directly by
        # run_tests; they contribute no CLI arguments.
        return []

    @keyword()
    def verbose(self, verbose, kwargs):
        """
        verbose : bool, optional
            Convenience option to turn on verbose output from py.test. Passing
            True is the same as specifying ``-v`` in ``args``.
        """
        if verbose:
            return ['-v']

        return []

    @keyword()
    def pastebin(self, pastebin, kwargs):
        """
        pastebin : ('failed', 'all', None), optional
            Convenience option for turning on py.test pastebin output. Set to
            'failed' to upload info for failed tests, or 'all' to upload info
            for all tests.
        """
        if pastebin is not None:
            if pastebin in ['failed', 'all']:
                return ['--pastebin={0}'.format(pastebin)]
            else:
                raise ValueError("pastebin should be 'failed' or 'all'")

        return []

    @keyword(default_value='none')
    def remote_data(self, remote_data, kwargs):
        """
        remote_data : {'none', 'astropy', 'any'}, optional
            Controls whether to run tests marked with @pytest.mark.remote_data. This can be
            set to run no tests with remote data (``none``), only ones that use
            data from http://data.astropy.org (``astropy``), or all tests that
            use remote data (``any``). The default is ``none``.
        """
        # Map legacy boolean values onto the new tri-state strings.
        if remote_data is True:
            remote_data = 'any'
        elif remote_data is False:
            remote_data = 'none'
        elif remote_data not in ('none', 'astropy', 'any'):
            warnings.warn("The remote_data option should be one of "
                          "none/astropy/any (found {0}). For backward-compatibility, "
                          "assuming 'any', but you should change the option to be "
                          "one of the supported ones to avoid issues in "
                          "future.".format(remote_data),
                          AstropyDeprecationWarning)
            remote_data = 'any'

        return ['--remote-data={0}'.format(remote_data)]

    @keyword()
    def pep8(self, pep8, kwargs):
        """
        pep8 : bool, optional
            Turn on PEP8 checking via the pytest-pep8 plugin and disable normal
            tests. Same as specifying ``--pep8 -k pep8`` in ``args``.
        """
        if pep8:
            try:
                import pytest_pep8  # pylint: disable=W0611
            except ImportError:
                raise ImportError('PEP8 checking requires pytest-pep8 plugin: '
                                  'http://pypi.python.org/pypi/pytest-pep8')
            else:
                return ['--pep8', '-k', 'pep8']

        return []

    @keyword()
    def pdb(self, pdb, kwargs):
        """
        pdb : bool, optional
            Turn on PDB post-mortem analysis for failing tests. Same as
            specifying ``--pdb`` in ``args``.
        """
        if pdb:
            return ['--pdb']

        return []

    @keyword()
    def open_files(self, open_files, kwargs):
        """
        open_files : bool, optional
            Fail when any tests leave files open.  Off by default, because
            this adds extra run time to the test suite.  Requires the
            ``psutil`` package.
        """
        if open_files:
            if kwargs['parallel'] != 0:
                raise SystemError(
                    "open file detection may not be used in conjunction with "
                    "parallel testing.")

            try:
                import psutil  # pylint: disable=W0611
            except ImportError:
                raise SystemError(
                    "open file detection requested, but psutil package "
                    "is not installed.")

            # Bug fix: this message previously sat after the return below
            # (at function level), so it printed only when open-file
            # checking was *disabled*.  Announce it only when enabled.
            print("Checking for unclosed files")

            return ['--open-files']

        return []

    @keyword(0)
    def parallel(self, parallel, kwargs):
        """
        parallel : int, optional
            When provided, run the tests in parallel on the specified
            number of CPUs.  If parallel is negative, it will use the all
            the cores on the machine.  Requires the ``pytest-xdist`` plugin.
        """
        if parallel != 0:
            try:
                from xdist import plugin  # noqa
            except ImportError:
                raise SystemError(
                    "running tests in parallel requires the pytest-xdist package")

            return ['-n', str(parallel)]

        return []

    @keyword()
    def docs_path(self, docs_path, kwargs):
        """
        docs_path : str, optional
            The path to the documentation .rst files.
        """
        if docs_path is not None and not kwargs['skip_docs']:
            if kwargs['package'] is not None:
                docs_path = os.path.join(
                    docs_path, kwargs['package'].replace('.', os.path.sep))
            if not os.path.exists(docs_path):
                warnings.warn(
                    "Can not test .rst docs, since docs path "
                    "({0}) does not exist.".format(docs_path))
                docs_path = None
        # An explicit test_path supersedes doc testing.
        if docs_path and not kwargs['skip_docs'] and not kwargs['test_path']:
            return [docs_path, '--doctest-rst']

        return []

    @keyword()
    def skip_docs(self, skip_docs, kwargs):
        """
        skip_docs : `bool`, optional
            When `True`, skips running the doctests in the .rst files.
        """
        # Skip docs is a bool used by docs_path only.
        return []

    @keyword()
    def repeat(self, repeat, kwargs):
        """
        repeat : `int`, optional
            If set, specifies how many times each test should be run. This is
            useful for diagnosing sporadic failures.
        """
        if repeat:
            return ['--repeat={0}'.format(repeat)]

        return []

    # Override run_tests for astropy-specific fixes
    def run_tests(self, **kwargs):
        # This prevents cyclical import problems that make it
        # impossible to test packages that define Table types on their
        # own.
        from ..table import Table  # pylint: disable=W0611

        return super(TestRunner, self).run_tests(**kwargs)
| |
"""
sentry.web.frontend.groups
~~~~~~~~~~~~~~~~~~~~~~~~~~
Contains views for the "Events" section of Sentry.
TODO: Move all events.py views into here, and rename this file to events.
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, division
import datetime
import re
import logging
from django.core.urlresolvers import reverse
from django.db.utils import DatabaseError
from django.http import (
HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect, Http404
)
from django.shortcuts import get_object_or_404
from django.utils import timezone
from sentry import app
from sentry.auth import access
from sentry.constants import (
SORT_OPTIONS, MEMBER_USER, DEFAULT_SORT_OPTION, EVENTS_PER_PAGE
)
from sentry.db.models import create_or_update
from sentry.models import (
Project, Group, GroupMeta, Event, Activity, EventMapping, TagKey, GroupSeen
)
from sentry.permissions import can_create_projects
from sentry.plugins import plugins
from sentry.search.utils import parse_query
from sentry.utils import json
from sentry.utils.cursors import Cursor
from sentry.utils.dates import parse_date
from sentry.web.decorators import has_access, has_group_access, login_required
from sentry.web.forms import NewNoteForm
from sentry.web.helpers import render_to_response, group_is_public
uuid_re = re.compile(r'^[a-z0-9]{32}$', re.I)
event_re = re.compile(r'^(?P<event_id>[a-z0-9]{32})\$(?P<checksum>[a-z0-9]{32})$', re.I)
def _get_group_list(request, project):
    """
    Build the kwargs for ``app.search.query`` from the request's GET
    parameters and run the search.

    Returns a dict with one page of results plus the date window, chosen
    sort, and pagination cursors for the stream template.
    """
    query_kwargs = {
        'project': project,
    }

    # Default to status '0' (unresolved) when no status filter is given.
    status = request.GET.get('status', '0')
    if status:
        query_kwargs['status'] = int(status)

    if request.user.is_authenticated() and request.GET.get('bookmarks'):
        query_kwargs['bookmarked_by'] = request.user

    if request.user.is_authenticated() and request.GET.get('assigned'):
        query_kwargs['assigned_to'] = request.user

    # Prefer an explicit sort, then the last sort stored in the session,
    # then the global default.
    sort_by = request.GET.get('sort') or request.session.get('streamsort')
    if sort_by is None:
        sort_by = DEFAULT_SORT_OPTION

    # Save last sort in session
    if sort_by != request.session.get('streamsort'):
        request.session['streamsort'] = sort_by

    query_kwargs['sort_by'] = sort_by

    # Any GET parameter whose name matches a known tag key becomes a
    # tag filter.
    tags = {}
    for tag_key in TagKey.objects.all_keys(project):
        if request.GET.get(tag_key):
            tags[tag_key] = request.GET[tag_key]

    if tags:
        query_kwargs['tags'] = tags
    else:
        query_kwargs['tags'] = {}

    date_from = request.GET.get('df')
    time_from = request.GET.get('tf')
    date_to = request.GET.get('dt')
    time_to = request.GET.get('tt')
    date_filter = request.GET.get('date_type')

    today = timezone.now()
    # date format is Y-m-d
    if any(x is not None for x in [date_from, time_from, date_to, time_to]):
        date_from, date_to = parse_date(date_from, time_from), parse_date(date_to, time_to)
    else:
        # No explicit window: default to the trailing five days, open-ended.
        date_from = today - datetime.timedelta(days=5)
        date_to = None

    query_kwargs['date_from'] = date_from
    query_kwargs['date_to'] = date_to
    if date_filter:
        query_kwargs['date_filter'] = date_filter

    cursor = request.GET.get('cursor')
    if cursor:
        try:
            query_kwargs['cursor'] = Cursor.from_string(cursor)
        except ValueError:
            # XXX(dcramer): ideally we'd error, but this is an internal API so
            # we'd rather just throw it away
            logging.info('Throwing away invalid cursor: %s', cursor)
    query_kwargs['limit'] = EVENTS_PER_PAGE

    query = request.GET.get('query', '')
    # NOTE(review): ``query`` defaults to '' so this check is always true;
    # kept as-is.  ``iteritems`` makes this Python 2 only.
    if query is not None:
        for key, value in parse_query(query, request.user).iteritems():
            if key == 'tags':
                query_kwargs['tags'].update(value)
            else:
                query_kwargs[key] = value

    results = app.search.query(**query_kwargs)

    return {
        'event_list': results[:EVENTS_PER_PAGE],
        'date_from': date_from,
        'date_to': date_to,
        'today': today,
        'sort': sort_by,
        'date_type': date_filter,
        'next_cursor': results.next,
        'prev_cursor': results.prev,
    }
def render_with_group_context(group, template, context, request=None,
                              event=None, is_public=False):
    """
    Render ``template`` with the standard group context (team, organization,
    project, group, ACCESS) merged into ``context``.

    When ``event`` is given, the adjacent (next/previous) events are
    computed for navigation and event-specific keys are added.  On public
    views the event's extra data and tags are withheld.
    """
    context.update({
        'team': group.project.team,
        'organization': group.project.organization,
        'project': group.project,
        'group': group,
    })

    if request and request.user.is_authenticated():
        context['ACCESS'] = access.from_user(
            user=request.user,
            organization=group.organization,
        ).to_django_context()
    else:
        context['ACCESS'] = access.DEFAULT.to_django_context()

    if event:
        if event.id:
            # HACK(dcramer): work around lack of unique sorting on datetime
            # Fetch a handful of candidates on either side and re-sort them
            # by (datetime, id) so the chosen neighbor is deterministic.
            base_qs = Event.objects.filter(
                group=event.group_id,
            ).exclude(id=event.id)
            try:
                next_event = sorted(
                    base_qs.filter(
                        datetime__gte=event.datetime
                    ).order_by('datetime')[0:5],
                    key=lambda x: (x.datetime, x.id)
                )[0]
            except IndexError:
                next_event = None
            try:
                prev_event = sorted(
                    base_qs.filter(
                        datetime__lte=event.datetime,
                    ).order_by('-datetime')[0:5],
                    key=lambda x: (x.datetime, x.id),
                    reverse=True
                )[0]
            except IndexError:
                prev_event = None
        else:
            # Unsaved placeholder event: no neighbors to navigate to.
            next_event = None
            prev_event = None

        if not is_public:
            # User-supplied extra data is only exposed on private views.
            extra_data = event.data.get('extra', {})
            if not isinstance(extra_data, dict):
                extra_data = {}

            context.update({
                'tags': event.get_tags(),
                'json_data': extra_data,
            })

        context.update({
            'event': event,
            'version_data': event.data.get('modules', None),
            'next_event': next_event,
            'prev_event': prev_event,
        })

    return render_to_response(template, context, request)
@login_required
def redirect_to_group(request, project_id, group_id):
    """Redirect a legacy group URL to the canonical org-scoped group page."""
    group = get_object_or_404(Group, id=group_id)

    url_kwargs = {
        'organization_slug': group.project.organization.slug,
        'project_id': group.project.slug,
        'group_id': group.id,
    }
    return HttpResponseRedirect(reverse('sentry-group', kwargs=url_kwargs))
@login_required
@has_access
def dashboard(request, organization, team):
    """Render the team dashboard, or bounce to project creation when the
    team has no projects and the user may create one."""
    project_list = list(Project.objects.filter(team=team))

    if not project_list and can_create_projects(request.user, team=team):
        create_url = reverse('sentry-create-project',
                             args=[team.organization.slug])
        return HttpResponseRedirect(create_url + '?team=' + team.slug)

    # Pre-bind the team to avoid a per-project lookup in the template.
    for proj in project_list:
        proj.team = team

    context = {
        'organization': team.organization,
        'team': team,
        'project_list': project_list,
        'ACCESS': access.from_user(
            user=request.user,
            organization=organization,
        ).to_django_context(),
    }
    return render_to_response('sentry/dashboard.html', context, request)
@login_required
@has_access
def wall_display(request, organization, team):
    """Render the big-screen "wall" view for a team's projects."""
    project_list = list(Project.objects.filter(team=team))

    # Pre-bind the team to avoid a per-project lookup in the template.
    for proj in project_list:
        proj.team = team

    context = {
        'team': team,
        'organization': team.organization,
        'project_list': project_list,
        'ACCESS': access.from_user(
            user=request.user,
            organization=organization,
        ).to_django_context(),
    }
    return render_to_response('sentry/wall.html', context, request)
@login_required
@has_access
def group_list(request, organization, project):
    """Render the event stream (group list) for a project."""
    query = request.GET.get('query')
    if query and uuid_re.match(query):
        # A full event id was pasted into the search box; jump straight
        # to the owning group when a mapping exists.
        try:
            matched_group_id = EventMapping.objects.filter(
                project=project, event_id=query
            ).values_list('group', flat=True)[0]
        except IndexError:
            pass
        else:
            return HttpResponseRedirect(reverse('sentry-group', kwargs={
                'project_id': project.slug,
                'organization_slug': project.organization.slug,
                'group_id': matched_group_id,
            }))

    response = _get_group_list(
        request=request,
        project=project,
    )
    if isinstance(response, HttpResponse):
        return response

    # XXX: this is duplicate in _get_group_list
    sort_label = SORT_OPTIONS[response['sort']]

    # Live updates only make sense on the first (cursorless) page.
    has_realtime = not request.GET.get('cursor')

    query_dict = request.GET.copy()
    query_dict.pop('cursor', None)
    cursorless_query_string = query_dict.urlencode()

    GroupMeta.objects.populate_cache(response['event_list'])

    context = {
        'team': project.team,
        'organization': organization,
        'project': project,
        'from_date': response['date_from'],
        'to_date': response['date_to'],
        'date_type': response['date_type'],
        'has_realtime': has_realtime,
        'event_list': response['event_list'],
        'prev_cursor': response['prev_cursor'],
        'next_cursor': response['next_cursor'],
        'today': response['today'],
        'sort': response['sort'],
        'query': query,
        'cursorless_query_string': cursorless_query_string,
        'sort_label': sort_label,
        'SORT_OPTIONS': SORT_OPTIONS,
        'ACCESS': access.from_user(
            user=request.user,
            organization=organization,
        ).to_django_context(),
    }
    return render_to_response('sentry/groups/group_list.html', context, request)
def group(request, organization_slug, project_id, group_id, event_id=None):
    """Resolve a possibly-legacy group URL.

    TODO(dcramer): remove in 7.1 release.  Handles redirects from
    team_slug/project_slug URLs to org_slug/project_slug URLs.
    """
    try:
        group = Group.objects.get(id=group_id)
    except Group.DoesNotExist:
        raise Http404

    if group.project.slug != project_id:
        raise Http404

    if group.organization.slug == organization_slug:
        # Already canonical: render the details view directly.
        return group_details(
            request=request,
            organization_slug=organization_slug,
            project_id=project_id,
            group_id=group_id,
            event_id=event_id,
        )

    if group.team.slug == organization_slug:
        # Legacy team-scoped URL: permanently redirect to the
        # organization-scoped equivalent.
        url_args = [group.organization.slug, project_id, group_id]
        if event_id:
            url = reverse('sentry-group-event', args=url_args + [event_id])
        else:
            url = reverse('sentry-group', args=url_args)
        return HttpResponsePermanentRedirect(url)

    raise Http404
@has_group_access(allow_public=True)
def group_details(request, organization, project, group, event_id=None):
    """
    Render the details page for a group, optionally pinned to a specific
    event.  Also handles posting a new note, recording "seen by" state,
    and assembling the recent-activity feed.
    """
    # It's possible that a message would not be created under certain
    # circumstances (such as a post_save signal failing)
    if event_id:
        event = get_object_or_404(group.event_set, id=event_id)
    else:
        event = group.get_latest_event() or Event()

    Event.objects.bind_nodes([event], 'data')
    GroupMeta.objects.populate_cache([group])

    # bind params to group in case they get hit
    event.group = group
    event.project = project

    if request.POST.get('o') == 'note' and request.user.is_authenticated():
        add_note_form = NewNoteForm(request.POST)
        if add_note_form.is_valid():
            add_note_form.save(event, request.user)
            # Redirect-after-post so a refresh does not duplicate the note.
            return HttpResponseRedirect(request.path)
    else:
        add_note_form = NewNoteForm()

    if request.user.is_authenticated() and project.has_access(request.user):
        # update that the user has seen this group
        try:
            create_or_update(
                GroupSeen,
                group=group,
                user=request.user,
                project=project,
                values={
                    'last_seen': timezone.now(),
                }
            )
        except DatabaseError as exc:
            # Best-effort bookkeeping; never fail the page over it.
            # NOTE(review): ``unicode`` makes this Python 2 only.
            logging.warn(unicode(exc), exc_info=True)

    activity_qs = Activity.objects.filter(
        group=group,
    ).order_by('-datetime').select_related('user')

    # filter out dupe activity items
    activity_items = set()
    activity = []
    for item in activity_qs[:20]:
        sig = (item.event_id, item.type, item.ident, item.user_id)
        # TODO: we could just generate a signature (hash(text)) for notes
        # so there's no special casing
        if item.type == Activity.NOTE:
            activity.append(item)
        elif sig not in activity_items:
            activity_items.add(sig)
            activity.append(item)

    # Always include a synthetic "first seen" entry at the end of the feed.
    activity.append(Activity(
        project=project, group=group, type=Activity.FIRST_SEEN,
        datetime=group.first_seen))

    # trim to the 7 most recent entries (including the first-seen marker)
    activity = activity[:7]

    # Everyone other than the viewer who has seen this group, most recent
    # first; users without an email address are excluded.
    seen_by = sorted(filter(lambda ls: ls[0] != request.user and ls[0].email, [
        (gs.user, gs.last_seen)
        for gs in GroupSeen.objects.filter(
            group=group
        ).select_related('user')
    ]), key=lambda ls: ls[1], reverse=True)
    # Show at most 5 avatars; the rest collapse into a "+N" counter.
    seen_by_extra = len(seen_by) - 5
    if seen_by_extra < 0:
        seen_by_extra = 0
    seen_by_faces = seen_by[:5]

    context = {
        'add_note_form': add_note_form,
        'page': 'details',
        'activity': activity,
        'seen_by': seen_by,
        'seen_by_faces': seen_by_faces,
        'seen_by_extra': seen_by_extra,
    }

    is_public = group_is_public(group, request.user)

    if is_public:
        template = 'sentry/groups/public_details.html'
        context['PROJECT_LIST'] = [project]
    else:
        template = 'sentry/groups/details.html'

    return render_with_group_context(
        group, template, context, request,
        event=event, is_public=is_public)
@has_group_access
def group_tag_list(request, organization, project, group):
    """
    Render the tag overview for a group: for each tag key, the top five
    values with counts and their share of the group's total events.
    """
    def percent(total, this):
        # True division is in effect via ``from __future__ import division``.
        return int(this / total * 100)

    GroupMeta.objects.populate_cache([group])

    queryset = TagKey.objects.filter(
        project=project,
        key__in=[t['key'] for t in group.get_tags()],
    )

    # O(N) db access
    tag_list = []
    for tag_key in queryset:
        # Hoisted: the original built this queryset twice per tag key
        # (once for the top values, once for the count).
        unique_tags = group.get_unique_tags(tag_key.key)
        tag_list.append((tag_key, [
            (value, times_seen, percent(group.times_seen, times_seen))
            for (value, times_seen, first_seen, last_seen)
            in unique_tags[:5]
        ], unique_tags.count()))

    return render_with_group_context(group, 'sentry/groups/tag_list.html', {
        'page': 'tag_list',
        'tag_list': tag_list,
    }, request)
@has_group_access
def group_tag_details(request, organization, project, group, tag_name):
    """Render the value breakdown for a single tag on a group."""
    GroupMeta.objects.populate_cache([group])

    # Map the requested sort to an order_by clause; anything unrecognised
    # (including no sort at all) falls back to frequency.
    sort_to_order = {
        'date': '-last_seen',
        'new': '-first_seen',
    }
    order_by = sort_to_order.get(request.GET.get('sort'), '-times_seen')

    return render_with_group_context(group, 'sentry/plugins/bases/tag/index.html', {
        'title': tag_name.replace('_', ' ').title(),
        'tag_name': tag_name,
        'unique_tags': group.get_unique_tags(tag_name, order_by=order_by),
        'page': 'tag_details',
    }, request)
@has_group_access
def group_event_list(request, organization, project, group):
    """Render the most recent events (up to 100) for a group."""
    # TODO: we need the event data to bind after we limit
    event_list = group.event_set.all().order_by('-datetime')[:100]

    # Pre-bind relations so templates do not re-query per event.
    for evt in event_list:
        evt.project = project
        evt.group = group

    GroupMeta.objects.populate_cache([group])
    Event.objects.bind_nodes(event_list, 'data')

    context = {
        'event_list': event_list,
        'page': 'event_list',
    }
    return render_with_group_context(
        group, 'sentry/groups/event_list.html', context, request)
@has_access(MEMBER_USER)
def group_event_details_json(request, organization, project, group_id, event_id_or_latest):
    """Return one event of a group (or its latest event) serialized as JSON."""
    group = get_object_or_404(Group, pk=group_id, project=project)

    if event_id_or_latest == 'latest':
        # It's possible that a message would not be created under certain
        # circumstances (such as a post_save signal failing), so fall back
        # to an empty Event bound to the group.
        event = group.get_latest_event() or Event(group=group)
    else:
        event = get_object_or_404(group.event_set, pk=event_id_or_latest)

    Event.objects.bind_nodes([event], 'data')
    GroupMeta.objects.populate_cache([group])

    payload = json.dumps(event.as_dict())
    return HttpResponse(payload, mimetype='application/json')
@login_required
@has_access(MEMBER_USER)
def group_plugin_action(request, organization, project, group_id, slug):
    """Dispatch a request to a plugin's view for the given group."""
    group = get_object_or_404(Group, pk=group_id, project=project)

    try:
        plugin = plugins.get(slug)
    except KeyError:
        raise Http404('Plugin not found')

    GroupMeta.objects.populate_cache([group])

    response = plugin.get_view_response(request, group)
    if response:
        return response

    # The plugin produced no response; bounce back to wherever the user
    # came from, falling back to the project stream.
    fallback = reverse('sentry-stream', kwargs={
        'organization_slug': organization.slug,
        'project_id': group.project.slug
    })
    return HttpResponseRedirect(request.META.get('HTTP_REFERER') or fallback)
| |
#!/usr/bin/env python
##
# Massimiliano Patacchiola, Plymouth University 2016
#
# This is an example of head pose estimation with solvePnP and dlib face detector.
# It uses the dlib library and openCV.
# To use this example you have to provide an input video file
# and an output path:
# python ex_pnp_pose_estimation_video.py /home/video.mpg ./output.avi
#
import numpy
import cv2
import sys
import os
from deepgaze.face_landmark_detection import faceLandmarkDetection
#For the frontal face detector
import dlib
#Anthropometric constant values of the human head.
#Found on wikipedia and on:
# "Head-and-Face Anthropometric Survey of U.S. Respirator Users"
#
#X-Y-Z with X pointing forward and Y on the left.
#The X-Y-Z coordinates used are like the standard
# coordinates of ROS (robotic operative system)
P3D_RIGHT_SIDE = numpy.float32([-100.0, -77.5, -5.0]) #0
P3D_GONION_RIGHT = numpy.float32([-110.0, -77.5, -85.0]) #4
P3D_MENTON = numpy.float32([0.0, 0.0, -122.7]) #8
P3D_GONION_LEFT = numpy.float32([-110.0, 77.5, -85.0]) #12
P3D_LEFT_SIDE = numpy.float32([-100.0, 77.5, -5.0]) #16
P3D_FRONTAL_BREADTH_RIGHT = numpy.float32([-20.0, -56.1, 10.0]) #17
P3D_FRONTAL_BREADTH_LEFT = numpy.float32([-20.0, 56.1, 10.0]) #26
P3D_SELLION = numpy.float32([0.0, 0.0, 0.0]) #27
P3D_NOSE = numpy.float32([21.1, 0.0, -48.0]) #30
P3D_SUB_NOSE = numpy.float32([5.0, 0.0, -52.0]) #33
P3D_RIGHT_EYE = numpy.float32([-20.0, -65.5,-5.0]) #36
P3D_RIGHT_TEAR = numpy.float32([-10.0, -40.5,-5.0]) #39
P3D_LEFT_TEAR = numpy.float32([-10.0, 40.5,-5.0]) #42
P3D_LEFT_EYE = numpy.float32([-20.0, 65.5,-5.0]) #45
#P3D_LIP_RIGHT = numpy.float32([-20.0, 65.5,-5.0]) #48
#P3D_LIP_LEFT = numpy.float32([-20.0, 65.5,-5.0]) #54
P3D_STOMION = numpy.float32([10.0, 0.0, -75.0]) #62
#The points to track
#These points are the ones used by PnP
# to estimate the 3D pose of the face
TRACKED_POINTS = (0, 4, 8, 12, 16, 17, 26, 27, 30, 33, 36, 39, 42, 45, 62)
ALL_POINTS = list(range(0,68)) #Used for debug only
def main():
    """Estimate the head pose in every frame of a video with solvePnP.

    Usage: python ex_pnp_pose_estimation_video.py <input_video> <output_video>

    Reads the input video, detects faces with dlib, estimates the 3D head
    pose from 2D landmarks via cv2.solvePnP, draws an axis gizmo on each
    face and writes the annotated frames to the output file.
    """
    # Two positional arguments are required: input video and output path.
    if len(sys.argv) <= 2:
        print("You have to pass as argument the path to a video file and the path to the output file to produce, for example: \n python ex_pnp_pose_estimation_video.py /home/video.mpg ./output.avi")
        return
    file_path = sys.argv[1]
    if os.path.isfile(file_path) == False:
        print("ex_pnp_head_pose_estimation: the file specified does not exist.")
        return
    output_path = sys.argv[2]

    # Open the video file
    video_capture = cv2.VideoCapture(file_path)
    if video_capture.isOpened() == True:
        print("ex_pnp_head_pose_estimation: the video source has been opened correctly...")

    # Obtain the capture dimensions *before* creating the writer: the
    # writer frame size must match the frames we feed it, otherwise
    # cv2.VideoWriter silently drops every frame.
    cam_w = int(video_capture.get(3))
    cam_h = int(video_capture.get(4))

    # Define the codec and create the VideoWriter object.
    # cv2.cv.CV_FOURCC only exists in OpenCV 2; newer releases expose
    # cv2.VideoWriter_fourcc, so use whichever is available.
    if hasattr(cv2, 'VideoWriter_fourcc'):
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
    else:
        fourcc = cv2.cv.CV_FOURCC(*'XVID')
    out = cv2.VideoWriter(output_path, fourcc, 20.0, (cam_w, cam_h))

    # Create the main window and move it
    cv2.namedWindow('Video')
    cv2.moveWindow('Video', 20, 20)

    # Defining the camera matrix.
    # To have better results it is necessary to find the focal length of
    # the camera. fx/fy are the focal lengths (in pixels) and cx/cy are
    # the optical centres. These values can be obtained roughly by
    # approximation, for example in a 640x480 camera:
    # cx = 640/2 = 320
    # cy = 480/2 = 240
    # fx = fy = cx/tan(60/2 * pi / 180) = 554.26
    c_x = cam_w / 2
    c_y = cam_h / 2
    f_x = c_x / numpy.tan(60/2 * numpy.pi / 180)
    f_y = f_x

    # Estimated camera matrix values.
    camera_matrix = numpy.float32([[f_x, 0.0, c_x],
                                   [0.0, f_y, c_y],
                                   [0.0, 0.0, 1.0]])
    print("Estimated camera matrix: \n" + str(camera_matrix) + "\n")

    # Distortion coefficients: assumed zero. For better accuracy replace
    # with values estimated by calibration (see: src/calibration).
    camera_distortion = numpy.float32([0.0, 0.0, 0.0, 0.0, 0.0])

    # 3D model points of the tracked landmarks, obtained from
    # anthropometric measurements of the human head. The order must
    # match TRACKED_POINTS.
    landmarks_3D = numpy.float32([P3D_RIGHT_SIDE,
                                  P3D_GONION_RIGHT,
                                  P3D_MENTON,
                                  P3D_GONION_LEFT,
                                  P3D_LEFT_SIDE,
                                  P3D_FRONTAL_BREADTH_RIGHT,
                                  P3D_FRONTAL_BREADTH_LEFT,
                                  P3D_SELLION,
                                  P3D_NOSE,
                                  P3D_SUB_NOSE,
                                  P3D_RIGHT_EYE,
                                  P3D_RIGHT_TEAR,
                                  P3D_LEFT_TEAR,
                                  P3D_LEFT_EYE,
                                  P3D_STOMION])

    # The dlib shape predictor model must be available locally.
    dlib_landmarks_file = "./shape_predictor_68_face_landmarks.dat"
    if os.path.isfile(dlib_landmarks_file) == False:
        print("The dlib landmarks file is missing! Use the following commands to download and unzip: ")
        print(">> wget dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2")
        print(">> bzip2 -d shape_predictor_68_face_landmarks.dat.bz2")
        return
    my_detector = faceLandmarkDetection(dlib_landmarks_file)
    my_face_detector = dlib.get_frontal_face_detector()

    while True:
        # Capture frame-by-frame
        ret, frame = video_capture.read()
        # Stop cleanly at end of stream or on a read failure; without
        # this check a None frame would crash the face detector.
        if not ret:
            break

        faces_array = my_face_detector(frame, 1)
        print("Total Faces: " + str(len(faces_array)))
        for i, pos in enumerate(faces_array):

            face_x1 = pos.left()
            face_y1 = pos.top()
            face_x2 = pos.right()
            face_y2 = pos.bottom()
            text_x1 = face_x1
            text_y1 = face_y1 - 3

            # Label and outline the detected face.
            cv2.putText(frame, "FACE " + str(i+1), (text_x1, text_y1), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
            cv2.rectangle(frame,
                          (face_x1, face_y1),
                          (face_x2, face_y2),
                          (0, 255, 0),
                          2)

            # 2D pixel positions of the tracked landmarks.
            landmarks_2D = my_detector.returnLandmarks(frame, face_x1, face_y1, face_x2, face_y2, points_to_return=TRACKED_POINTS)

            for point in landmarks_2D:
                cv2.circle(frame, (point[0], point[1]), 2, (0, 0, 255), -1)

            # Applying the PnP solver to find the 3D pose of the head
            # from the 2D position of the landmarks.
            # retval - bool
            # rvec - Output rotation vector that, together with tvec, brings
            #   points from the model coordinate system to the camera
            #   coordinate system.
            # tvec - Output translation vector.
            retval, rvec, tvec = cv2.solvePnP(landmarks_3D,
                                              landmarks_2D,
                                              camera_matrix, camera_distortion)

            # Project a 3-axis reference into the image plane.
            axis = numpy.float32([[50, 0, 0],
                                  [0, 50, 0],
                                  [0, 0, 50]])
            imgpts, jac = cv2.projectPoints(axis, rvec, tvec, camera_matrix, camera_distortion)

            # Draw the three axes anchored at the sellion. OpenCV colors
            # are BGR; our axis/color convention is X=R, Y=G, Z=B.
            sellion_xy = (landmarks_2D[7][0], landmarks_2D[7][1])
            cv2.line(frame, sellion_xy, tuple(imgpts[1].ravel()), (0, 255, 0), 3)  # GREEN
            cv2.line(frame, sellion_xy, tuple(imgpts[2].ravel()), (255, 0, 0), 3)  # BLUE
            cv2.line(frame, sellion_xy, tuple(imgpts[0].ravel()), (0, 0, 255), 3)  # RED

        # Writing in the output file
        out.write(frame)

        # Showing the frame and waiting for the exit command
        cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release the capture and the writer (finalises the output container)
    # and close any OpenCV windows.
    video_capture.release()
    out.release()
    cv2.destroyAllWindows()
    print("Bye...")
# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| |
"""deCONZ services."""
import asyncio
from pydeconz.utils import normalize_bridge_id
import voluptuous as vol
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.entity_registry import (
async_entries_for_config_entry,
async_entries_for_device,
)
from .config_flow import get_master_gateway
from .const import (
CONF_BRIDGE_ID,
DOMAIN,
LOGGER,
NEW_GROUP,
NEW_LIGHT,
NEW_SCENE,
NEW_SENSOR,
)
# Flag stored in hass.data marking that the deCONZ services are registered.
DECONZ_SERVICES = "deconz_services"

# Keys accepted in the "configure" service payload.
SERVICE_FIELD = "field"
SERVICE_ENTITY = "entity"
SERVICE_DATA = "data"

# Service that writes an arbitrary attribute to a device in deCONZ.
SERVICE_CONFIGURE_DEVICE = "configure"
SERVICE_CONFIGURE_DEVICE_SCHEMA = vol.All(
    vol.Schema(
        {
            vol.Optional(SERVICE_ENTITY): cv.entity_id,
            # Field must be a REST (sub)path starting with '/'.
            vol.Optional(SERVICE_FIELD): cv.matches_regex("/.*"),
            vol.Required(SERVICE_DATA): dict,
            vol.Optional(CONF_BRIDGE_ID): str,
        }
    ),
    # At least one of entity/field is needed to resolve the target path.
    cv.has_at_least_one_key(SERVICE_ENTITY, SERVICE_FIELD),
)

SERVICE_DEVICE_REFRESH = "device_refresh"
SERVICE_REMOVE_ORPHANED_ENTRIES = "remove_orphaned_entries"

# Shared schema for services that only select a gateway (by bridge id).
SELECT_GATEWAY_SCHEMA = vol.All(vol.Schema({vol.Optional(CONF_BRIDGE_ID): str}))
async def async_setup_services(hass):
    """Set up services for deCONZ integration."""
    # Bail out if the services were already registered for another entry.
    if hass.data.get(DECONZ_SERVICES, False):
        return

    hass.data[DECONZ_SERVICES] = True

    async def async_call_deconz_service(service_call):
        """Dispatch a service call to the matching deCONZ handler."""
        handlers = {
            SERVICE_CONFIGURE_DEVICE: async_configure_service,
            SERVICE_DEVICE_REFRESH: async_refresh_devices_service,
            SERVICE_REMOVE_ORPHANED_ENTRIES: async_remove_orphaned_entries_service,
        }
        handler = handlers.get(service_call.service)
        if handler is not None:
            await handler(hass, service_call.data)

    # Register every service with its validation schema.
    for service, schema in (
        (SERVICE_CONFIGURE_DEVICE, SERVICE_CONFIGURE_DEVICE_SCHEMA),
        (SERVICE_DEVICE_REFRESH, SELECT_GATEWAY_SCHEMA),
        (SERVICE_REMOVE_ORPHANED_ENTRIES, SELECT_GATEWAY_SCHEMA),
    ):
        hass.services.async_register(
            DOMAIN, service, async_call_deconz_service, schema=schema
        )
async def async_unload_services(hass):
    """Unload deCONZ services."""
    # Nothing to do when the services were never registered.
    if not hass.data.get(DECONZ_SERVICES):
        return

    hass.data[DECONZ_SERVICES] = False

    for service in (
        SERVICE_CONFIGURE_DEVICE,
        SERVICE_DEVICE_REFRESH,
        SERVICE_REMOVE_ORPHANED_ENTRIES,
    ):
        hass.services.async_remove(DOMAIN, service)
async def async_configure_service(hass, data):
    """Set an attribute of a device in deCONZ.

    The REST target is resolved from "entity" and/or "field": when an
    entity is given it is mapped to its deCONZ path (e.g. '/lights/1')
    and "field" is treated as a subpath (e.g. '/state'); without an
    entity, "field" must be the full path (e.g. '/lights/1/state').
    "data" is the JSON payload to write, e.g. {"on": true}:

        {
            "field": "/lights/1/state",
            "data": {"on": true}
        }

    See Dresden Elektroniks REST API documentation for details:
    http://dresden-elektronik.github.io/deconz-rest-doc/rest/
    """
    # Default to the master gateway unless a bridge id selects another one.
    gateway = get_master_gateway(hass)
    if CONF_BRIDGE_ID in data:
        gateway = hass.data[DOMAIN][normalize_bridge_id(data[CONF_BRIDGE_ID])]

    field = data.get(SERVICE_FIELD, "")
    entity_id = data.get(SERVICE_ENTITY)
    payload = data[SERVICE_DATA]

    if entity_id:
        try:
            # Prefix the field with the entity's deCONZ path.
            field = gateway.deconz_ids[entity_id] + field
        except KeyError:
            LOGGER.error("Could not find the entity %s", entity_id)
            return

    await gateway.api.request("put", field, json=payload)
async def async_refresh_devices_service(hass, data):
    """Refresh available devices from deCONZ."""
    # Default to the master gateway unless a bridge id selects another one.
    gateway = get_master_gateway(hass)
    if CONF_BRIDGE_ID in data:
        gateway = hass.data[DOMAIN][normalize_bridge_id(data[CONF_BRIDGE_ID])]

    # Suppress state updates while the full state is being re-fetched so
    # stale events cannot race the refresh.
    gateway.ignore_state_updates = True
    await gateway.api.refresh_state()
    gateway.ignore_state_updates = False

    for device_type in (NEW_GROUP, NEW_LIGHT, NEW_SCENE, NEW_SENSOR):
        gateway.async_add_device_callback(device_type, force=True)
async def async_remove_orphaned_entries_service(hass, data):
    """Remove orphaned deCONZ entries from device and entity registries.

    Collects every device/entity registered for the gateway's config
    entry, then prunes from the removal lists everything that is still
    alive (the gateway host/service devices, devices backing events, and
    entities known to the gateway). What remains is deleted.
    """
    gateway = get_master_gateway(hass)
    if CONF_BRIDGE_ID in data:
        gateway = hass.data[DOMAIN][normalize_bridge_id(data[CONF_BRIDGE_ID])]

    device_registry, entity_registry = await asyncio.gather(
        hass.helpers.device_registry.async_get_registry(),
        hass.helpers.entity_registry.async_get_registry(),
    )

    entity_entries = async_entries_for_config_entry(
        entity_registry, gateway.config_entry.entry_id
    )

    entities_to_be_removed = []
    devices_to_be_removed = [
        entry.id
        for entry in device_registry.devices.values()
        if gateway.config_entry.entry_id in entry.config_entries
    ]

    # Don't remove the Gateway host entry.
    # async_get_device returns None when no entry matches, so guard
    # before dereferencing ``.id``.
    gateway_host = device_registry.async_get_device(
        connections={(CONNECTION_NETWORK_MAC, gateway.api.config.mac)},
        identifiers=set(),
    )
    if gateway_host is not None and gateway_host.id in devices_to_be_removed:
        devices_to_be_removed.remove(gateway_host.id)

    # Don't remove the Gateway service entry
    gateway_service = device_registry.async_get_device(
        identifiers={(DOMAIN, gateway.api.config.bridgeid)}, connections=set()
    )
    if gateway_service is not None and gateway_service.id in devices_to_be_removed:
        devices_to_be_removed.remove(gateway_service.id)

    # Don't remove devices belonging to available events
    for event in gateway.events:
        if event.device_id in devices_to_be_removed:
            devices_to_be_removed.remove(event.device_id)

    for entry in entity_entries:

        # Don't remove available entities
        if entry.unique_id in gateway.entities[entry.domain]:

            # Don't remove devices with available entities
            if entry.device_id in devices_to_be_removed:
                devices_to_be_removed.remove(entry.device_id)
            continue

        # Remove entities that are not available
        entities_to_be_removed.append(entry.entity_id)

    # Remove unavailable entities
    for entity_id in entities_to_be_removed:
        entity_registry.async_remove(entity_id)

    # Remove devices that don't belong to any entity
    for device_id in devices_to_be_removed:
        if not async_entries_for_device(
            entity_registry, device_id, include_disabled_entities=True
        ):
            device_registry.async_remove_device(device_id)
| |
# -*- coding: utf-8 -*-
"""
github3.gists.gist
==================
This module contains the Gist class alone for simplicity.
"""
from __future__ import unicode_literals
from json import dumps
from ..models import GitHubCore
from ..decorators import requires_auth
from .comment import GistComment
from .file import GistFile
from .history import GistHistory
from ..users import User
class Gist(GitHubCore):

    """This object holds all the information returned by Github about a gist.

    With it you can comment on or fork the gist (assuming you are
    authenticated), edit or delete the gist (assuming you own it). You can
    also "star" or "unstar" the gist (again assuming you have authenticated).

    Two gist instances can be checked like so::

        g1 == g2
        g1 != g2

    And is equivalent to::

        g1.id == g2.id
        g1.id != g2.id

    See also: http://developer.github.com/v3/gists/

    """

    def _update_attributes(self, data):
        #: Number of comments on this gist
        self.comments_count = data.get('comments', 0)

        #: Unique id for this gist.
        self.id = '{0}'.format(data.get('id', ''))

        #: Description of the gist
        self.description = data.get('description', '')

        # e.g. https://api.github.com/gists/1
        self._api = data.get('url', '')

        #: URL of this gist at Github, e.g., https://gist.github.com/1
        self.html_url = data.get('html_url')

        #: Boolean describing if the gist is public or private
        self.public = data.get('public')

        self._forks = data.get('forks', [])

        #: Git URL to pull this gist, e.g., git://gist.github.com/1.git
        self.git_pull_url = data.get('git_pull_url', '')

        #: Git URL to push to gist, e.g., git@gist.github.com/1.git
        self.git_push_url = data.get('git_push_url', '')

        #: datetime object representing when the gist was created.
        self.created_at = self._strptime(data.get('created_at'))

        #: datetime object representing the last time this gist was updated.
        self.updated_at = self._strptime(data.get('updated_at'))

        owner = data.get('owner')
        #: :class:`User <github3.users.User>` object representing the owner of
        #: the gist.
        self.owner = User(owner, self) if owner else None

        self._files = [GistFile(data['files'][f]) for f in data['files']]

        #: History of this gist, list of
        #: :class:`GistHistory <github3.gists.history.GistHistory>`
        self.history = [GistHistory(h, self) for h in data.get('history', [])]

        # New urls

        #: Comments URL (not a template)
        self.comments_url = data.get('comments_url', '')

        #: Commits URL (not a template)
        self.commits_url = data.get('commits_url', '')

        #: Forks URL (not a template)
        self.forks_url = data.get('forks_url', '')

        #: Whether the content of this Gist has been truncated or not
        self.truncated = data.get('truncated')

    def __str__(self):
        return self.id

    def _repr(self):
        return '<Gist [{0}]>'.format(self.id)

    @requires_auth
    def create_comment(self, body):
        """Create a comment on this gist.

        :param str body: (required), body of the comment
        :returns: :class:`GistComment <github3.gists.comment.GistComment>`
        """
        json = None
        if body:
            url = self._build_url('comments', base_url=self._api)
            json = self._json(self._post(url, data={'body': body}), 201)
        return self._instance_or_null(GistComment, json)

    @requires_auth
    def delete(self):
        """Delete this gist.

        :returns: bool -- whether the deletion was successful
        """
        return self._boolean(self._delete(self._api), 204, 404)

    @requires_auth
    def edit(self, description='', files=None):
        """Edit this gist.

        :param str description: (optional), description of the gist
        :param dict files: (optional), files that make up this gist; the
            key(s) should be the file name(s) and the values should be another
            (optional) dictionary with (optional) keys: 'content' and
            'filename' where the former is the content of the file and the
            latter is the new name of the file.
        :returns: bool -- whether the edit was successful
        """
        # NOTE: ``files`` defaults to None (not a mutable ``{}`` default,
        # which would be shared across calls); a falsy value is ignored.
        data = {}
        json = None
        if description:
            data['description'] = description
        if files:
            data['files'] = files
        if data:
            json = self._json(self._patch(self._api, data=dumps(data)), 200)
        if json:
            self._update_attributes(json)
            return True
        return False

    @requires_auth
    def fork(self):
        """Fork this gist.

        :returns: :class:`Gist <Gist>` if successful, ``None`` otherwise
        """
        url = self._build_url('forks', base_url=self._api)
        json = self._json(self._post(url), 201)
        return self._instance_or_null(Gist, json)

    @requires_auth
    def is_starred(self):
        """Check to see if this gist is starred by the authenticated user.

        :returns: bool -- True if it is starred, False otherwise
        """
        url = self._build_url('star', base_url=self._api)
        return self._boolean(self._get(url), 204, 404)

    def comments(self, number=-1, etag=None):
        """Iterate over comments on this gist.

        :param int number: (optional), number of comments to iterate over.
            Default: -1 will iterate over all comments on the gist
        :param str etag: (optional), ETag from a previous request to the same
            endpoint
        :returns: generator of
            :class:`GistComment <github3.gists.comment.GistComment>`
        """
        url = self._build_url('comments', base_url=self._api)
        return self._iter(int(number), url, GistComment, etag=etag)

    def commits(self, number=-1, etag=None):
        """Iterate over the commits on this gist.

        These commits will be requested from the API and should be the same as
        what is in ``Gist.history``.

        .. versionadded:: 0.6

        .. versionchanged:: 0.9

            Added param ``etag``.

        :param int number: (optional), number of commits to iterate over.
            Default: -1 will iterate over all commits associated with this
            gist.
        :param str etag: (optional), ETag from a previous request to this
            endpoint.
        :returns: generator of
            :class:`GistHistory <github3.gists.history.GistHistory>`
        """
        url = self._build_url('commits', base_url=self._api)
        # Pass the etag through, matching ``comments``/``forks``; it was
        # documented but previously dropped here.
        return self._iter(int(number), url, GistHistory, etag=etag)

    def files(self):
        """Iterator over the files stored in this gist.

        :returns: generator of :class:`GistFile <github3.gists.file.GistFile>`
        """
        return iter(self._files)

    def forks(self, number=-1, etag=None):
        """Iterator of forks of this gist.

        .. versionchanged:: 0.9

            Added params ``number`` and ``etag``.

        :param int number: (optional), number of forks to iterate over.
            Default: -1 will iterate over all forks of this gist.
        :param str etag: (optional), ETag from a previous request to this
            endpoint.
        :returns: generator of :class:`Gist <Gist>`
        """
        url = self._build_url('forks', base_url=self._api)
        return self._iter(int(number), url, Gist, etag=etag)

    @requires_auth
    def star(self):
        """Star this gist.

        :returns: bool -- True if successful, False otherwise
        """
        url = self._build_url('star', base_url=self._api)
        return self._boolean(self._put(url), 204, 404)

    @requires_auth
    def unstar(self):
        """Un-star this gist.

        :returns: bool -- True if successful, False otherwise
        """
        url = self._build_url('star', base_url=self._api)
        return self._boolean(self._delete(url), 204, 404)
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'EstadoActual'
db.create_table(u'produccion_cafe_finca_estadoactual', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'produccion_cafe_finca', ['EstadoActual'])
# Adding model 'AreaCafe'
db.create_table(u'produccion_cafe_finca_areacafe', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('estado', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_cafe_finca.EstadoActual'])),
('once', self.gf('django.db.models.fields.FloatField')()),
('doce', self.gf('django.db.models.fields.FloatField')()),
('trece', self.gf('django.db.models.fields.FloatField')()),
('catorse', self.gf('django.db.models.fields.FloatField')()),
('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
))
db.send_create_signal(u'produccion_cafe_finca', ['AreaCafe'])
# Adding model 'Plantio'
db.create_table(u'produccion_cafe_finca_plantio', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'produccion_cafe_finca', ['Plantio'])
# Adding model 'Variedades'
db.create_table(u'produccion_cafe_finca_variedades', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'produccion_cafe_finca', ['Variedades'])
# Adding model 'VariedadEdadRoya'
db.create_table(u'produccion_cafe_finca_variedadedadroya', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre_plantio', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_cafe_finca.Plantio'])),
('area', self.gf('django.db.models.fields.FloatField')()),
('produccion_2012', self.gf('django.db.models.fields.FloatField')()),
('produccion_2013', self.gf('django.db.models.fields.FloatField')()),
('produccion_2014', self.gf('django.db.models.fields.FloatField')()),
('nivel_roya', self.gf('django.db.models.fields.FloatField')()),
('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
))
db.send_create_signal(u'produccion_cafe_finca', ['VariedadEdadRoya'])
# Adding M2M table for field variedades on 'VariedadEdadRoya'
db.create_table(u'produccion_cafe_finca_variedadedadroya_variedades', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('variedadedadroya', models.ForeignKey(orm[u'produccion_cafe_finca.variedadedadroya'], null=False)),
('variedades', models.ForeignKey(orm[u'produccion_cafe_finca.variedades'], null=False))
))
db.create_unique(u'produccion_cafe_finca_variedadedadroya_variedades', ['variedadedadroya_id', 'variedades_id'])
# Adding model 'ResistenteRoya'
db.create_table(u'produccion_cafe_finca_resistenteroya', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'produccion_cafe_finca', ['ResistenteRoya'])
# Adding model 'Semilla'
db.create_table(u'produccion_cafe_finca_semilla', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'produccion_cafe_finca', ['Semilla'])
# Adding model 'DecideSembrar'
db.create_table(u'produccion_cafe_finca_decidesembrar', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'produccion_cafe_finca', ['DecideSembrar'])
# Adding model 'Criterios'
db.create_table(u'produccion_cafe_finca_criterios', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'produccion_cafe_finca', ['Criterios'])
# Adding model 'ProduccionVivero'
db.create_table(u'produccion_cafe_finca_produccionvivero', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('vivero_finca', self.gf('django.db.models.fields.IntegerField')()),
('plantas_vivero', self.gf('django.db.models.fields.FloatField')()),
('plantas_finca', self.gf('django.db.models.fields.FloatField')()),
('plantas_vender', self.gf('django.db.models.fields.FloatField')()),
('plantas_injertadas', self.gf('django.db.models.fields.FloatField')()),
('edad_planta', self.gf('django.db.models.fields.FloatField')()),
('costo_planta_caturra', self.gf('django.db.models.fields.FloatField')()),
('costo_planta_catimore', self.gf('django.db.models.fields.FloatField')()),
('costo_planta_hibridas', self.gf('django.db.models.fields.FloatField')()),
('pagar_caturra', self.gf('django.db.models.fields.FloatField')()),
('pagar_catimore', self.gf('django.db.models.fields.FloatField')()),
('pagar_hibrida', self.gf('django.db.models.fields.FloatField')()),
('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
))
db.send_create_signal(u'produccion_cafe_finca', ['ProduccionVivero'])
# Adding M2M table for field variedad on 'ProduccionVivero'
db.create_table(u'produccion_cafe_finca_produccionvivero_variedad', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('produccionvivero', models.ForeignKey(orm[u'produccion_cafe_finca.produccionvivero'], null=False)),
('variedades', models.ForeignKey(orm[u'produccion_cafe_finca.variedades'], null=False))
))
db.create_unique(u'produccion_cafe_finca_produccionvivero_variedad', ['produccionvivero_id', 'variedades_id'])
# Adding M2M table for field resistente_roya on 'ProduccionVivero'
db.create_table(u'produccion_cafe_finca_produccionvivero_resistente_roya', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('produccionvivero', models.ForeignKey(orm[u'produccion_cafe_finca.produccionvivero'], null=False)),
('resistenteroya', models.ForeignKey(orm[u'produccion_cafe_finca.resistenteroya'], null=False))
))
db.create_unique(u'produccion_cafe_finca_produccionvivero_resistente_roya', ['produccionvivero_id', 'resistenteroya_id'])
# Adding M2M table for field consigue_semilla on 'ProduccionVivero'
db.create_table(u'produccion_cafe_finca_produccionvivero_consigue_semilla', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('produccionvivero', models.ForeignKey(orm[u'produccion_cafe_finca.produccionvivero'], null=False)),
('semilla', models.ForeignKey(orm[u'produccion_cafe_finca.semilla'], null=False))
))
db.create_unique(u'produccion_cafe_finca_produccionvivero_consigue_semilla', ['produccionvivero_id', 'semilla_id'])
# Adding M2M table for field disponible on 'ProduccionVivero'
db.create_table(u'produccion_cafe_finca_produccionvivero_disponible', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('produccionvivero', models.ForeignKey(orm[u'produccion_cafe_finca.produccionvivero'], null=False)),
('variedades', models.ForeignKey(orm[u'produccion_cafe_finca.variedades'], null=False))
))
db.create_unique(u'produccion_cafe_finca_produccionvivero_disponible', ['produccionvivero_id', 'variedades_id'])
# Adding M2M table for field decide on 'ProduccionVivero'
db.create_table(u'produccion_cafe_finca_produccionvivero_decide', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('produccionvivero', models.ForeignKey(orm[u'produccion_cafe_finca.produccionvivero'], null=False)),
('decidesembrar', models.ForeignKey(orm[u'produccion_cafe_finca.decidesembrar'], null=False))
))
db.create_unique(u'produccion_cafe_finca_produccionvivero_decide', ['produccionvivero_id', 'decidesembrar_id'])
# Adding M2M table for field criterio on 'ProduccionVivero'
db.create_table(u'produccion_cafe_finca_produccionvivero_criterio', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('produccionvivero', models.ForeignKey(orm[u'produccion_cafe_finca.produccionvivero'], null=False)),
('criterios', models.ForeignKey(orm[u'produccion_cafe_finca.criterios'], null=False))
))
db.create_unique(u'produccion_cafe_finca_produccionvivero_criterio', ['produccionvivero_id', 'criterios_id'])
# Adding model 'Manejos'
db.create_table(u'produccion_cafe_finca_manejos', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'produccion_cafe_finca', ['Manejos'])
# Adding model 'ManejoCafetales'
db.create_table(u'produccion_cafe_finca_manejocafetales', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('fecha', self.gf('django.db.models.fields.IntegerField')()),
('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
))
db.send_create_signal(u'produccion_cafe_finca', ['ManejoCafetales'])
# Adding M2M table for field manejo_cafeto on 'ManejoCafetales'
db.create_table(u'produccion_cafe_finca_manejocafetales_manejo_cafeto', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('manejocafetales', models.ForeignKey(orm[u'produccion_cafe_finca.manejocafetales'], null=False)),
('manejos', models.ForeignKey(orm[u'produccion_cafe_finca.manejos'], null=False))
))
db.create_unique(u'produccion_cafe_finca_manejocafetales_manejo_cafeto', ['manejocafetales_id', 'manejos_id'])
# Adding M2M table for field manejo_sombra on 'ManejoCafetales'
db.create_table(u'produccion_cafe_finca_manejocafetales_manejo_sombra', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('manejocafetales', models.ForeignKey(orm[u'produccion_cafe_finca.manejocafetales'], null=False)),
('manejos', models.ForeignKey(orm[u'produccion_cafe_finca.manejos'], null=False))
))
db.create_unique(u'produccion_cafe_finca_manejocafetales_manejo_sombra', ['manejocafetales_id', 'manejos_id'])
# Adding M2M table for field fertilizante_suelo on 'ManejoCafetales'
db.create_table(u'produccion_cafe_finca_manejocafetales_fertilizante_suelo', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('manejocafetales', models.ForeignKey(orm[u'produccion_cafe_finca.manejocafetales'], null=False)),
('manejos', models.ForeignKey(orm[u'produccion_cafe_finca.manejos'], null=False))
))
db.create_unique(u'produccion_cafe_finca_manejocafetales_fertilizante_suelo', ['manejocafetales_id', 'manejos_id'])
# Adding M2M table for field fertilizante_foliares on 'ManejoCafetales'
db.create_table(u'produccion_cafe_finca_manejocafetales_fertilizante_foliares', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('manejocafetales', models.ForeignKey(orm[u'produccion_cafe_finca.manejocafetales'], null=False)),
('manejos', models.ForeignKey(orm[u'produccion_cafe_finca.manejos'], null=False))
))
db.create_unique(u'produccion_cafe_finca_manejocafetales_fertilizante_foliares', ['manejocafetales_id', 'manejos_id'])
# Adding M2M table for field fungicidas on 'ManejoCafetales'
db.create_table(u'produccion_cafe_finca_manejocafetales_fungicidas', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('manejocafetales', models.ForeignKey(orm[u'produccion_cafe_finca.manejocafetales'], null=False)),
('manejos', models.ForeignKey(orm[u'produccion_cafe_finca.manejos'], null=False))
))
db.create_unique(u'produccion_cafe_finca_manejocafetales_fungicidas', ['manejocafetales_id', 'manejos_id'])
# Adding M2M table for field insecticidas on 'ManejoCafetales'
db.create_table(u'produccion_cafe_finca_manejocafetales_insecticidas', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('manejocafetales', models.ForeignKey(orm[u'produccion_cafe_finca.manejocafetales'], null=False)),
('manejos', models.ForeignKey(orm[u'produccion_cafe_finca.manejos'], null=False))
))
db.create_unique(u'produccion_cafe_finca_manejocafetales_insecticidas', ['manejocafetales_id', 'manejos_id'])
# Adding M2M table for field nematicidas on 'ManejoCafetales'
db.create_table(u'produccion_cafe_finca_manejocafetales_nematicidas', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('manejocafetales', models.ForeignKey(orm[u'produccion_cafe_finca.manejocafetales'], null=False)),
('manejos', models.ForeignKey(orm[u'produccion_cafe_finca.manejos'], null=False))
))
db.create_unique(u'produccion_cafe_finca_manejocafetales_nematicidas', ['manejocafetales_id', 'manejos_id'])
# Adding model 'MesesManejoCafe'
db.create_table(u'produccion_cafe_finca_mesesmanejocafe', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('fecha', self.gf('django.db.models.fields.IntegerField')()),
('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
))
db.send_create_signal(u'produccion_cafe_finca', ['MesesManejoCafe'])
# Adding M2M table for field mes_manejo_cafeto on 'MesesManejoCafe'
db.create_table(u'produccion_cafe_finca_mesesmanejocafe_mes_manejo_cafeto', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mesesmanejocafe', models.ForeignKey(orm[u'produccion_cafe_finca.mesesmanejocafe'], null=False)),
('meses', models.ForeignKey(orm[u'encuesta.meses'], null=False))
))
db.create_unique(u'produccion_cafe_finca_mesesmanejocafe_mes_manejo_cafeto', ['mesesmanejocafe_id', 'meses_id'])
# Adding M2M table for field mes_manejo_sombra on 'MesesManejoCafe'
db.create_table(u'produccion_cafe_finca_mesesmanejocafe_mes_manejo_sombra', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mesesmanejocafe', models.ForeignKey(orm[u'produccion_cafe_finca.mesesmanejocafe'], null=False)),
('meses', models.ForeignKey(orm[u'encuesta.meses'], null=False))
))
db.create_unique(u'produccion_cafe_finca_mesesmanejocafe_mes_manejo_sombra', ['mesesmanejocafe_id', 'meses_id'])
# Adding M2M table for field mes_fertilizante_suelo on 'MesesManejoCafe'
db.create_table(u'produccion_cafe_finca_mesesmanejocafe_mes_fertilizante_suelo', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mesesmanejocafe', models.ForeignKey(orm[u'produccion_cafe_finca.mesesmanejocafe'], null=False)),
('meses', models.ForeignKey(orm[u'encuesta.meses'], null=False))
))
db.create_unique(u'produccion_cafe_finca_mesesmanejocafe_mes_fertilizante_suelo', ['mesesmanejocafe_id', 'meses_id'])
# Adding M2M table for field mes_fertilizante_foliares on 'MesesManejoCafe'
db.create_table(u'produccion_cafe_finca_mesesmanejocafe_mes_fertilizante_foliares', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mesesmanejocafe', models.ForeignKey(orm[u'produccion_cafe_finca.mesesmanejocafe'], null=False)),
('meses', models.ForeignKey(orm[u'encuesta.meses'], null=False))
))
db.create_unique(u'produccion_cafe_finca_mesesmanejocafe_mes_fertilizante_foliares', ['mesesmanejocafe_id', 'meses_id'])
# Adding M2M table for field mes_fungicidas on 'MesesManejoCafe'
db.create_table(u'produccion_cafe_finca_mesesmanejocafe_mes_fungicidas', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mesesmanejocafe', models.ForeignKey(orm[u'produccion_cafe_finca.mesesmanejocafe'], null=False)),
('meses', models.ForeignKey(orm[u'encuesta.meses'], null=False))
))
db.create_unique(u'produccion_cafe_finca_mesesmanejocafe_mes_fungicidas', ['mesesmanejocafe_id', 'meses_id'])
# Adding M2M table for field mes_insecticidas on 'MesesManejoCafe'
db.create_table(u'produccion_cafe_finca_mesesmanejocafe_mes_insecticidas', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mesesmanejocafe', models.ForeignKey(orm[u'produccion_cafe_finca.mesesmanejocafe'], null=False)),
('meses', models.ForeignKey(orm[u'encuesta.meses'], null=False))
))
db.create_unique(u'produccion_cafe_finca_mesesmanejocafe_mes_insecticidas', ['mesesmanejocafe_id', 'meses_id'])
# Adding M2M table for field mes_nematicidas on 'MesesManejoCafe'
db.create_table(u'produccion_cafe_finca_mesesmanejocafe_mes_nematicidas', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('mesesmanejocafe', models.ForeignKey(orm[u'produccion_cafe_finca.mesesmanejocafe'], null=False)),
('meses', models.ForeignKey(orm[u'encuesta.meses'], null=False))
))
db.create_unique(u'produccion_cafe_finca_mesesmanejocafe_mes_nematicidas', ['mesesmanejocafe_id', 'meses_id'])
# Adding model 'TiposInsumos'
db.create_table(u'produccion_cafe_finca_tiposinsumos', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'produccion_cafe_finca', ['TiposInsumos'])
# Adding model 'NombreTipos'
db.create_table(u'produccion_cafe_finca_nombretipos', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'produccion_cafe_finca', ['NombreTipos'])
# Adding model 'UsoInsumos'
db.create_table(u'produccion_cafe_finca_usoinsumos', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('tipo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_cafe_finca.TiposInsumos'])),
('nombre', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_cafe_finca.NombreTipos'])),
('aplicaciones', self.gf('django.db.models.fields.FloatField')()),
('cantidad', self.gf('django.db.models.fields.FloatField')()),
('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
))
db.send_create_signal(u'produccion_cafe_finca', ['UsoInsumos'])
# Adding M2M table for field momento on 'UsoInsumos'
db.create_table(u'produccion_cafe_finca_usoinsumos_momento', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('usoinsumos', models.ForeignKey(orm[u'produccion_cafe_finca.usoinsumos'], null=False)),
('meses', models.ForeignKey(orm[u'encuesta.meses'], null=False))
))
db.create_unique(u'produccion_cafe_finca_usoinsumos_momento', ['usoinsumos_id', 'meses_id'])
# Adding model 'Opciones'
db.create_table(u'produccion_cafe_finca_opciones', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'produccion_cafe_finca', ['Opciones'])
# Adding model 'UsoOpcionesAgroecologica'
db.create_table(u'produccion_cafe_finca_usoopcionesagroecologica', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('opcion', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_cafe_finca.Opciones'])),
('nivel', self.gf('django.db.models.fields.IntegerField')()),
('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
))
db.send_create_signal(u'produccion_cafe_finca', ['UsoOpcionesAgroecologica'])
# Adding model 'BeneficioSeco'
db.create_table(u'produccion_cafe_finca_beneficioseco', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'produccion_cafe_finca', ['BeneficioSeco'])
# Adding model 'CalidadCafe'
db.create_table(u'produccion_cafe_finca_calidadcafe', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'produccion_cafe_finca', ['CalidadCafe'])
# Adding model 'Beneficiado'
db.create_table(u'produccion_cafe_finca_beneficiado', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('cortes', self.gf('django.db.models.fields.IntegerField')()),
('separan', self.gf('django.db.models.fields.IntegerField')()),
('despulpan_fermentan', self.gf('django.db.models.fields.IntegerField')()),
('estado', self.gf('django.db.models.fields.IntegerField')()),
('calibran', self.gf('django.db.models.fields.IntegerField')()),
('revisan', self.gf('django.db.models.fields.IntegerField')()),
('despulpar', self.gf('django.db.models.fields.IntegerField')()),
('fermentan', self.gf('django.db.models.fields.IntegerField')()),
('orean', self.gf('django.db.models.fields.IntegerField')()),
('beneficiado_seco', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_cafe_finca.BeneficioSeco'])),
('calidad', self.gf('django.db.models.fields.IntegerField')()),
('determina_calidad', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['produccion_cafe_finca.CalidadCafe'])),
('precio', self.gf('django.db.models.fields.IntegerField')()),
('cuanto', self.gf('django.db.models.fields.FloatField')()),
('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
))
db.send_create_signal(u'produccion_cafe_finca', ['Beneficiado'])
# Adding model 'Comercializacion'
db.create_table(u'produccion_cafe_finca_comercializacion', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('fecha', self.gf('django.db.models.fields.IntegerField')()),
('p_total', self.gf('django.db.models.fields.FloatField')()),
('i_venta_cafe', self.gf('django.db.models.fields.FloatField')()),
('i_precio', self.gf('django.db.models.fields.FloatField')()),
('c_venta', self.gf('django.db.models.fields.FloatField')()),
('c_precio', self.gf('django.db.models.fields.FloatField')()),
('e_venta', self.gf('django.db.models.fields.FloatField')()),
('e_precio', self.gf('django.db.models.fields.FloatField')()),
('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
))
db.send_create_signal(u'produccion_cafe_finca', ['Comercializacion'])
# Adding model 'Credito'
db.create_table(u'produccion_cafe_finca_credito', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('fecha', self.gf('django.db.models.fields.IntegerField')()),
('monto', self.gf('django.db.models.fields.FloatField')()),
('cobertura', self.gf('django.db.models.fields.FloatField')()),
('credito_mediano', self.gf('django.db.models.fields.FloatField')()),
('necesidad', self.gf('django.db.models.fields.FloatField')()),
('credito_largo', self.gf('django.db.models.fields.FloatField')()),
('cobertura_necesidad', self.gf('django.db.models.fields.FloatField')()),
('facilidad', self.gf('django.db.models.fields.IntegerField')()),
('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['encuesta.Encuesta'])),
))
db.send_create_signal(u'produccion_cafe_finca', ['Credito'])
def backwards(self, orm):
    """Reverse forwards(): drop every model table and every
    auto-created M2M join table added by this migration.

    The tables are removed in exactly the order the original
    hand-expanded db.delete_table() calls used; M2M join tables are
    interleaved immediately after their owning model's table.
    """
    # NOTE: model tables keep their original u'' literals and M2M join
    # tables their plain '' literals, byte-for-byte as first written.
    tables = [
        u'produccion_cafe_finca_estadoactual',                           # model 'EstadoActual'
        u'produccion_cafe_finca_areacafe',                               # model 'AreaCafe'
        u'produccion_cafe_finca_plantio',                                # model 'Plantio'
        u'produccion_cafe_finca_variedades',                             # model 'Variedades'
        u'produccion_cafe_finca_variedadedadroya',                       # model 'VariedadEdadRoya'
        'produccion_cafe_finca_variedadedadroya_variedades',             # M2M field 'variedades'
        u'produccion_cafe_finca_resistenteroya',                         # model 'ResistenteRoya'
        u'produccion_cafe_finca_semilla',                                # model 'Semilla'
        u'produccion_cafe_finca_decidesembrar',                          # model 'DecideSembrar'
        u'produccion_cafe_finca_criterios',                              # model 'Criterios'
        u'produccion_cafe_finca_produccionvivero',                       # model 'ProduccionVivero'
        'produccion_cafe_finca_produccionvivero_variedad',               # M2M field 'variedad'
        'produccion_cafe_finca_produccionvivero_resistente_roya',        # M2M field 'resistente_roya'
        'produccion_cafe_finca_produccionvivero_consigue_semilla',       # M2M field 'consigue_semilla'
        'produccion_cafe_finca_produccionvivero_disponible',             # M2M field 'disponible'
        'produccion_cafe_finca_produccionvivero_decide',                 # M2M field 'decide'
        'produccion_cafe_finca_produccionvivero_criterio',               # M2M field 'criterio'
        u'produccion_cafe_finca_manejos',                                # model 'Manejos'
        u'produccion_cafe_finca_manejocafetales',                        # model 'ManejoCafetales'
        'produccion_cafe_finca_manejocafetales_manejo_cafeto',           # M2M field 'manejo_cafeto'
        'produccion_cafe_finca_manejocafetales_manejo_sombra',           # M2M field 'manejo_sombra'
        'produccion_cafe_finca_manejocafetales_fertilizante_suelo',      # M2M field 'fertilizante_suelo'
        'produccion_cafe_finca_manejocafetales_fertilizante_foliares',   # M2M field 'fertilizante_foliares'
        'produccion_cafe_finca_manejocafetales_fungicidas',              # M2M field 'fungicidas'
        'produccion_cafe_finca_manejocafetales_insecticidas',            # M2M field 'insecticidas'
        'produccion_cafe_finca_manejocafetales_nematicidas',             # M2M field 'nematicidas'
        u'produccion_cafe_finca_mesesmanejocafe',                        # model 'MesesManejoCafe'
        'produccion_cafe_finca_mesesmanejocafe_mes_manejo_cafeto',       # M2M field 'mes_manejo_cafeto'
        'produccion_cafe_finca_mesesmanejocafe_mes_manejo_sombra',       # M2M field 'mes_manejo_sombra'
        'produccion_cafe_finca_mesesmanejocafe_mes_fertilizante_suelo',  # M2M field 'mes_fertilizante_suelo'
        'produccion_cafe_finca_mesesmanejocafe_mes_fertilizante_foliares',  # M2M field 'mes_fertilizante_foliares'
        'produccion_cafe_finca_mesesmanejocafe_mes_fungicidas',          # M2M field 'mes_fungicidas'
        'produccion_cafe_finca_mesesmanejocafe_mes_insecticidas',        # M2M field 'mes_insecticidas'
        'produccion_cafe_finca_mesesmanejocafe_mes_nematicidas',         # M2M field 'mes_nematicidas'
        u'produccion_cafe_finca_tiposinsumos',                           # model 'TiposInsumos'
        u'produccion_cafe_finca_nombretipos',                            # model 'NombreTipos'
        u'produccion_cafe_finca_usoinsumos',                             # model 'UsoInsumos'
        'produccion_cafe_finca_usoinsumos_momento',                      # M2M field 'momento'
        u'produccion_cafe_finca_opciones',                               # model 'Opciones'
        u'produccion_cafe_finca_usoopcionesagroecologica',               # model 'UsoOpcionesAgroecologica'
        u'produccion_cafe_finca_beneficioseco',                          # model 'BeneficioSeco'
        u'produccion_cafe_finca_calidadcafe',                            # model 'CalidadCafe'
        u'produccion_cafe_finca_beneficiado',                            # model 'Beneficiado'
        u'produccion_cafe_finca_comercializacion',                       # model 'Comercializacion'
        u'produccion_cafe_finca_credito',                                # model 'Credito'
    ]
    for table in tables:
        db.delete_table(table)
# Frozen ORM state generated by South (--freeze): maps
# 'app_label.modelname' to the field definitions in effect when this
# migration was created. South uses it to build the `orm` object passed
# to forwards()/backwards(). Auto-generated -- do not edit by hand.
models = {
    u'encuesta.duenofinca': {
        'Meta': {'object_name': 'DuenoFinca'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
    },
    u'encuesta.encuesta': {
        'Meta': {'object_name': 'Encuesta'},
        'altitud': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
        'beneficiario': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Organizacion']"}),
        'cedula': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
        'comunidad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Comunidad']"}),
        'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
        'dueno': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.DuenoFinca']"}),
        'fecha': ('django.db.models.fields.DateField', [], {}),
        'finca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'latitud': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
        'longitud': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
        'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
        'nombre': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Entrevistado']"}),
        'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
        'recolector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Recolector']"}),
        'sexo': ('django.db.models.fields.IntegerField', [], {})
    },
    u'encuesta.entrevistado': {
        'Meta': {'object_name': 'Entrevistado'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
    },
    u'encuesta.meses': {
        'Meta': {'object_name': 'Meses'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
    },
    u'encuesta.organizacion': {
        'Meta': {'object_name': 'Organizacion'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
    },
    u'encuesta.recolector': {
        'Meta': {'object_name': 'Recolector'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
    },
    u'lugar.comunidad': {
        'Meta': {'object_name': 'Comunidad'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
    },
    u'lugar.departamento': {
        'Meta': {'object_name': 'Departamento'},
        'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
        'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
        'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
        'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
    },
    u'lugar.municipio': {
        'Meta': {'ordering': "['departamento__nombre']", 'object_name': 'Municipio'},
        'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
        'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
        'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
        'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
        'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
        'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
    },
    u'lugar.pais': {
        'Meta': {'object_name': 'Pais'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
    },
    u'produccion_cafe_finca.areacafe': {
        'Meta': {'object_name': 'AreaCafe'},
        'catorse': ('django.db.models.fields.FloatField', [], {}),
        'doce': ('django.db.models.fields.FloatField', [], {}),
        'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
        'estado': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_cafe_finca.EstadoActual']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'once': ('django.db.models.fields.FloatField', [], {}),
        'trece': ('django.db.models.fields.FloatField', [], {})
    },
    u'produccion_cafe_finca.beneficiado': {
        'Meta': {'object_name': 'Beneficiado'},
        'beneficiado_seco': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_cafe_finca.BeneficioSeco']"}),
        'calibran': ('django.db.models.fields.IntegerField', [], {}),
        'calidad': ('django.db.models.fields.IntegerField', [], {}),
        'cortes': ('django.db.models.fields.IntegerField', [], {}),
        'cuanto': ('django.db.models.fields.FloatField', [], {}),
        'despulpan_fermentan': ('django.db.models.fields.IntegerField', [], {}),
        'despulpar': ('django.db.models.fields.IntegerField', [], {}),
        'determina_calidad': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_cafe_finca.CalidadCafe']"}),
        'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
        'estado': ('django.db.models.fields.IntegerField', [], {}),
        'fermentan': ('django.db.models.fields.IntegerField', [], {}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'orean': ('django.db.models.fields.IntegerField', [], {}),
        'precio': ('django.db.models.fields.IntegerField', [], {}),
        'revisan': ('django.db.models.fields.IntegerField', [], {}),
        'separan': ('django.db.models.fields.IntegerField', [], {})
    },
    u'produccion_cafe_finca.beneficioseco': {
        'Meta': {'object_name': 'BeneficioSeco'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
    },
    u'produccion_cafe_finca.calidadcafe': {
        'Meta': {'object_name': 'CalidadCafe'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
    },
    u'produccion_cafe_finca.comercializacion': {
        'Meta': {'object_name': 'Comercializacion'},
        'c_precio': ('django.db.models.fields.FloatField', [], {}),
        'c_venta': ('django.db.models.fields.FloatField', [], {}),
        'e_precio': ('django.db.models.fields.FloatField', [], {}),
        'e_venta': ('django.db.models.fields.FloatField', [], {}),
        'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
        'fecha': ('django.db.models.fields.IntegerField', [], {}),
        'i_precio': ('django.db.models.fields.FloatField', [], {}),
        'i_venta_cafe': ('django.db.models.fields.FloatField', [], {}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'p_total': ('django.db.models.fields.FloatField', [], {})
    },
    u'produccion_cafe_finca.credito': {
        'Meta': {'object_name': 'Credito'},
        'cobertura': ('django.db.models.fields.FloatField', [], {}),
        'cobertura_necesidad': ('django.db.models.fields.FloatField', [], {}),
        'credito_largo': ('django.db.models.fields.FloatField', [], {}),
        'credito_mediano': ('django.db.models.fields.FloatField', [], {}),
        'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
        'facilidad': ('django.db.models.fields.IntegerField', [], {}),
        'fecha': ('django.db.models.fields.IntegerField', [], {}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'monto': ('django.db.models.fields.FloatField', [], {}),
        'necesidad': ('django.db.models.fields.FloatField', [], {})
    },
    u'produccion_cafe_finca.criterios': {
        'Meta': {'object_name': 'Criterios'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    u'produccion_cafe_finca.decidesembrar': {
        'Meta': {'object_name': 'DecideSembrar'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    u'produccion_cafe_finca.estadoactual': {
        'Meta': {'object_name': 'EstadoActual'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    u'produccion_cafe_finca.manejocafetales': {
        'Meta': {'object_name': 'ManejoCafetales'},
        'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
        'fecha': ('django.db.models.fields.IntegerField', [], {}),
        'fertilizante_foliares': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'foliares'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.Manejos']"}),
        'fertilizante_suelo': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'suelo'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.Manejos']"}),
        'fungicidas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'fungicida'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.Manejos']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'insecticidas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'insecticida'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.Manejos']"}),
        'manejo_cafeto': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'cafeto'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.Manejos']"}),
        'manejo_sombra': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'sombra'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.Manejos']"}),
        'nematicidas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'nematicidas'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.Manejos']"})
    },
    u'produccion_cafe_finca.manejos': {
        'Meta': {'object_name': 'Manejos'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    u'produccion_cafe_finca.mesesmanejocafe': {
        'Meta': {'object_name': 'MesesManejoCafe'},
        'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
        'fecha': ('django.db.models.fields.IntegerField', [], {}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'mes_fertilizante_foliares': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'fertilizante_foliares'", 'symmetrical': 'False', 'to': u"orm['encuesta.Meses']"}),
        'mes_fertilizante_suelo': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'fertilizante_suelo'", 'symmetrical': 'False', 'to': u"orm['encuesta.Meses']"}),
        'mes_fungicidas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mes_fungicidas'", 'symmetrical': 'False', 'to': u"orm['encuesta.Meses']"}),
        'mes_insecticidas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mes_insecticida'", 'symmetrical': 'False', 'to': u"orm['encuesta.Meses']"}),
        'mes_manejo_cafeto': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'manejo_cefeto'", 'symmetrical': 'False', 'to': u"orm['encuesta.Meses']"}),
        'mes_manejo_sombra': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'manejo_sombra'", 'symmetrical': 'False', 'to': u"orm['encuesta.Meses']"}),
        'mes_nematicidas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'mes_nematicidas'", 'symmetrical': 'False', 'to': u"orm['encuesta.Meses']"})
    },
    u'produccion_cafe_finca.nombretipos': {
        'Meta': {'object_name': 'NombreTipos'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    u'produccion_cafe_finca.opciones': {
        'Meta': {'object_name': 'Opciones'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
    },
    u'produccion_cafe_finca.plantio': {
        'Meta': {'object_name': 'Plantio'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    u'produccion_cafe_finca.produccionvivero': {
        'Meta': {'object_name': 'ProduccionVivero'},
        'consigue_semilla': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'semilla'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.Semilla']"}),
        'costo_planta_catimore': ('django.db.models.fields.FloatField', [], {}),
        'costo_planta_caturra': ('django.db.models.fields.FloatField', [], {}),
        'costo_planta_hibridas': ('django.db.models.fields.FloatField', [], {}),
        'criterio': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'criterio'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.Criterios']"}),
        'decide': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'decide'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.DecideSembrar']"}),
        'disponible': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'disponible'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.Variedades']"}),
        'edad_planta': ('django.db.models.fields.FloatField', [], {}),
        'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'pagar_catimore': ('django.db.models.fields.FloatField', [], {}),
        'pagar_caturra': ('django.db.models.fields.FloatField', [], {}),
        'pagar_hibrida': ('django.db.models.fields.FloatField', [], {}),
        'plantas_finca': ('django.db.models.fields.FloatField', [], {}),
        'plantas_injertadas': ('django.db.models.fields.FloatField', [], {}),
        'plantas_vender': ('django.db.models.fields.FloatField', [], {}),
        'plantas_vivero': ('django.db.models.fields.FloatField', [], {}),
        'resistente_roya': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'resistente'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.ResistenteRoya']"}),
        'variedad': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'variedad'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.Variedades']"}),
        'vivero_finca': ('django.db.models.fields.IntegerField', [], {})
    },
    u'produccion_cafe_finca.resistenteroya': {
        'Meta': {'object_name': 'ResistenteRoya'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    u'produccion_cafe_finca.semilla': {
        'Meta': {'object_name': 'Semilla'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    u'produccion_cafe_finca.tiposinsumos': {
        'Meta': {'object_name': 'TiposInsumos'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    },
    u'produccion_cafe_finca.usoinsumos': {
        'Meta': {'object_name': 'UsoInsumos'},
        'aplicaciones': ('django.db.models.fields.FloatField', [], {}),
        'cantidad': ('django.db.models.fields.FloatField', [], {}),
        'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'momento': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'momento'", 'symmetrical': 'False', 'to': u"orm['encuesta.Meses']"}),
        'nombre': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_cafe_finca.NombreTipos']"}),
        'tipo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_cafe_finca.TiposInsumos']"})
    },
    u'produccion_cafe_finca.usoopcionesagroecologica': {
        'Meta': {'object_name': 'UsoOpcionesAgroecologica'},
        'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nivel': ('django.db.models.fields.IntegerField', [], {}),
        'opcion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_cafe_finca.Opciones']"})
    },
    u'produccion_cafe_finca.variedadedadroya': {
        'Meta': {'object_name': 'VariedadEdadRoya'},
        'area': ('django.db.models.fields.FloatField', [], {}),
        'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['encuesta.Encuesta']"}),
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nivel_roya': ('django.db.models.fields.FloatField', [], {}),
        'nombre_plantio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['produccion_cafe_finca.Plantio']"}),
        'produccion_2012': ('django.db.models.fields.FloatField', [], {}),
        'produccion_2013': ('django.db.models.fields.FloatField', [], {}),
        'produccion_2014': ('django.db.models.fields.FloatField', [], {}),
        'variedades': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'variedades'", 'symmetrical': 'False', 'to': u"orm['produccion_cafe_finca.Variedades']"})
    },
    u'produccion_cafe_finca.variedades': {
        'Meta': {'object_name': 'Variedades'},
        u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
        'nombre': ('django.db.models.fields.CharField', [], {'max_length': '100'})
    }
}
# Apps whose frozen model state above South should treat as complete.
complete_apps = ['produccion_cafe_finca']
| |
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals
from dockpulp import setup_logger
"""
Push built image to pulp registry
"""
from atomic_reactor.plugin import PostBuildPlugin
from atomic_reactor.util import ImageName
import dockpulp
import dockpulp.imgutils
import os
import re
import tempfile
class PulpUploader(object):
    """Upload an exported docker image tarball to a Pulp instance."""

    # Filenames of the client certificate/key pair inside the secrets dir.
    CER = 'pulp.cer'
    KEY = 'pulp.key'

    def __init__(self, workflow, pulp_instance, filename, log, pulp_secret_path=None, username=None,
                 password=None):
        self.workflow = workflow
        self.pulp_instance = pulp_instance
        self.filename = filename
        self.pulp_secret_path = pulp_secret_path
        self.log = log
        # Username/password take priority over the certificate secret.
        self.username = username
        self.password = password

    def _check_file(self):
        """Sanity-check the image tarball before attempting an upload."""
        metadata = dockpulp.imgutils.get_metadata(self.filename)
        layer_versions = dockpulp.imgutils.get_versions(metadata)
        for version in layer_versions.values():
            parts = version.split('.')
            if int(parts[0]) < 1:
                # 0.x docker: only 0.10+ is acceptable.
                minor = int(parts[1]) if len(parts) > 1 else 0
                if minor < 10:
                    raise RuntimeError('An image layer uses an unsupported '
                                       'version of docker (%s)' % version)
        repo_status = dockpulp.imgutils.check_repo(self.filename)
        if repo_status == 1:
            raise RuntimeError('Image is missing a /repositories file')
        elif repo_status == 2:
            raise RuntimeError('Pulp demands exactly 1 repo in /repositories')
        elif repo_status == 3:
            raise RuntimeError('/repositories references external images')

    def _set_auth(self, p):
        """Authenticate the dockpulp client, by login or certificate pair.

        The pulp.cer and pulp.key values must be set in a 'Secret'-type
        resource referenced by the build's sourceSecret; their directory is
        given either explicitly or via $SOURCE_SECRET_PATH.
        """
        if self.username and self.password:
            p.login(self.username, self.password)
            return
        if self.pulp_secret_path is not None:
            path = self.pulp_secret_path
            self.log.info("using configured path %s for secrets" % path)
        else:
            path = os.environ["SOURCE_SECRET_PATH"]
            self.log.info("SOURCE_SECRET_PATH=%s from environment" % path)
        # Work out the pathnames for the certificate/key pair.
        cer = os.path.join(path, self.CER)
        key = os.path.join(path, self.KEY)
        if not os.path.exists(cer):
            raise RuntimeError("Certificate does not exist.")
        if not os.path.exists(key):
            raise RuntimeError("Key does not exist.")
        # Tell dockpulp.
        p.set_certs(cer, key)

    def push_tarball_to_pulp(self, image_names):
        """Upload the tarball and return the crane-side ImageNames."""
        self.log.info("checking image before upload")
        self._check_file()

        p = dockpulp.Pulp(env=self.pulp_instance)
        self._set_auth(p)

        # Shape: {"repo-id": {"registry-id": "...", "tags": [...]}, ...}
        repos_tags_mapping = {}
        for image in image_names:
            entry = repos_tags_mapping.setdefault(image.pulp_repo, {})
            entry["registry-id"] = image.to_str(registry=False, tag=False)
            entry.setdefault("tags", []).append(image.tag)
        self.log.info("repo_tags_mapping = %s", repos_tags_mapping)

        task_ids = p.push_tar_to_pulp(repos_tags_mapping, self.filename)
        self.log.info("waiting for repos to be published to crane, tasks: %s",
                      ", ".join(map(str, task_ids)))
        p.watch_tasks(task_ids)

        # Keep only hostname[:port] of the registry URI for the push config.
        pulp_registry = re.sub(r'^https?://([^/]*)/?.*',
                               lambda m: m.groups()[0],
                               p.registry)
        self.workflow.push_conf.add_pulp_registry(self.pulp_instance,
                                                  pulp_registry)

        # The set of qualified repo names for this image.
        return [ImageName(registry=pulp_registry, repo=repodata["registry-id"], tag=tag)
                for repo, repodata in repos_tags_mapping.items()
                for tag in repodata['tags']]
class PulpPushPlugin(PostBuildPlugin):
    """Push the built image to a Pulp registry.

    Uploads either the exported image tarball or the image as stored in
    Docker, and reports the crane repositories where the image becomes
    available.
    """

    key = "pulp_push"
    can_fail = False

    def __init__(self, tasker, workflow, pulp_registry_name, load_squashed_image=None,
                 load_exported_image=None, image_names=None, pulp_secret_path=None,
                 username=None, password=None, dockpulp_loglevel=None):
        """
        constructor

        :param tasker: DockerTasker instance
        :param workflow: DockerBuildWorkflow instance
        :param pulp_registry_name: str, name of pulp registry to use, specified in /etc/dockpulp.conf
        :param load_squashed_image: obsolete name for load_exported_image, please don't use
        :param load_exported_image: bool, use exported tar instead of image from Docker
        :param image_names: list of additional image names
        :param pulp_secret_path: path to pulp.cer and pulp.key; $SOURCE_SECRET_PATH otherwise
        :param username: pulp username, used in preference to certificate and key
        :param password: pulp password, used in preference to certificate and key
        :param dockpulp_loglevel: log level accepted by logging.Logger.setLevel
        """
        # call parent constructor
        super(PulpPushPlugin, self).__init__(tasker, workflow)
        self.pulp_registry_name = pulp_registry_name
        self.image_names = image_names
        if load_squashed_image is not None and load_exported_image is not None and \
                (load_squashed_image != load_exported_image):
            raise RuntimeError(
                'Can\'t use load_squashed_image and load_exported_image with different values')
        if load_squashed_image is not None:
            # Fixed message: the original concatenation was missing a space
            # after the semicolon ("version;please").
            self.log.warning(
                'load_squashed_image argument is obsolete and will be removed in a future '
                'version; please use load_exported_image instead')
        self.load_exported_image = load_exported_image or load_squashed_image or False
        self.pulp_secret_path = pulp_secret_path
        self.username = username
        self.password = password

        if dockpulp_loglevel is not None:
            logger = setup_logger(dockpulp.log)
            try:
                logger.setLevel(dockpulp_loglevel)
            except (ValueError, TypeError) as ex:
                # Keep going with dockpulp's default level rather than failing the build.
                self.log.error("Can't set provided log level %s: %s",
                               repr(dockpulp_loglevel), repr(ex))

    def push_tar(self, image_path, image_names=None):
        """Upload the tarball at image_path, tagged with image_names.

        :returns: list of ImageName, the crane repositories for the image
        """
        # Find out how to tag this image.
        self.log.info("image names: %s", [str(image_name) for image_name in image_names])

        # Give that compressed tarball to pulp.
        uploader = PulpUploader(self.workflow, self.pulp_registry_name, image_path, self.log,
                                pulp_secret_path=self.pulp_secret_path, username=self.username,
                                password=self.password)
        return uploader.push_tarball_to_pulp(image_names)

    def run(self):
        """Entry point: push the image and return the crane repositories."""
        image_names = self.workflow.tag_conf.images[:]
        # Add in additional image names, if any
        if self.image_names:
            self.log.info("extending image names: %s", self.image_names)
            image_names += [ImageName.parse(x) for x in self.image_names]

        if self.load_exported_image:
            if len(self.workflow.exported_image_sequence) == 0:
                raise RuntimeError('no exported image to push to pulp')
            crane_repos = self.push_tar(self.workflow.exported_image_sequence[-1].get("path"),
                                        image_names)
        else:
            # Work out image ID
            image = self.workflow.image
            self.log.info("fetching image %s from docker", image)
            with tempfile.NamedTemporaryFile(prefix='docker-image-', suffix='.tar') as image_file:
                image_file.write(self.tasker.d.get_image(image).data)
                # NOTE: image_file.flush() is not called before push_tar reads
                # the file by name; presumably the writes fit the buffer or the
                # underlying reader tolerates it -- TODO confirm.
                crane_repos = self.push_tar(image_file.name, image_names)

        for image_name in crane_repos:
            self.log.info("image available at %s", str(image_name))

        return crane_repos
| |
# Copyright 2016-2017 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from enum import IntEnum
from typing import Union
import warnings
from rclpy.duration import Duration
from rclpy.impl.implementation_singleton import rclpy_implementation as _rclpy
class QoSPolicyKind(IntEnum):
    """
    Enum for types of QoS policies that a Publisher or Subscription can set.

    This enum matches the one defined in rmw/incompatible_qos_events_statuses.h
    """

    # TODO(mm3188): obtain these enum values from the rmw layer, instead of hardcoding
    INVALID = 1 << 0
    DURABILITY = 1 << 1
    DEADLINE = 1 << 2
    LIVELINESS = 1 << 3
    RELIABILITY = 1 << 4
    HISTORY = 1 << 5
    # The last four members previously carried stray trailing commas, turning
    # each value into a one-element tuple that Enum silently coerced through
    # int(); the commas are removed so the values are plain ints as intended.
    LIFESPAN = 1 << 6
    DEPTH = 1 << 7
    LIVELINESS_LEASE_DURATION = 1 << 8
    AVOID_ROS_NAMESPACE_CONVENTIONS = 1 << 9
def qos_policy_name_from_kind(policy_kind: Union[QoSPolicyKind, int]):
    """Get QoS policy name from QoSPolicyKind enum."""
    # Coerce plain ints to the enum before reading the member name.
    kind = QoSPolicyKind(policy_kind)
    return kind.name
class InvalidQoSProfileException(Exception):
    """Raised when constructing a QoSProfile with invalid arguments."""

    def __init__(self, *args):
        # Every instance carries the fixed summary first, then the details.
        super().__init__('Invalid QoSProfile', *args)
class QoSProfile:
    """Define Quality of Service policies."""

    # default QoS profile not exposed to the user to encourage them to think about QoS settings
    __qos_profile_default_dict = \
        _rclpy.rmw_qos_profile_t.predefined('qos_profile_default').to_dict()

    # One private slot per policy; each is exposed via a validating property
    # of the same name without the leading underscore.
    __slots__ = [
        '_history',
        '_depth',
        '_reliability',
        '_durability',
        '_lifespan',
        '_deadline',
        '_liveliness',
        '_liveliness_lease_duration',
        '_avoid_ros_namespace_conventions',
    ]

    def __init__(self, **kwargs):
        """
        Create a QoSProfile from keyword arguments.

        Accepted keywords are the slot names without the leading underscore
        (history, depth, reliability, durability, lifespan, deadline,
        liveliness, liveliness_lease_duration,
        avoid_ros_namespace_conventions).  Policies not given fall back to
        the rmw default profile.

        :raises InvalidQoSProfileException: if neither history nor depth is
            given, or if history is KEEP_LAST but no depth is given.
        """
        assert all('_' + key in self.__slots__ for key in kwargs.keys()), \
            'Invalid arguments passed to constructor: %r' % kwargs.keys()

        if 'history' not in kwargs:
            if 'depth' not in kwargs:
                raise InvalidQoSProfileException('History and/or depth settings are required.')
            # Supplying only a depth implies KEEP_LAST history.
            kwargs['history'] = QoSHistoryPolicy.KEEP_LAST

        self.history = kwargs.get('history')

        if (
            QoSHistoryPolicy.KEEP_LAST == self.history and
            'depth' not in kwargs
        ):
            raise InvalidQoSProfileException('History set to KEEP_LAST without a depth setting.')

        # Every remaining policy defaults to the rmw default profile value.
        self.depth = kwargs.get('depth', QoSProfile.__qos_profile_default_dict['depth'])
        self.reliability = kwargs.get(
            'reliability', QoSProfile.__qos_profile_default_dict['reliability'])
        self.durability = kwargs.get(
            'durability', QoSProfile.__qos_profile_default_dict['durability'])
        self.lifespan = kwargs.get('lifespan', QoSProfile.__qos_profile_default_dict['lifespan'])
        self.deadline = kwargs.get('deadline', QoSProfile.__qos_profile_default_dict['deadline'])
        self.liveliness = kwargs.get(
            'liveliness', QoSProfile.__qos_profile_default_dict['liveliness'])
        self.liveliness_lease_duration = kwargs.get(
            'liveliness_lease_duration',
            QoSProfile.__qos_profile_default_dict['liveliness_lease_duration'])
        self.avoid_ros_namespace_conventions = kwargs.get(
            'avoid_ros_namespace_conventions',
            QoSProfile.__qos_profile_default_dict['avoid_ros_namespace_conventions'])

    @property
    def history(self):
        """
        Get field 'history'.

        :returns: history attribute
        :rtype: QoSHistoryPolicy
        """
        return self._history

    @history.setter
    def history(self, value):
        # Plain ints are accepted and normalized to the enum type.
        assert isinstance(value, QoSHistoryPolicy) or isinstance(value, int)
        self._history = QoSHistoryPolicy(value)

    @property
    def reliability(self):
        """
        Get field 'reliability'.

        :returns: reliability attribute
        :rtype: QoSReliabilityPolicy
        """
        return self._reliability

    @reliability.setter
    def reliability(self, value):
        # Plain ints are accepted and normalized to the enum type.
        assert isinstance(value, QoSReliabilityPolicy) or isinstance(value, int)
        self._reliability = QoSReliabilityPolicy(value)

    @property
    def durability(self):
        """
        Get field 'durability'.

        :returns: durability attribute
        :rtype: QoSDurabilityPolicy
        """
        return self._durability

    @durability.setter
    def durability(self, value):
        # Plain ints are accepted and normalized to the enum type.
        assert isinstance(value, QoSDurabilityPolicy) or isinstance(value, int)
        self._durability = QoSDurabilityPolicy(value)

    @property
    def depth(self):
        """
        Get field 'depth'.

        :returns: depth attribute
        :rtype: int
        """
        return self._depth

    @depth.setter
    def depth(self, value):
        assert isinstance(value, int)
        self._depth = value

    @property
    def lifespan(self):
        """
        Get field 'lifespan'.

        :returns: lifespan attribute
        :rtype: Duration
        """
        return self._lifespan

    @lifespan.setter
    def lifespan(self, value):
        assert isinstance(value, Duration)
        self._lifespan = value

    @property
    def deadline(self):
        """
        Get field 'deadline'.

        :returns: deadline attribute.
        :rtype: Duration
        """
        return self._deadline

    @deadline.setter
    def deadline(self, value):
        assert isinstance(value, Duration)
        self._deadline = value

    @property
    def liveliness(self):
        """
        Get field 'liveliness'.

        :returns: liveliness attribute
        :rtype: QoSLivelinessPolicy
        """
        return self._liveliness

    @liveliness.setter
    def liveliness(self, value):
        # Plain ints are accepted and normalized to the enum type.
        assert isinstance(value, (QoSLivelinessPolicy, int))
        self._liveliness = QoSLivelinessPolicy(value)

    @property
    def liveliness_lease_duration(self):
        """
        Get field 'liveliness_lease_duration'.

        :returns: liveliness_lease_duration attribute.
        :rtype: Duration
        """
        return self._liveliness_lease_duration

    @liveliness_lease_duration.setter
    def liveliness_lease_duration(self, value):
        assert isinstance(value, Duration)
        self._liveliness_lease_duration = value

    @property
    def avoid_ros_namespace_conventions(self):
        """
        Get field 'avoid_ros_namespace_conventions'.

        :returns: avoid_ros_namespace_conventions attribute
        :rtype: bool
        """
        return self._avoid_ros_namespace_conventions

    @avoid_ros_namespace_conventions.setter
    def avoid_ros_namespace_conventions(self, value):
        assert isinstance(value, bool)
        self._avoid_ros_namespace_conventions = value

    def get_c_qos_profile(self):
        """Return the C-extension (rmw) representation of this profile."""
        return _rclpy.rmw_qos_profile_t(
            self.history,
            self.depth,
            self.reliability,
            self.durability,
            self.lifespan.get_c_duration(),
            self.deadline.get_c_duration(),
            self.liveliness,
            self.liveliness_lease_duration.get_c_duration(),
            self.avoid_ros_namespace_conventions,
        )

    def __eq__(self, other):
        """Profiles are equal when every policy slot compares equal."""
        if not isinstance(other, QoSProfile):
            return False
        return all(
            self.__getattribute__(slot) == other.__getattribute__(slot)
            for slot in self.__slots__)

    def __str__(self):
        """Render as 'QoSProfile(history=..., depth=..., ...)'."""
        return f'{type(self).__name__}(%s)' % (
            ', '.join(f'{slot[1:]}=%s' % getattr(self, slot) for slot in self.__slots__)
        )
class QoSPolicyEnum(IntEnum):
    """
    Base for QoS Policy enumerations.

    Provides helper function to filter keys for utilities.
    """

    @classmethod
    def short_keys(cls):
        """Return a list of shortened typing-friendly enum values."""
        return [name.lower() for name in cls.__members__ if not name.startswith('RMW')]

    @classmethod
    def get_from_short_key(cls, name):
        """Retrieve a policy type from a short name, case-insensitive."""
        return cls[name.upper()].value

    @property
    def short_key(self):
        """Return the lowercased non-RMW member name matching this value."""
        for name, member in self.__class__.__members__.items():
            if name.startswith('RMW'):
                continue
            if self.value == member:
                return name.lower()
        raise AttributeError(
            'failed to find value %s in %s' %
            (self.value, self.__class__.__name__))
class _DeprecatedPolicyValueAlias:
"""Helper to deprecate a policy value."""
def __init__(self, replacement_name, deprecated_name):
self.replacement_name = replacement_name
self.deprecated_name = deprecated_name
def __get__(self, obj, policy_cls):
warnings.warn(
f'{policy_cls.__name__}.{self.deprecated_name} is deprecated. '
f'Use {policy_cls.__name__}.{self.replacement_name} instead.'
)
return policy_cls[self.replacement_name]
def _deprecated_policy_value_aliases(pairs):
    """Class decorator installing deprecated aliases for policy values.

    *pairs* is an iterable of (deprecated_name, replacement_name) tuples;
    each deprecated name becomes a warning descriptor on the policy class.
    """
    def decorator(policy_cls):
        for deprecated_name, replacement_name in pairs:
            alias = _DeprecatedPolicyValueAlias(replacement_name, deprecated_name)
            setattr(policy_cls, deprecated_name, alias)
        return policy_cls
    return decorator
# Accessing the old RMW_QOS_POLICY_HISTORY_* names warns and forwards to the
# short names below (see _DeprecatedPolicyValueAlias).
@_deprecated_policy_value_aliases((
    ('RMW_QOS_POLICY_HISTORY_SYSTEM_DEFAULT', 'SYSTEM_DEFAULT'),
    ('RMW_QOS_POLICY_HISTORY_KEEP_LAST', 'KEEP_LAST'),
    ('RMW_QOS_POLICY_HISTORY_KEEP_ALL', 'KEEP_ALL'),
    ('RMW_QOS_POLICY_HISTORY_UNKNOWN', 'UNKNOWN'),
))
class HistoryPolicy(QoSPolicyEnum):
    """
    Enum for QoS History settings.

    This enum matches the one defined in rmw/types.h
    """

    SYSTEM_DEFAULT = 0
    KEEP_LAST = 1
    KEEP_ALL = 2
    UNKNOWN = 3


# Alias with the old name, for retrocompatibility
QoSHistoryPolicy = HistoryPolicy
# Accessing the old RMW_QOS_POLICY_RELIABILITY_* names warns and forwards to
# the short names below (see _DeprecatedPolicyValueAlias).
@_deprecated_policy_value_aliases((
    ('RMW_QOS_POLICY_RELIABILITY_SYSTEM_DEFAULT', 'SYSTEM_DEFAULT'),
    ('RMW_QOS_POLICY_RELIABILITY_RELIABLE', 'RELIABLE'),
    ('RMW_QOS_POLICY_RELIABILITY_BEST_EFFORT', 'BEST_EFFORT'),
    ('RMW_QOS_POLICY_RELIABILITY_UNKNOWN', 'UNKNOWN'),
))
class ReliabilityPolicy(QoSPolicyEnum):
    """
    Enum for QoS Reliability settings.

    This enum matches the one defined in rmw/types.h
    """

    SYSTEM_DEFAULT = 0
    RELIABLE = 1
    BEST_EFFORT = 2
    UNKNOWN = 3


# Alias with the old name, for retrocompatibility
QoSReliabilityPolicy = ReliabilityPolicy
# Accessing the old RMW_QOS_POLICY_DURABILITY_* names warns and forwards to
# the short names below (see _DeprecatedPolicyValueAlias).
@_deprecated_policy_value_aliases((
    ('RMW_QOS_POLICY_DURABILITY_SYSTEM_DEFAULT', 'SYSTEM_DEFAULT'),
    ('RMW_QOS_POLICY_DURABILITY_TRANSIENT_LOCAL', 'TRANSIENT_LOCAL'),
    ('RMW_QOS_POLICY_DURABILITY_VOLATILE', 'VOLATILE'),
    ('RMW_QOS_POLICY_DURABILITY_UNKNOWN', 'UNKNOWN'),
))
class DurabilityPolicy(QoSPolicyEnum):
    """
    Enum for QoS Durability settings.

    This enum matches the one defined in rmw/types.h
    """

    SYSTEM_DEFAULT = 0
    TRANSIENT_LOCAL = 1
    VOLATILE = 2
    UNKNOWN = 3


# Alias with the old name, for retrocompatibility
QoSDurabilityPolicy = DurabilityPolicy
# Accessing the old RMW_QOS_POLICY_LIVELINESS_* names warns and forwards to
# the short names below (see _DeprecatedPolicyValueAlias).
@_deprecated_policy_value_aliases((
    ('RMW_QOS_POLICY_LIVELINESS_SYSTEM_DEFAULT', 'SYSTEM_DEFAULT'),
    ('RMW_QOS_POLICY_LIVELINESS_AUTOMATIC', 'AUTOMATIC'),
    ('RMW_QOS_POLICY_LIVELINESS_MANUAL_BY_TOPIC', 'MANUAL_BY_TOPIC'),
    # Fixed copy-paste bug: the UNKNOWN alias was previously registered as
    # 'RMW_QOS_POLICY_DURABILITY_UNKNOWN' in this liveliness block.
    ('RMW_QOS_POLICY_LIVELINESS_UNKNOWN', 'UNKNOWN'),
))
class LivelinessPolicy(QoSPolicyEnum):
    """
    Enum for QoS Liveliness settings.

    This enum matches the one defined in rmw/types.h
    """

    SYSTEM_DEFAULT = 0
    AUTOMATIC = 1
    # NOTE(review): the numbering skips 2 here — presumably to stay in sync
    # with rmw/types.h where an intermediate value was removed; confirm.
    MANUAL_BY_TOPIC = 3
    UNKNOWN = 4


# Alias with the old name, for retrocompatibility
QoSLivelinessPolicy = LivelinessPolicy
# The details of the following profiles can be found at
# 1. ROS QoS principles:
#    https://design.ros2.org/articles/qos.html
# 2. ros2/rmw : rmw/include/rmw/qos_profiles.h
#
# Each constant below is built from the corresponding rmw-predefined profile,
# converted to keyword arguments for the QoSProfile constructor.

#: Used for initialization. Should not be used as the actual QoS profile.
qos_profile_unknown = QoSProfile(**_rclpy.rmw_qos_profile_t.predefined(
    'qos_profile_unknown').to_dict())
#: Uses the default QoS settings defined in the DDS vendor tool
qos_profile_system_default = QoSProfile(**_rclpy.rmw_qos_profile_t.predefined(
    'qos_profile_system_default').to_dict())
#: For sensor data, using best effort reliability and small
#: queue depth
qos_profile_sensor_data = QoSProfile(**_rclpy.rmw_qos_profile_t.predefined(
    'qos_profile_sensor_data').to_dict())
#: For services, using reliable reliability and volatile durability
qos_profile_services_default = QoSProfile(**_rclpy.rmw_qos_profile_t.predefined(
    'qos_profile_services_default').to_dict())
#: For parameter communication. Similar to service QoS profile but with larger
#: queue depth so that requests do not get lost.
qos_profile_parameters = QoSProfile(**_rclpy.rmw_qos_profile_t.predefined(
    'qos_profile_parameters').to_dict())
#: For parameter change events. Currently same as the QoS profile for
#: parameters.
qos_profile_parameter_events = QoSProfile(**_rclpy.rmw_qos_profile_t.predefined(
    'qos_profile_parameter_events').to_dict())

# Separate rcl_action profile defined at
# ros2/rcl : rcl/rcl_action/include/rcl_action/default_qos.h
#
#: For actions, using reliable reliability, transient-local durability.
qos_profile_action_status_default = QoSProfile(**_rclpy.rclpy_action_get_rmw_qos_profile(
    'rcl_action_qos_profile_status_default'))
class QoSPresetProfiles(Enum):
    """Preset QoS profiles addressable by a short, typing-friendly name.

    Note that the classmethods below are duplicated from QoSPolicyEnum.
    Our supported version of Python3 (3.5) doesn't have a fix that allows
    mixins on Enum.
    """

    # The original code placed the note above as a bare string literal after
    # the members, where it was a discarded statement rather than the class
    # docstring; it is now a real docstring.
    UNKNOWN = qos_profile_unknown
    SYSTEM_DEFAULT = qos_profile_system_default
    SENSOR_DATA = qos_profile_sensor_data
    SERVICES_DEFAULT = qos_profile_services_default
    PARAMETERS = qos_profile_parameters
    PARAMETER_EVENTS = qos_profile_parameter_events
    ACTION_STATUS_DEFAULT = qos_profile_action_status_default

    @classmethod
    def short_keys(cls):
        """Return a list of shortened typing-friendly enum values."""
        return [k.lower() for k in cls.__members__.keys() if not k.startswith('RMW')]

    @classmethod
    def get_from_short_key(cls, name):
        """Retrieve a policy type from a short name, case-insensitive."""
        return cls[name.upper()].value
# Re-export the compatibility result type from the C extension so callers
# don't need to import _rclpy directly.
QoSCompatibility = _rclpy.QoSCompatibility


def qos_check_compatible(publisher_qos: QoSProfile, subscription_qos: QoSProfile):
    """
    Check if two QoS profiles are compatible.

    Two QoS profiles are compatible if a publisher and subscription
    using the QoS policies can communicate with each other.

    If any policies have value "system default" or "unknown" then it is possible that
    compatibility cannot be determined.
    In this case, the value QoSCompatibility.WARNING is set as part of
    the returned structure.

    :param publisher_qos: QoS profile of the publisher.
    :param subscription_qos: QoS profile of the subscription.
    :returns: a (compatibility, reason) tuple, where both values come from
        the C extension's compatibility-check result.
    """
    result = _rclpy.rclpy_qos_check_compatible(
        publisher_qos.get_c_qos_profile(),
        subscription_qos.get_c_qos_profile()
    )
    compatibility = QoSCompatibility(
        result.compatibility
    )
    reason = result.reason
    return compatibility, reason
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for return_statements module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.converters import return_statements
from tensorflow.python.autograph.core import converter_testing
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
class SingleReturnTest(converter_testing.TestCase):
  """Tests for the return_statements converter.

  Each test checks that a converted function returns the same values as the
  original for representative inputs.
  """

  def assertTransformedEquivalent(self, test_fn, *inputs):
    """Assert the converted test_fn matches the original on *inputs."""
    ns = {'ops': ops}
    with self.converted(test_fn, return_statements, ns) as result:
      self.assertEqual(test_fn(*inputs), result.test_fn(*inputs))

  def test_straightline(self):

    def test_fn(x):
      return x * x

    self.assertTransformedEquivalent(test_fn, 2)

  def test_conditional(self):

    def test_fn(x):
      if x > 0:
        return x
      else:
        return x * x

    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)

  # Renamed from 'test_contitional_missing_else' (typo in "conditional").
  def test_conditional_missing_else(self):

    def test_fn(x):
      if x > 0:
        return x

    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)

  def test_conditional_missing_else_then_default(self):

    def test_fn(x):
      if x > 0:
        return x
      return x * x

    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)

  def test_conditional_else_only_then_default(self):

    def test_fn(x):
      if x < 0:
        x *= x
      else:
        return x
      return x

    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)

  def test_conditional_nested(self):

    def test_fn(x):
      if x > 0:
        if x < 5:
          return x
        else:
          return x * x
      else:
        return x * x * x

    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)
    self.assertTransformedEquivalent(test_fn, 5)

  def test_context_manager(self):

    def test_fn(x):
      with ops.name_scope(''):
        return x * x

    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)

  def test_context_manager_in_conditional(self):

    def test_fn(x):
      if x > 0:
        with ops.name_scope(''):
          return x * x
      else:
        return x

    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)

  # Renamed from 'text_conditional_in_context_manager': the 'text_' prefix
  # prevented the test runner from ever discovering and running this test.
  def test_conditional_in_context_manager(self):

    def test_fn(x):
      with ops.name_scope(''):
        if x > 0:
          return x * x
        else:
          return x

    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)

  def test_no_return(self):

    def test_fn(x):
      x *= x

    self.assertTransformedEquivalent(test_fn, 2)

  def test_nested_function(self):

    def test_fn(x):

      def inner_fn(y):
        if y > 0:
          return y * y
        else:
          return y

      return inner_fn(x)

    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)

  def test_nested_function_in_control_flow(self):

    def test_fn(x):
      if x:

        def inner_fn(y):
          return y

        inner_fn(x)

    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, -2)

  def test_for_loop(self):

    def test_fn(n):
      for _ in range(n):
        return 1

    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, 0)

  def test_while_loop(self):

    def test_fn(n):
      i = 0
      s = 0
      while i < n:
        i += 1
        s += i
        if s > 4:
          return s
      return -1

    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 2)
    self.assertTransformedEquivalent(test_fn, 4)

  def test_null_return(self):

    def test_fn(n):
      if n > 4:
        return
      return

    self.assertTransformedEquivalent(test_fn, 4)
    self.assertTransformedEquivalent(test_fn, 5)

  def test_nested_multiple_withs(self):

    def test_fn(x):
      v = []
      while x > 0:
        x -= 1
        with ops.name_scope(''):
          if x % 2 == 0:
            return v
        with ops.name_scope(''):
          v.append(x)
          v.append(x)
      return v

    self.assertTransformedEquivalent(test_fn, 0)
    self.assertTransformedEquivalent(test_fn, 1)
    self.assertTransformedEquivalent(test_fn, 3)
    self.assertTransformedEquivalent(test_fn, 4)

  def test_multiple_returns_in_nested_scope(self):

    def test_fn(a):
      v = []
      for x in a:
        x -= 1
        if x > 100:
          return v
        try:
          raise ValueError('intentional')
        except ValueError:  # pylint:disable=bare-except
          return v
        v.append(x)
      return v

    self.assertTransformedEquivalent(test_fn, [])
    self.assertTransformedEquivalent(test_fn, [1])
    self.assertTransformedEquivalent(test_fn, [2])
    self.assertTransformedEquivalent(test_fn, [1, 2, 3])
if __name__ == '__main__':
test.main()
| |
"""
Unit tests for the parser output and AST evaluation
"""
import pytest
from pypred import parser, ast
class MockPred(object):
    """Minimal stand-in for a predicate object used by AST evaluation."""

    def static_resolve(self, identifier):
        # Quoted literals resolve to their inner text; anything else is Undefined.
        is_quoted = identifier[0] == identifier[-1] and identifier[0] in ("'", '"')
        if is_quoted:
            return identifier[1:-1]
        return ast.Undefined()

    def resolve_identifier(self, doc, ident):
        # Look the identifier up in the document, defaulting to Undefined.
        return doc[ident] if ident in doc else ast.Undefined()
class TestAST(object):
def ast(self, inp):
lexer = parser.get_lexer()
p = parser.get_parser(lexer=lexer)
return p.parse(inp, lexer=lexer)
def test_jack_and_jill(self):
a = self.ast("name is Jack and friend_name is Jill")
valid, info = a.validate()
assert valid
def test_bad_number(self):
a = ast.Number("0..0")
valid, info = a.validate()
assert not valid
assert "Failed to convert" in info["errors"][0]
def test_bad_constant(self):
a = ast.Constant(42)
valid, info = a.validate()
assert not valid
assert "Invalid Constant" in info["errors"][0]
def test_bad_regex_type(self):
a = ast.Regex(42)
valid, info = a.validate()
assert not valid
assert "must be a string" in info["errors"][0]
def test_bad_regex(self):
a = ast.Regex("(abc")
valid, info = a.validate()
assert not valid
assert "Compilation failed" in info["errors"][0]
assert "(abc" in info["regex"]
assert info["regex"]["(abc"] == "unbalanced parenthesis" or\
info["regex"]["(abc"].startswith("missing ), unterminated subpattern")
def test_bad_regex_inp(self):
a = self.ast("foo matches '(abc'")
valid, info = a.validate()
assert not valid
assert "Compilation failed" in info["errors"][0]
assert "(abc" in info["regex"]
assert info["regex"]["(abc"] == "unbalanced parenthesis" or\
info["regex"]["(abc"].startswith("missing ), unterminated subpattern")
def test_regex_without_modifiers(self):
a = self.ast("foo matches 'abc'")
b = self.ast('foo matches "abc"')
c = self.ast("foo matches /abc/")
a_valid, _ = a.validate()
b_valid, _ = b.validate()
c_valid, _ = c.validate()
assert a_valid
assert b_valid
assert c_valid
def test_regex_with_modifiers(self):
a = self.ast("foo matches /^abc+[0-9]{2,}$/uims")
valid, _ = a.validate()
assert valid
def test_regex_modifiers(self):
a = self.ast("foo matches /abc/uis")
b = self.ast("foo matches /^abc+[0-9]{2,}$/uims")
assert a.right.modifiers == 'uis'
assert b.right.modifiers == 'uims'
def test_match_bad_arg(self):
a = ast.MatchOperator(ast.Literal("foo"), ast.Literal("bar"))
valid, info = a.validate()
assert not valid
assert "must take a regex" in info["errors"][0]
def test_contains_bad(self):
a = ast.ContainsOperator(ast.Literal("foo"), ast.Empty())
valid, info = a.validate()
assert not valid
assert "Contains operator must take" in info["errors"][0]
def test_contains_valid_args(self):
a = ast.ContainsOperator(ast.Literal("foo"), ast.Literal("bar"))
valid, info = a.validate()
assert valid
def test_bad_compare(self):
a = ast.CompareOperator("!", ast.Literal("foo"), ast.Empty())
valid, info = a.validate()
assert not valid
assert "Unknown compare" in info["errors"][0]
def test_bad_logic(self):
a = ast.LogicalOperator("!", ast.Literal("foo"), ast.Empty())
valid, info = a.validate()
assert not valid
assert "Unknown logical" in info["errors"][0]
def test_bad_child(self):
c = ast.CompareOperator("!", ast.Literal("foo"), ast.Empty())
a = ast.LogicalOperator("and", ast.Literal("foo"), c)
valid, info = a.validate()
assert not valid
assert "Unknown compare" in info["errors"][0]
def test_logical_eval1(self):
l = ast.Literal("l")
r = ast.Literal("r")
a = ast.LogicalOperator('and', l, r)
res, ctx = a.analyze(MockPred(), {"l": True, "r": False})
assert not res
assert ctx.literals["l"] == True
assert ctx.literals["r"] == False
assert "Right hand side of AND operator at" in ctx.failed[0]
def test_logical_eval2(self):
l = ast.Literal("l")
r = ast.Literal("r")
a = ast.LogicalOperator('and', l, r)
res, ctx = a.analyze(MockPred(), {"l": True, "r": True})
assert res
assert ctx.literals["l"] == True
assert ctx.literals["r"] == True
def test_logical_eval3(self):
l = ast.Literal("l")
r = ast.Literal("r")
a = ast.LogicalOperator('or', l, r)
res, ctx = a.analyze(MockPred(), {"l": False, "r": False})
assert not res
assert ctx.literals["l"] == False
assert ctx.literals["r"] == False
assert "Both sides of OR operator at" in ctx.failed[0]
def test_logical_eval4(self):
l = ast.Literal("l")
r = ast.Literal("r")
a = ast.LogicalOperator('or', l, r)
res, ctx = a.analyze(MockPred(), {"l": False, "r": True})
assert res
assert ctx.literals["l"] == False
assert ctx.literals["r"] == True
def test_logical_eval5(self):
l = ast.Literal("l")
r = ast.Literal("r")
a = ast.LogicalOperator('or', l, r)
res, ctx = a.analyze(MockPred(), {"l": False})
assert not res
assert ctx.literals["l"] == False
assert ctx.literals["r"] == ast.Undefined()
assert "Both sides of OR operator" in ctx.failed[0]
def test_logical_eval6(self):
"Short circuit logic"
l = ast.Literal("l")
r = ast.Literal("r")
a = ast.LogicalOperator('or', l, r)
res, ctx = a.analyze(MockPred(), {"l": True})
assert res
assert ctx.literals["l"] == True
assert "r" not in ctx.literals
def test_logical_eval7(self):
"Short circuit logic"
l = ast.Literal("l")
r = ast.Literal("r")
a = ast.LogicalOperator('and', l, r)
res, ctx = a.analyze(MockPred(), {"l": False})
assert not res
assert ctx.literals["l"] == False
assert "r" not in ctx.literals
assert "Left hand side" in ctx.failed[0]
def test_negate_false(self):
l = ast.Literal("l")
a = ast.NegateOperator(l)
res, ctx = a.analyze(MockPred(), {"l": False})
assert res
assert ctx.literals["l"] == False
def test_negate_true(self):
l = ast.Literal("l")
a = ast.NegateOperator(l)
res, ctx = a.analyze(MockPred(), {"l": True})
assert not res
assert ctx.literals["l"] == True
@pytest.mark.parametrize(("type",), [
(">=",), (">",), ("<",), ("<=",), ("=",), ("==",), ("!=",), ("is",)])
def test_compare(self, type):
l = ast.Literal("l")
r = ast.Literal("r")
a = ast.CompareOperator(type, l, r)
d = {"l": 1, "r": 5}
res, ctx = a.analyze(MockPred(), d)
# Determine the expected result
if type == "=":
s = '%d %s %d' % (d["l"], "==", d["r"])
else:
s = '%d %s %d' % (d["l"], type, d["r"])
expected = eval(s)
assert res == expected
if not res:
assert ("%s comparison at" % type.upper()) in ctx.failed[0]
assert ctx.literals["l"] == d["l"]
assert ctx.literals["r"] == d["r"]
@pytest.mark.parametrize(("type",), [
(">=",), (">",), ("<",), ("<=",), ("=",), ("==",), ("!=",), ("is",)])
def test_compare_undef(self, type):
l = ast.Literal("l")
r = ast.Literal("r")
a = ast.CompareOperator(type, l, r)
d = {"l": 1}
res, ctx = a.analyze(MockPred(), d)
# Determine the expected result
if type == "!=":
assert res
else:
assert not res
if not res:
assert ("%s comparison at" % type.upper()) in ctx.failed[0]
assert ctx.literals["l"] == d["l"]
assert ctx.literals["r"] == ast.Undefined()
@pytest.mark.parametrize(("type",), [
    (">=",), (">",), ("<",), ("<=",), ("=",), ("==",), ("!=",), ("is",)])
def test_compare_empty(self, type):
    """Comparing against an Empty literal fails for all operators but '!='."""
    lhs = ast.Literal("l")
    rhs = ast.Literal("r")
    op = ast.CompareOperator(type, lhs, rhs)
    document = {"l": 1, "r": ast.Empty()}
    result, context = op.analyze(MockPred(), document)
    # Only inequality can hold against an empty right side.
    if type == "!=":
        assert result
    else:
        assert not result
    if not result:
        assert ("%s comparison at" % type.upper()) in context.failed[0]
    assert context.literals["l"] == document["l"]
    assert context.literals["r"] == ast.Empty()
def test_contains_invalid(self):
    """A left side without container semantics reports a contains failure."""
    lhs = ast.Literal("l")
    rhs = ast.Literal("r")
    op = ast.ContainsOperator(lhs, rhs)
    result, context = op.analyze(MockPred(), {"l": 1, "r": None})
    assert not result
    assert "does not support contains" in context.failed[0]
    assert context.literals["l"] == 1
    assert "r" not in context.literals
def test_contains_undef(self):
    """An undefined left side makes the contains check fail."""
    lhs = ast.Literal("l")
    rhs = ast.Literal("r")
    op = ast.ContainsOperator(lhs, rhs)
    result, context = op.analyze(MockPred(), {"r": 5})
    assert not result
    assert "not in left side" in context.failed[0]
    assert context.literals["l"] == ast.Undefined()
    assert context.literals["r"] == 5
def test_contains_empty(self):
    """An empty container on the left makes the contains check fail."""
    lhs = ast.Literal("l")
    rhs = ast.Literal("r")
    op = ast.ContainsOperator(lhs, rhs)
    result, context = op.analyze(MockPred(), {"l": [], "r": 5})
    assert not result
    assert "not in left side" in context.failed[0]
    assert context.literals["l"] == []
    assert context.literals["r"] == 5
def test_contains_valid(self):
    """The check passes when the right value is inside the left container."""
    lhs = ast.Literal("l")
    rhs = ast.Literal("r")
    op = ast.ContainsOperator(lhs, rhs)
    result, context = op.analyze(MockPred(), {"l": [42], "r": 42})
    assert result
    assert context.literals["l"] == [42]
    assert context.literals["r"] == 42
def test_match_bad_types(self):
    """Matching a non-string value fails with a type complaint."""
    subject = ast.Literal("l")
    pattern = ast.Regex(ast.Literal('abcd'))
    op = ast.MatchOperator(subject, pattern)
    result, context = op.analyze(MockPred(), {"l": 42})
    assert not result
    assert "not a string" in context.failed[0]
    assert context.literals["l"] == 42
def test_match_undef(self):
    """Matching an undefined value fails with a type complaint."""
    subject = ast.Literal("l")
    pattern = ast.Regex(ast.Literal('abcd'))
    op = ast.MatchOperator(subject, pattern)
    result, context = op.analyze(MockPred(), {})
    assert not result
    assert "not a string" in context.failed[0]
    assert context.literals["l"] == ast.Undefined()
def test_match_no_match(self):
    """A string that does not match the regex fails the check."""
    subject = ast.Literal("l")
    pattern = ast.Regex(ast.Literal('abcd'))
    op = ast.MatchOperator(subject, pattern)
    result, context = op.analyze(MockPred(), {"l": "tubez"})
    assert not result
    assert "does not match" in context.failed[0]
    assert context.literals["l"] == "tubez"
def test_match(self):
    """A string that matches the regex passes the check."""
    subject = ast.Literal("l")
    pattern = ast.Regex(ast.Literal('abcd'))
    op = ast.MatchOperator(subject, pattern)
    result, context = op.analyze(MockPred(), {"l": "abcd"})
    assert result
    assert context.literals["l"] == "abcd"
def test_push(self):
    """PushResult forwards its match value to the match set's push_match."""
    node = ast.PushResult(True, ast.Constant(True))

    class RecordingSet(object):
        def __init__(self):
            self.res = []

        def push_match(self, match):
            self.res.append(match)

    collector = RecordingSet()
    assert node.evaluate(collector, {})
    assert collector.res == [True]
def test_branch(self):
    """Branch selects the true/false expression based on the check result."""
    check = ast.CompareOperator('>', ast.Literal('a'), ast.Literal('b'))
    branch = ast.Branch(check, ast.Constant(True), ast.Constant(False))
    assert branch.evaluate(MockPred(), {'a': 2, 'b': 1})
    result, context = branch.analyze(MockPred(), {'a': 1, 'b': 2})
    assert not result
    assert context.literals["a"] == 1
    assert context.literals["b"] == 2
    assert context.failed[-1].startswith("Right hand side")
def test_both_false(self):
    """A Both node over two false constants evaluates to False."""
    node = ast.Both(ast.Constant(False), ast.Constant(False))
    assert node.evaluate(MockPred(), {}) == False
def test_iterall_true(self):
    """A Both node over (False, True) constants evaluates to True."""
    node = ast.Both(ast.Constant(False), ast.Constant(True))
    assert node.evaluate(MockPred(), {}) == True
def test_cached_node_uses_cache(self):
    """A pre-populated cache entry wins over evaluating the wrapped node."""
    node = ast.CachedNode(ast.Constant(False), 0)
    context = ast.EvalContext(MockPred(), {})
    context.cached_res[0] = True
    assert node.eval(context)
def test_cached_node_sets_cache(self):
    """Evaluating stores the wrapped node's result under the cache slot."""
    node = ast.CachedNode(ast.Constant(False), 0)
    context = ast.EvalContext(MockPred(), {})
    assert not node.eval(context)
    assert not context.cached_res[0]
def test_litset_eval(self):
    """Evaluation resolves every member against the document."""
    literal_set = ast.LiteralSet(
        [ast.Constant(True), ast.Literal('a'), ast.Literal('b')])
    context = ast.EvalContext(MockPred(), {'a': 2, 'b': False})
    resolved = literal_set.eval(context)
    assert isinstance(resolved, frozenset)
    assert True in resolved
    assert False in resolved
    assert 2 in resolved
def test_litset_static(self):
    """After static_resolve the set is marked static and still evaluates."""
    literal_set = ast.LiteralSet([ast.Constant(True), ast.Literal('\"a\"')])
    pred = MockPred()
    literal_set.static_resolve(pred)
    context = ast.EvalContext(pred, {'a': 2, 'b': False})
    resolved = literal_set.eval(context)
    assert literal_set.static
    assert isinstance(resolved, set)
    assert True in resolved
    assert "a" in resolved
| |
'''
Created on Mar 8, 2012
@author: freer
'''
NAMESPACE_URI = 'http://www.eufar.net/ASMM'
import datetime
import xml.dom.minidom
from PyQt4.QtCore import Qt
from PyQt4.QtCore import QDate
def create_asmm_xml(self, out_file_name):
    """Serialize the mission metadata held by the GUI widgets to an
    ASMM XML file.

    Parameters
    ----------
    out_file_name : str
        Path of the XML file to write (overwritten if it exists).

    On success, marks the document as saved/unmodified.
    """
    doc = xml.dom.minidom.Document()
    doc_root = add_element(doc, "MissionMetadata", doc)
    doc_root.setAttribute("xmlns:asmm", NAMESPACE_URI)

    # Stamp the creation date on first save only; revision date on every save.
    current_date = datetime.date.isoformat(datetime.date.today())
    if not self.create_date:
        self.create_date = current_date
    add_element(doc, "CreationDate", doc_root, self.create_date)
    add_element(doc, "RevisionDate", doc_root, current_date)

    ############################
    # Flight Information
    ############################
    flightInformation = add_element(doc, "FlightInformation", doc_root)
    add_element(doc, "FlightNumber", flightInformation, self.flightNumberLine.text())
    add_element(doc, "Date", flightInformation, self.dateLine.date().toString(Qt.ISODate))
    add_element(doc, "Campaign", flightInformation, self.campaignLine.text())
    add_element(doc, "MissionScientist", flightInformation, self.missionSciLine.text())
    add_element(doc, "FlightManager", flightInformation, self.flightManagerLine.text())
    add_element(doc, "Platform", flightInformation, self.platformLine.text())
    add_element(doc, "Operator", flightInformation, self.operatorLine.text())
    add_element(doc, "Country", flightInformation, self.countryLine.text())
    geographicBoundingBox = add_element(doc, "GeographicBoundingBox", flightInformation)
    add_element(doc, "westBoundLongitude", geographicBoundingBox, self.westBoundLongitudeLine.text())
    add_element(doc, "eastBoundLongitude", geographicBoundingBox, self.eastBoundLongitudeLine.text())
    add_element(doc, "northBoundLatitude", geographicBoundingBox, self.northBoundLatitudeLine.text())
    add_element(doc, "southBoundLatitude", geographicBoundingBox, self.southBoundLatitudeLine.text())
    add_element(doc, "minAltitude", geographicBoundingBox, self.minAltitudeLine.text())
    add_element(doc, "maxAltitude", geographicBoundingBox, self.maxAltitudeLine.text())

    ###########################
    # Metadata Contact Info
    ###########################
    contactInfo = add_element(doc, "ContactInfo", doc_root)
    add_element(doc, "ContactName", contactInfo, self.contactNameLine.text())
    add_element(doc, "ContactRole", contactInfo, self.contactRoleBox.currentText())
    add_element(doc, "ContactEmail", contactInfo, self.contactEmailLine.text())

    ############################
    # Checkbox-backed sections
    ############################
    # Each section is (container tag, checkbox dict, code tag, free-text
    # tag, free-text widget).
    # BUGFIX: the GeographicalRegion free-text tag was written as
    # "GR_other"; read_asmm_xml looks for "GR_Other" (namespace lookups
    # are case-sensitive) so the value was lost on every round-trip.
    # The capitalised form used by every other section is written now.
    sections = [
        ("ScientificAims", self.scientific_aims_check_dict, "SA_Code",
         "SA_Other", self.SAOtherTextBox),
        ("GeographicalRegion", self.geographical_region_check_dict, "GR_Code",
         "GR_Other", self.GROtherTextBox),
        ("AtmosFeatures", self.atmospheric_features_check_dict, "AF_Code",
         "AF_Other", self.AFOtherTextBox),
        ("CloudTypes", self.cloud_types_check_dict, "CT_Code",
         "CT_Other", self.CTOtherTextBox),
        ("ParticlesSampled", self.particles_sampled_check_dict, "PS_Code",
         "PS_Other", self.PSOtherTextBox),
        ("SurfacesOverflown", self.surfaces_overflown_check_dict, "SO_Code",
         "SO_Other", self.SOOtherTextBox),
        ("AltitudeRanges", self.altitude_ranges_check_dict, "AR_Code",
         "AR_Other", self.AROtherTextBox),
        ("FlightTypes", self.flight_types_check_dict, "FT_Code",
         "FT_Other", self.FTOtherTextBox),
        ("SatelliteCoordination", self.satellite_coordination_check_dict, "SC_Code",
         "SC_Other", self.SCOtherTextBox),
    ]
    for tag, check_dict, code_tag, other_tag, text_box in sections:
        parent = add_element(doc, tag, doc_root)
        add_check_elements(doc, check_dict, code_tag, parent)
        add_comment_element(doc, other_tag, parent, text_box.toPlainText())

    ############################
    # Surface Observations
    ############################
    surfaceObs = add_element(doc, "SurfaceObs", doc_root)
    for tag, items in (("GroundSite", self.ground_site_list),
                       ("ResearchVessel", self.research_vessel_list),
                       ("ArmSite", self.arm_site_list),
                       ("ArmMobile", self.arm_mobile_list)):
        for item in items:
            add_element(doc, tag, surfaceObs, item)

    ############################
    # Other Comments (optional)
    ############################
    if self.OtherCommentsTextBox.toPlainText():
        add_element(doc, "OtherComments", doc_root, self.OtherCommentsTextBox.toPlainText())

    # 'with' guarantees the handle is closed even if serialization fails.
    with open(out_file_name, 'w') as f:
        f.write(doc.toprettyxml(encoding='UTF-8'))
    self.saved = True
    self.modified = False
def read_asmm_xml(self, in_file_name):
    """Populate the GUI widgets from an ASMM XML file.

    Parameters
    ----------
    in_file_name : str
        Path of the XML file to read.

    BUGFIX: the input file handle was opened and never closed (resource
    leak); it is now managed by a 'with' block.
    """
    with open(in_file_name, 'r') as f:
        doc = xml.dom.minidom.parse(f)

    ############################
    # Flight Information
    ############################
    self.create_date = get_element_value(doc, "CreationDate")
    flightInformation = get_element(doc, "FlightInformation")
    set_text_value(self.flightNumberLine, flightInformation, "FlightNumber")
    date = get_element_value(flightInformation, "Date")
    self.dateLine.setDate(QDate.fromString(date, Qt.ISODate))
    set_text_value(self.campaignLine, flightInformation, "Campaign")
    set_text_value(self.missionSciLine, flightInformation, "MissionScientist")
    set_text_value(self.flightManagerLine, flightInformation, "FlightManager")
    set_text_value(self.platformLine, flightInformation, "Platform")
    set_text_value(self.operatorLine, flightInformation, "Operator")
    set_text_value(self.countryLine, flightInformation, "Country")
    geographicBoundingBox = get_element(flightInformation, "GeographicBoundingBox")
    set_text_value(self.westBoundLongitudeLine, geographicBoundingBox, "westBoundLongitude")
    set_text_value(self.eastBoundLongitudeLine, geographicBoundingBox, "eastBoundLongitude")
    set_text_value(self.northBoundLatitudeLine, geographicBoundingBox, "northBoundLatitude")
    set_text_value(self.southBoundLatitudeLine, geographicBoundingBox, "southBoundLatitude")
    set_text_value(self.minAltitudeLine, geographicBoundingBox, "minAltitude")
    set_text_value(self.maxAltitudeLine, geographicBoundingBox, "maxAltitude")

    #############################
    # Metadata Contact Info
    #############################
    contactInfo = get_element(doc, "ContactInfo")
    set_text_value(self.contactNameLine, contactInfo, "ContactName")
    set_text_value(self.contactEmailLine, contactInfo, "ContactEmail")
    combo_text = get_element_value(contactInfo, "ContactRole")
    self.contactRoleBox.setCurrentIndex(self.contactRoleBox.findText(combo_text))

    #############################
    # Checkbox-backed sections
    #############################
    # Mirrors the section table used by create_asmm_xml: (container tag,
    # checkbox dict, code tag, free-text tag, free-text widget).
    sections = [
        ("ScientificAims", self.scientific_aims_check_dict, "SA_Code",
         "SA_Other", self.SAOtherTextBox),
        ("GeographicalRegion", self.geographical_region_check_dict, "GR_Code",
         "GR_Other", self.GROtherTextBox),
        ("AtmosFeatures", self.atmospheric_features_check_dict, "AF_Code",
         "AF_Other", self.AFOtherTextBox),
        ("CloudTypes", self.cloud_types_check_dict, "CT_Code",
         "CT_Other", self.CTOtherTextBox),
        ("ParticlesSampled", self.particles_sampled_check_dict, "PS_Code",
         "PS_Other", self.PSOtherTextBox),
        ("SurfacesOverflown", self.surfaces_overflown_check_dict, "SO_Code",
         "SO_Other", self.SOOtherTextBox),
        ("AltitudeRanges", self.altitude_ranges_check_dict, "AR_Code",
         "AR_Other", self.AROtherTextBox),
        ("FlightTypes", self.flight_types_check_dict, "FT_Code",
         "FT_Other", self.FTOtherTextBox),
        ("SatelliteCoordination", self.satellite_coordination_check_dict, "SC_Code",
         "SC_Other", self.SCOtherTextBox),
    ]
    for tag, check_dict, code_tag, other_tag, text_box in sections:
        parent = get_element(doc, tag)
        set_check_values(check_dict, parent, code_tag)
        set_text_value(text_box, parent, other_tag)

    #############################
    # Surface Observations
    #############################
    surfaceObservations = get_element(doc, "SurfaceObs")
    self.ground_site_list = get_element_values(surfaceObservations, "GroundSite")
    self.groundListWidget.addItems(self.ground_site_list)
    self.research_vessel_list = get_element_values(surfaceObservations, "ResearchVessel")
    self.vesselListWidget.addItems(self.research_vessel_list)
    self.arm_site_list = get_element_values(surfaceObservations, "ArmSite")
    self.armListWidget.addItems(self.arm_site_list)
    self.arm_mobile_list = get_element_values(surfaceObservations, "ArmMobile")
    self.armMobileListWidget.addItems(self.arm_mobile_list)

    ##############################
    # Other Comments
    ##############################
    set_text_value(self.OtherCommentsTextBox, doc, "OtherComments")
def get_element(parent, element_name):
    """Return the first ASMM-namespaced *element_name* under *parent*.

    Raises IndexError when no such element exists.
    """
    matches = parent.getElementsByTagNameNS(NAMESPACE_URI, element_name)
    return matches[0]
def get_element_value(parent, element_name):
    """Return the stripped text of the first matching element, or None.

    None is returned when the element is absent or holds no text node.
    """
    matches = parent.getElementsByTagNameNS(NAMESPACE_URI, element_name)
    if not matches:
        return None
    for child in matches[0].childNodes:
        if child.nodeType == child.TEXT_NODE:
            return child.data.strip()
    return None
def get_element_values(parent, element_name):
    """Return the stripped first-child text of every matching element."""
    elements = parent.getElementsByTagNameNS(NAMESPACE_URI, element_name)
    return [element.childNodes[0].data.strip() for element in elements]
def set_check_values(check_dict, parent, element_name):
    """Tick the checkbox mapped to each code value found under *parent*."""
    for element in parent.getElementsByTagNameNS(NAMESPACE_URI, element_name):
        code = element.childNodes[0].data.strip()
        find_key(check_dict, code).setChecked(True)
def set_text_value(text_widget, parent, element_name):
    """Copy the element's text into *text_widget* when it is non-empty."""
    value = get_element_value(parent, element_name)
    if value:
        text_widget.setText(value)
def add_element(doc, element_name, parent, value=None):
    """Create an 'asmm:'-prefixed element, append it to *parent*, return it.

    When *value* is truthy it is converted to unicode (Python 2) and
    stored as a text child; falsy values produce an empty element.
    """
    new_element = doc.createElementNS(NAMESPACE_URI, "asmm:" + element_name)
    if value:
        new_text = doc.createTextNode(unicode(value))
        new_element.appendChild(new_text)
    parent.appendChild(new_element)
    return new_element
def add_comment_element(doc, element_name, parent, value):
    """Append an element only when there is a non-empty value to store."""
    if not value:
        return
    add_element(doc, element_name, parent, value)
def add_check_elements(doc, check_dict, code_name, parent):
    """Append one *code_name* element per checked checkbox in *check_dict*.

    *check_dict* maps checkbox widgets to the code value to store when
    the box is checked.  dict.iteritems() was replaced with dict.items(),
    which behaves identically here and also runs on Python 3.
    """
    for checkbox, code in check_dict.items():
        if checkbox.isChecked():
            add_element(doc, code_name, parent, code)
def find_key(dic, val):
    """Return the first key in *dic* whose value equals *val*.

    Raises IndexError when no key maps to *val*.  dict.iteritems() was
    replaced with dict.items(), which behaves identically here and also
    runs on Python 3.
    """
    return [k for k, v in dic.items() if v == val][0]
| |
# -*- coding: utf-8 -*-
"""
lantz_core.features.util
~~~~~~~~~~~~~~~~~~~~~~~~
Tools to customize feature and help in their writings.
:copyright: 2015 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
from types import MethodType
from functools import update_wrapper
from future.utils import exec_
def wrap_custom_feat_method(meth, feat):
    """Bind a HasFeatures-defined method to a Feature instance.

    Users write overriding methods taking (driver, feat); the Feature
    machinery calls them as (feat, driver) because of Python's binding
    rules, so the function is wrapped to swap the first two arguments.

    Parameters
    ----------
    meth : MethodType or callable
        Method (or plain function) overriding the Feature behaviour.
    feat : Feature
        Feature instance whose default behaviour is overridden.

    Returns
    -------
    MethodType
        The wrapper bound to *feat*; a method already bound to *feat*
        is returned unchanged.
    """
    # Unwrap bound methods; one already bound to this feature is reused.
    if isinstance(meth, MethodType):
        if meth.__self__ is feat:
            return meth
        func = meth.__func__
    else:
        func = meth

    if hasattr(meth, '_feat_wrapped_'):
        # Already wrapped once: bind the existing wrapper directly.
        wrapper = func
    else:
        # Swap the (feat, driver) call order back to the (driver, feat)
        # order the user wrote.
        def wrapper(feat, driver, *args, **kwargs):
            return func(driver, feat, *args, **kwargs)

        update_wrapper(wrapper, func)
        wrapper._feat_wrapped_ = func

    return MethodType(wrapper, feat)
# --- Methods composers -------------------------------------------------------
class MethodsComposer(object):
    """Function-like object used to compose feature methods calls.

    Methods are stored in two parallel lists (ids and callables) so they
    run in insertion order while still supporting positional insertion
    relative to an existing method id.

    Notes
    -----
    Method ids must be unique; re-using an id silently discards the
    previous entry before the new one is inserted.
    """
    __slots__ = ('_names', '_methods')

    def __init__(self):
        self._methods = []
        self._names = []

    def clone(self):
        """Return a shallow copy of this composer."""
        duplicate = type(self)()
        duplicate._names = list(self._names)
        duplicate._methods = list(self._methods)
        return duplicate

    def prepend(self, name, method):
        """Insert a method in front of all existing ones.

        Parameters
        ----------
        name : unicode
            Id under which the method is registered.
        method : MethodType
            Feature-bound method invoked when the composer is called.
        """
        self._remove_duplicate(name)
        self._names.insert(0, name)
        self._methods.insert(0, method)

    def append(self, name, method):
        """Add a method after all existing ones.

        Parameters
        ----------
        name : unicode
            Id under which the method is registered.
        method : MethodType
            Feature-bound method invoked when the composer is called.
        """
        self._remove_duplicate(name)
        self._names.append(name)
        self._methods.append(method)

    def add_after(self, anchor, name, method):
        """Insert a method right after the one registered as *anchor*.

        Parameters
        ----------
        anchor : unicode
            Id of the method after which to insert.
        name : unicode
            Id under which the method is registered.
        method : MethodType
            Feature-bound method invoked when the composer is called.
        """
        self._remove_duplicate(name)
        position = self._names.index(anchor) + 1
        self._names.insert(position, name)
        self._methods.insert(position, method)

    def add_before(self, anchor, name, method):
        """Insert a method right before the one registered as *anchor*.

        Parameters
        ----------
        anchor : unicode
            Id of the method before which to insert.
        name : unicode
            Id under which the method is registered.
        method : MethodType
            Feature-bound method invoked when the composer is called.
        """
        self._remove_duplicate(name)
        position = self._names.index(anchor)
        self._names.insert(position, name)
        self._methods.insert(position, method)

    def replace(self, name, method):
        """Swap the callable registered under *name* for *method*.

        Only custom methods can be replaced. Methods whose presence is
        linked to the feature kwargs cannot be replaced.
        """
        self._methods[self._names.index(name)] = method

    def remove(self, name):
        """Drop the method registered under *name*."""
        position = self._names.index(name)
        self._names.pop(position)
        self._methods.pop(position)

    def reset(self):
        """Discard every registered method."""
        self._names = []
        self._methods = []

    def __getitem__(self, key):
        position = self._names.index(key)
        return self._methods[position]

    def __contains__(self, item):
        return item in self._names

    def _remove_duplicate(self, name):
        """Drop any existing entry with id *name* to keep ids unique."""
        try:
            position = self._names.index(name)
        except ValueError:
            return
        del self._names[position]
        del self._methods[position]
class PreGetComposer(MethodsComposer):
    """Composer used for pre_get methods.
    """
    __slots__ = ()

    def __call__(self, driver):
        """Invoke every registered method in order with the driver only."""
        for meth in self._methods:
            meth(driver)
class PostGetComposer(MethodsComposer):
    """Composer for post_get methods.
    """
    __slots__ = ()

    def __call__(self, driver, value):
        """Thread *value* through every registered method in order and
        return the final result."""
        for meth in self._methods:
            value = meth(driver, value)
        return value
class PreSetComposer(MethodsComposer):
    """Composer for pre_set methods.
    """
    __slots__ = ()

    def __call__(self, driver, value):
        """Thread *value* through every registered method in order and
        return the final result."""
        for meth in self._methods:
            value = meth(driver, value)
        return value
class PostSetComposer(MethodsComposer):
    """Composer for post_set methods.
    """
    __slots__ = ()

    def __call__(self, driver, value, d_value, response):
        """Call mimicking a post_set method and calling all assigned methods
        in order.
        """
        # NOTE(review): the chained value is accumulated but never returned
        # (unlike PostGetComposer/PreSetComposer). This may be intentional
        # if post_set return values are unused — confirm before adding a
        # return statement.
        for m in self._methods:
            value = m(driver, value, d_value, response)
# Map of customizable hook names to the composer class handling that hook.
COMPOSERS = {'pre_get': PreGetComposer, 'post_get': PostGetComposer,
             'pre_set': PreSetComposer, 'post_set': PostSetComposer}
# --- Customisation decorators ------------------------------------------------
def append(id_str='custom'):
    """Mark a function so that it gets appended to a MethodsComposer."""
    def mark(func):
        func._composing = (id_str, 'append')
        return func
    return mark
def prepend(id_str='custom'):
    """Mark a function so that it gets prepended to a MethodsComposer."""
    def mark(func):
        func._composing = (id_str, 'prepend')
        return func
    return mark
def add_after(name, id_str='custom'):
    """Mark a function to be inserted after *name* in a MethodsComposer."""
    def mark(func):
        func._composing = (id_str, 'add_after', name)
        return func
    return mark
def add_before(name, id_str='custom'):
    """Mark a function to be inserted before *name* in a MethodsComposer."""
    def mark(func):
        func._composing = (id_str, 'add_before', name)
        return func
    return mark
def replace(id_str):
    """Mark a function as replacing the method *id_str* in a MethodsComposer."""
    def mark(func):
        func._composing = (id_str, 'replace')
        return func
    return mark
# --- Getter/setter factories -------------------------------------------------
class AbstractGetSetFactory(object):
    """Abstract class for get/set factories.

    Use by Feature to identify such a factory and use it to replace the
    get/set method.
    """

    def build_getter(self):
        """Build the function for getting the Feature value.

        This method is called when a get/set factory is passed as the getter
        argument to a Feature.
        """
        raise NotImplementedError()

    def build_setter(self):
        """Build the function for setting the Feature value.

        This method is called when a get/set factory is passed as the setter
        argument to a Feature.
        """
        raise NotImplementedError()
class constant(AbstractGetSetFactory):
    """Getter factory making a Feature always return the same value.

    This can only be used as a getter factory.

    Parameters
    ----------
    value :
        The value the Feature should return.
    """

    def __init__(self, value):
        super(constant, self).__init__()
        self._value = value

    def build_getter(self):
        """Return a getter closure that ignores the driver entirely."""
        cached = self._value

        def getter(self, driver):
            return cached

        return getter
GET_DEF =\
"""def get(self, driver):
val = {}
return {}
"""
SET_DEF =\
"""def set(self, driver, value):
cmd = {}
return driver.default_set_feature(self, cmd, value)
"""
class conditional(AbstractGetSetFactory):
    """Factory altering a Feature's get/set based on the driver state.

    Parameters
    ----------
    conditional_value : unicode
        String of the form 'a if driver.b else c'. When setting, the value
        being set is accessible as ``value``.
    default : bool
        When True, feed the evaluated conditional into the driver's
        default_get/set_feature method; when False, return it directly.
        Building a setter requires this to be True.
    """

    def __init__(self, conditional_value, default=False):
        super(conditional, self).__init__()
        self._cond = conditional_value
        self._default = default

    def build_getter(self):
        """Generate the get function from the GET_DEF template."""
        if self._default:
            get_def = GET_DEF.format(self._cond,
                                     'driver.default_get_feature(self, val)')
        else:
            get_def = GET_DEF.format(self._cond, 'val')
        namespace = {}
        exec_(get_def, globals(), namespace)
        return namespace['get']

    def build_setter(self):
        """Generate the set function from the SET_DEF template."""
        if not self._default:
            raise ValueError('Can build a setter only if default is True')
        namespace = {}
        exec_(SET_DEF.format(self._cond), globals(), namespace)
        return namespace['set']
| |
#!/usr/bin/python3
# first init env
import env, tools
config = env.getenv("CONFIG")
tools.loadenv(config)
# must import logger after initlogging, ugly
from log import initlogging
initlogging("docklet-worker")
from log import logger
import xmlrpc.server, sys, time
from socketserver import ThreadingMixIn
import threading
import etcdlib, network, container
from nettools import netcontrol
import monitor
from lvmtool import new_group, recover_group
import psutil
##################################################################
# Worker
# Description : Worker starts at worker node to listen rpc request and complete the work
# Init() :
# get master ip
# initialize rpc server
# register rpc functions
# initialize network
# initialize lvm group
# Start() :
# register in etcd
# setup GRE tunnel
# start rpc service
##################################################################
# imitate etcdlib to genernate the key of etcdlib manually
def generatekey(path):
    """Build the etcd key for *path* by prefixing the cluster name,
    imitating the key layout used by etcdlib."""
    prefix = env.getenv("CLUSTER_NAME")
    return '/' + prefix + '/' + path
class ThreadXMLRPCServer(ThreadingMixIn,xmlrpc.server.SimpleXMLRPCServer):
    """XML-RPC server that handles each request in its own thread."""
    pass
class Worker(object):
def __init__(self, etcdclient, addr, port):
self.addr = addr
self.port = port
logger.info ("begin initialize on %s" % self.addr)
self.fspath = env.getenv('FS_PREFIX')
self.poolsize = env.getenv('DISKPOOL_SIZE')
self.etcd = etcdclient
self.master = self.etcd.getkey("service/master")[1]
self.mode=None
# waiting state is preserved for compatible.
self.etcd.setkey("machines/runnodes/"+self.addr, "waiting")
# get this node's key to judge how to init.
[status, key] = self.etcd.getkey("machines/runnodes/"+self.addr)
if status:
self.key = generatekey("machines/allnodes/"+self.addr)
else:
logger.error("get key failed. %s" % 'machines/runnodes/'+self.addr)
sys.exit(1)
# check token to check global directory
[status, token_1] = self.etcd.getkey("token")
tokenfile = open(self.fspath+"/global/token", 'r')
token_2 = tokenfile.readline().strip()
if token_1 != token_2:
logger.error("check token failed, global directory is not a shared filesystem")
sys.exit(1)
logger.info ("worker registered and checked the token")
# worker search all run nodes to judge how to init
# If the node in all node list, we will recover it.
# Otherwise, this node is new added in.
value = 'init-new'
[status, alllist] = self.etcd.listdir("machines/allnodes")
for node in alllist:
if node['key'] == self.key:
value = 'init-recovery'
break
logger.info("worker start in "+value+" mode")
Containers = container.Container(self.addr, etcdclient)
if value == 'init-new':
logger.info ("init worker with mode:new")
self.mode='new'
# check global directory do not have containers on this worker
[both, onlylocal, onlyglobal] = Containers.diff_containers()
if len(both+onlyglobal) > 0:
logger.error ("mode:new will clean containers recorded in global, please check")
sys.exit(1)
[status, info] = Containers.delete_allcontainers()
if not status:
logger.error ("delete all containers failed")
sys.exit(1)
# create new lvm VG at last
new_group("docklet-group",self.poolsize,self.fspath+"/local/docklet-storage")
#subprocess.call([self.libpath+"/lvmtool.sh", "new", "group", "docklet-group", self.poolsize, self.fspath+"/local/docklet-storage"])
elif value == 'init-recovery':
logger.info ("init worker with mode:recovery")
self.mode='recovery'
# recover lvm VG first
recover_group("docklet-group",self.fspath+"/local/docklet-storage")
#subprocess.call([self.libpath+"/lvmtool.sh", "recover", "group", "docklet-group", self.fspath+"/local/docklet-storage"])
[status, meg] = Containers.check_allcontainers()
if status:
logger.info ("all containers check ok")
else:
logger.info ("not all containers check ok")
#sys.exit(1)
else:
logger.error ("worker init mode:%s not supported" % value)
sys.exit(1)
# initialize rpc
# xmlrpc.server.SimpleXMLRPCServer(addr) -- addr : (ip-addr, port)
# if ip-addr is "", it will listen ports of all IPs of this host
logger.info ("initialize rpcserver %s:%d" % (self.addr, int(self.port)))
# logRequests=False : not print rpc log
#self.rpcserver = xmlrpc.server.SimpleXMLRPCServer((self.addr, self.port), logRequests=False)
self.rpcserver = ThreadXMLRPCServer((self.addr, int(self.port)), allow_none=True, logRequests=False)
self.rpcserver.register_introspection_functions()
self.rpcserver.register_instance(Containers)
self.rpcserver.register_function(monitor.workerFetchInfo)
# register functions or instances to server for rpc
#self.rpcserver.register_function(function_name)
# init collector to collect monitor infomation
self.con_collector = monitor.Container_Collector()
self.hosts_collector = monitor.Collector()
# initialize the network
# if worker and master run on the same node, reuse bridges
# don't need to create new bridges
if (self.addr == self.master):
logger.info ("master also on this node. reuse master's network")
else:
logger.info ("initialize network")
# 'docklet-br' of worker do not need IP Addr.
#[status, result] = self.etcd.getkey("network/workbridge")
#if not status:
# logger.error ("get bridge IP failed, please check whether master set bridge IP for worker")
#self.bridgeip = result
# create bridges for worker
#network.netsetup("init", self.bridgeip)
if self.mode == 'new':
if netcontrol.bridge_exists('docklet-br'):
netcontrol.del_bridge('docklet-br')
netcontrol.new_bridge('docklet-br')
else:
if not netcontrol.bridge_exists('docklet-br'):
logger.error("docklet-br not found")
sys.exit(1)
logger.info ("setup GRE tunnel to master %s" % self.master)
#network.netsetup("gre", self.master)
if not netcontrol.gre_exists('docklet-br', self.master):
netcontrol.setup_gre('docklet-br', self.master)
# start service of worker
    def start(self):
        """Bring the worker online: start monitors, register capacity and
        liveness keys in etcd, launch the heartbeat thread, then serve RPC."""
        # start collectors first so monitoring data exists before the
        # master begins querying this node over RPC
        self.con_collector.start()
        self.hosts_collector.start()
        logger.info("Monitor Collector has been started.")
        # worker changes its state itself, independently from the master
        self.etcd.setkey("machines/runnodes/"+self.addr, "work")
        # advertise this node's capacity (memory reported in MiB)
        self.etcd.setkey("cpus/"+self.addr,psutil.cpu_count())
        self.etcd.setkey("mems/"+self.addr,str(int(psutil.virtual_memory().total/1024/1024)))
        # keep-alive loop runs in a background thread; see sendheartbeat()
        self.thread_sendheartbeat = threading.Thread(target=self.sendheartbeat)
        self.thread_sendheartbeat.start()
        # start serving rpc -- blocks the main thread forever
        logger.info ("begins to work")
        self.rpcserver.serve_forever()
# send heardbeat package to keep alive in etcd, ttl=2s
def sendheartbeat(self):
while(True):
# check send heartbeat package every 1s
time.sleep(1)
[status, value] = self.etcd.getkey("machines/runnodes/"+self.addr)
if status:
# master has know the worker so we start send heartbeat package
if value=='ok':
self.etcd.setkey("machines/runnodes/"+self.addr, "ok", ttl = 2)
else:
logger.error("get key %s failed, master crashed or initialized. restart worker please." % self.addr)
sys.exit(1)
if __name__ == '__main__':
    # resolve cluster-wide settings from the environment
    etcdaddr = env.getenv("ETCD")
    logger.info ("using ETCD %s" % etcdaddr )
    clustername = env.getenv("CLUSTER_NAME")
    logger.info ("using CLUSTER_NAME %s" % clustername )
    # get network interface and derive this worker's IP address from it
    net_dev = env.getenv("NETWORK_DEVICE")
    logger.info ("using NETWORK_DEVICE %s" % net_dev )
    ipaddr = network.getip(net_dev)
    if ipaddr is False:
        logger.error("network device is not correct")
        sys.exit(1)
    else:
        logger.info("using ipaddr %s" % ipaddr)
    # init etcdlib client; all keys live under the cluster-name prefix
    try:
        etcdclient = etcdlib.Client(etcdaddr, prefix = clustername)
    except Exception:
        logger.error ("connect etcd failed, maybe etcd address not correct...")
        sys.exit(1)
    else:
        logger.info("etcd connected")
    # NOTE(review): cpu_quota/mem_quota are read and logged but not passed
    # to Worker here -- presumably consumed elsewhere; confirm.
    cpu_quota = env.getenv('CONTAINER_CPU')
    logger.info ("using CONTAINER_CPU %s" % cpu_quota )
    mem_quota = env.getenv('CONTAINER_MEMORY')
    logger.info ("using CONTAINER_MEMORY %s" % mem_quota )
    worker_port = env.getenv('WORKER_PORT')
    logger.info ("using WORKER_PORT %s" % worker_port )
    logger.info("Starting worker")
    # start() blocks serving RPC until the process exits
    worker = Worker(etcdclient, addr=ipaddr, port=worker_port)
    worker.start()
| |
# coding: utf-8
# In[1]:
# In[2]:
from scipy import ndimage
from weighted import median as wmedian
from weighted import quantile as wquantile
import weightedstats as wstats
import copy
# In[3]:
import glob
# In[4]:
import os
import sys
import numpy as np
from sedfitter.fit_info import FitInfoFile
from sedfitter.models import load_parameter_table
from astropy.table import Table,join
import astropy.units as u
import astropy.constants as const
from itertools import chain
def get_menv(model_number, model_numbers, m_env):
    """Look up the envelope mass for a single model number.

    Returns 0 when the model number is absent from model_numbers.
    """
    matches = np.in1d(model_numbers, [int(model_number)])
    if np.sum(matches) == 0:
        return 0
    return m_env[matches][0]
# return m_env[np.where(model_numbers == int(model_number))][0]
def get_menv_2(tnum):
    """Vectorized lookup of envelope masses for an array of model numbers.

    Loads the 'menv' and 'model_numbers' tables from the local sedfitter
    directory. NOTE: np.searchsorted requires 'model_numbers' to be sorted;
    the on-disk file is assumed to be stored in sorted order.
    """
    root = '/Users/johnlewisiii/Charlie/sedfitter_cf/OrionProtostars/'
    menv = np.loadtxt(root+'menv')
    # bug fix: dtype=str -- the np.str alias was removed in NumPy 1.24
    mnum = np.loadtxt(root+'model_numbers',dtype=str)
    # -- searchsorted(x,y) returns index of every y that is in x
    # it is asking - where does y_i first appear in x
    return menv[np.searchsorted(mnum,tnum)]
def append_menv(param_table):
    """Add a MENV (envelope mass) column to a sedfitter parameter table."""
    # the first 7 characters of MODEL_NAME encode the model number
    prefixes = np.array([name[:7] for name in param_table['MODEL_NAME']])
    param_table['MENV'] = get_menv_2(prefixes)
    return param_table
def minmax(arr, w=None):
    """Return (min, max) of arr, optionally restricted to the selector w.

    NaNs are ignored when possible; falls back to plain min/max for inputs
    np.nanmin cannot handle (object arrays, all-NaN slices).
    """
    if w is not None:
        arr = np.array(arr)[w]
    try:
        return np.nanmin(arr), np.nanmax(arr)
    except (TypeError, ValueError):
        # narrowed from a bare except: keep the fallback targeted
        return np.min(arr), np.max(arr)
def minbestmax(arr):
    """Return (min, best, max) where 'best' is the first (best-fit) element.

    Tries NaN-aware extrema first, then plain min/max; as a last resort
    returns just the best-fit value.
    """
    try:
        return np.nanmin(arr), arr[0], np.nanmax(arr)
    except (TypeError, ValueError):
        try:
            return np.min(arr), arr[0], np.max(arr)
        except (TypeError, ValueError):
            # bug fix: the original fell through without 'return', yielding None
            return arr[0]
def jmin(arr):
    """NaN-aware minimum with a plain min() fallback for odd inputs."""
    try:
        return np.nanmin(arr)
    except (TypeError, ValueError):
        # narrowed from a bare except
        return min(arr)
def jmax(arr):
    """NaN-aware maximum with a plain max() fallback for odd inputs."""
    try:
        return np.nanmax(arr)
    except (TypeError, ValueError):
        # narrowed from a bare except
        return max(arr)
def mean(arr, w=None):
    """NaN-aware mean with an optional boolean selector.

    If w is given and its first entry is truthy, the best-fit (first) value
    is returned directly; otherwise arr is restricted to w before averaging.
    """
    if w is not None:
        if w[0]:
            # best fit is inside the selection: report it directly
            return arr[0]
        arr = np.array(arr)[w]
    try:
        return np.nanmean(arr)
    except (TypeError, ValueError):
        # narrowed from a bare except
        return np.mean(arr)
def get_av(name, avs = None):
    """Return the foreground extinction for a named source.

    avs is either a single-element array (one global value) or an Nx2
    array of (source id, av) rows.
    """
    if len(avs) <= 1:
        # single global extinction value
        return avs[0]
    row = np.where(avs[:, 0] == int(name))[0]
    return avs[row, 1][0]
# In[5]:
def jjmin(arr):
    """Minimum over the finite entries of arr (ignores NaN and +/-inf)."""
    finite = np.isfinite(arr)
    return np.min(arr[finite])
def jjmax(arr):
    """Maximum over the finite entries of arr (ignores NaN and +/-inf)."""
    finite = np.isfinite(arr)
    return np.max(arr[finite])
def jj_median(arr,weights,w=None):
    """Weighted median of arr, optionally restricted to the selector w."""
    if w is not None:
        arr = np.array(arr)[w]
        weights = np.array(weights)[w]
    return wmedian(np.asarray(arr),np.asarray(weights))
#return arr[0]
def jj_avg_std(values, weights,w=None):
    """Return weighted (median, 16th percentile, 84th percentile).

    values, weights -- array-likes of the same shape.
    w optionally selects a subset of the entries before computing stats.
    """
    if w is not None:
        values = np.array(values)[w]
        weights = np.array(weights)[w]
    values = np.asarray(values)
    weights = np.asarray(weights)
    med = jj_median(values, weights)
    # 1-sigma-equivalent interval from the weighted CDF
    lo = wquantile(values, weights, .16)
    hi = wquantile(values, weights, .84)
    return (med, lo, hi)
def return_av_array(av_file, convert_from_ak=True):
    """Load per-source extinctions.

    av_file may be a filename (two columns: source id, extinction) or an
    already-built array of extinctions. When convert_from_ak is set the
    extinction is converted from A_K to A_V (A_V = A_K / 0.11).
    """
    if isinstance(av_file, str):
        avs = np.loadtxt(os.path.expanduser(av_file))
        if convert_from_ak:
            avs[:, 1] = avs[:, 1] / .11
        return avs
    avs = av_file
    if convert_from_ak:
        avs = avs / .11
    return avs
# In[7]:
def get_stage(t, ratio):
    """Classify one model (table row) into evolutionary stage 'I'/'II'/'III'.

    Stage I  : significant infall plus a massive envelope relative to the star.
    Stage II : infall finished but an appreciable disk remains.
    Stage III: neither envelope nor disk is significant.
    """
    menv, massc = t['MENV'], t['MASSC']
    mdot, mdisk = t['MDOT'], t['MDISK']
    is_I = (mdot / massc >= 1.0e-6) & (menv / massc >= ratio)
    is_II = (mdot / massc < 1.0e-6) & (mdisk / massc >= 1e-6)
    is_III = ((mdot / massc < 1.0e-6) & (mdisk / massc < 1e-6)) | ~(is_II | is_I)
    if is_I:
        return 'I'
    if is_II:
        return 'II'
    if is_III:
        return 'III'
    return 'Fail'
def source_classifier(menv, massc, mdot, mdisk, chi2, ratio = 0.05):
    """Classify one source from its ensemble of acceptable model fits.

    Uses chi^2-weighted medians of the fit parameters so each model's vote
    counts as 1/chi^2. Returns 'P' (protostar), 'D' (disk), 'S' (stellar),
    or 'Dex' if no criterion matches.
    """
    menv = np.asarray(menv)
    massc = np.asarray(massc)
    mdot = np.asarray(mdot)
    mdisk = np.asarray(mdisk)
    weights = 1./chi2
    med_mdot = jj_median(mdot / massc, weights)
    med_menv = jj_median(menv / massc, weights)
    med_mdisk = jj_median(mdisk / massc, weights)
    if (med_mdot >= 1.0e-6) & (med_menv >= ratio):
        return 'P'
    if (med_mdot < 1.0e-6) & (med_mdisk >= 1e-6):
        return 'D'
    if (med_mdot < 1.0e-6) & (med_mdisk < 1e-6):
        return 'S'
    return 'Dex'
# In[8]:
def write_val(*nums):
    """Format numbers for table output; returns a tuple of strings.

    Values far from unity (below 1e-2 or at/above 1e3) are rendered in LaTeX
    scientific notation, zero as '0', everything else as a one-decimal float.
    """
    if len(nums)==1:
        nums = nums[0]
    if not hasattr(nums, '__iter__'):
        nums = (nums,)
    formatted = ()
    for num in nums:
        if num == 0:
            formatted += ('%i' % num,)
        elif (np.log10(num) < -2) ^ (np.log10(num) >= 3 ):
            exponent = np.log10(num)
            mantissa = 10**np.mod(exponent, 1)
            formatted += ('$%0.1f \\times 10^{%i}$ ' % (mantissa, int(exponent)),)
        else:
            formatted += ('%0.1f' % num,)
    return formatted
# In[9]:
def open_fitinfo(input_fits):
    """Open a sedfitter fitinfo file and collect its per-source records.

    Returns (FitInfoFile handle, list of FitInfo objects, one per source).
    """
    fin = FitInfoFile(input_fits, 'r')
    t = load_parameter_table(fin.meta.model_dir)
    # model names are padded in the table; strip so joins/sorts behave
    t['MODEL_NAME'] = np.char.strip(t['MODEL_NAME'])
    t = append_menv(t)
    t.sort('MODEL_NAME')
    # NOTE(review): t is built and sorted but never returned -- confirm
    # whether callers are expected to reload it themselves.
    source = []
    for info in fin:
        source.append(info)
    return fin, source
# In[10]
def new_results_final(input_fits, verbose=True, output=True,
                      av_file=None, keep=('D', 1), convert_from_ak = True,
                      scale_chi2=True,fname='',prot_only=False, ratio=0.05):
    """Analyze a sedfitter results file and classify every source.

    For each source with usable fits, selects the acceptable models
    (info.keep), derives stage statistics and a class ('P'/'D'/'S'),
    computes chi^2-weighted medians of every model parameter, and
    optionally writes a Markdown summary table to disk.

    Returns (output_table, source): an astropy Table of per-source
    parameters and the list of retained FitInfo records.
    """
    print(('----- Analyzing %s ----'%(os.path.basename(input_fits))))
    avs = return_av_array(av_file, convert_from_ak = convert_from_ak)
    fin = FitInfoFile(input_fits, 'r')
    source=[]
    # parameter table for all models, keyed/sorted by model name
    t = load_parameter_table(fin.meta.model_dir)
    t['MODEL_NAME'] = np.char.strip(t['MODEL_NAME'])
    t = append_menv(t)
    t.sort('MODEL_NAME')
    # per-source accumulators (several are kept for historical reasons)
    result, source, av, averr, infos, avints, incl, menvs, avlos, lum, mdot, massc, mdisk, stage, tstar, ndata = [], [], [], [], [], [], [], [], [], [], [], [], [], [], [],[]
    tkeys = t.colnames[1:]
    maxt = [b+'+1sig' for b in tkeys]
    mint = [b+'-1sig' for b in tkeys]
    pkeys = list(chain(*list(zip(tkeys,mint,maxt))))
    keys = ['Source ID', 'class', 'av', 'scale','AVLOS'] + tkeys
    dtypes = [t[n].dtype for n in tkeys]
    # prepend dtypes for AVLOS, scale, av (floats) and class, Source ID (strings)
    [dtypes.insert(0,i) for i in [float,float,float,'|S5','|S5']]
    output_table = Table(names = keys, masked=True,dtype=dtypes)
    params = {k: [] for k in keys}
    fname = fname + '%2.2f'%ratio
    if output:
        # NOTE(review): fname can no longer be None here (it was just
        # concatenated above), so the first branch looks unreachable -- confirm.
        if fname is None:
            if prot_only:
                fname = '_prot_only'
            fname = input_fits[:-8] + '_out_%s%i%s.md'%(keep[0],keep[1],fname)
        else:
            if prot_only:
                fname = fname + '_prot_only'
            fname = input_fits[:-8] + '_out_%s%i_%s.md'%(keep[0],keep[1],fname)
        fout = open(fname , 'w')
        # Markdown table header for the per-source summary
        fout.write("source file: %s"%input_fits)
        fout.write("| Source | Class | $\chi^2_{best}$ | $N_{data}$ | $N_{fits}$ | $N_{P}$ | $N_{D}$ | $M\_{env}/M\_{\\star}$ ($\\times 10^{-2}$) | | $\\dot{M}/M_{\\star}$ ($\\times 10^{-6}$) | | $M_{\\star}$ | |\n")
        fout.write("|:------:|:-----:|:----------------:|:-----------:|:-----------:|:-------:|:-------:|:------------------------------------------:|:-----:|:-----------------------------------------:|:-----:|:------------:|:-----:|\n")
        fout.write("| | | | | | | | Median | Range | Median | Range | Median | Range |\n")
    # ----------- Loop over fin -------------- #
    for info in fin:
        param = { k: [] for k in keys }
        # sources whose fitted A_V is all-NaN get a placeholder 'U' row
        if not np.isnan(np.nanmean(info.av)):
            source.append(copy.copy(info))
            minchi=info.chi2.min()
            # optionally normalize chi^2 by the best fit, then select the
            # models to keep ('D' = delta-chi^2 criterion)
            if (scale_chi2) | (keep[0] == 'J'):
                info.chi2 = info.chi2/minchi
                keep=('D',keep[1])
            info.keep(keep[0], keep[1])
            param['Source ID'] = info.source.name
            param['av'] = np.float32(get_av(info.source.name, avs))
            tsorted = info.filter_table(t, additional={})
            # stage classification of each retained model row
            get_st = lambda tab: get_stage(tab,ratio=ratio)
            stages = list(map(get_st, tsorted)) # get stages of selected fits
            stageI = np.array(stages) == 'I'
            stageII = (np.array(stages) == 'II') | (np.array(stages) == 'III')
            nfits=np.sum(stageI)+np.sum(stageII)
            # create lists that contain source properties of interest
            ndata.append(info.source.n_data)
            av.append(get_av(info.source.name, avs))
            mdot.append(tsorted['MDOT'].tolist())
            massc.append(tsorted['MASSC'].tolist())
            mdisk.append(tsorted['MDISK'].tolist())
            menvs.append(tsorted['MENV'].tolist())
            # determine class based on quantities of interest using classification criteria
            # these classification criteria may be different than what is required for
            # identifying Model stages. Will return a single class
            classification = source_classifier(menvs[-1], massc[-1], mdot[-1], mdisk[-1],info.chi2, ratio=ratio)
            result.append(classification)
            # ------------ Print important parameters to file -----------#
            if prot_only:
                writeout = output and (result[-1]=='P')
            else:
                writeout = output
            if writeout:
                fout.write('| %s ' % info.source.name) # source name
                fout.write(' | ')
                fout.write('%s ' % result[-1]) # source class
                fout.write(' | ')
                fout.write('%5.1f' % (minchi)) # lowest chi^2 value
                fout.write(' | ')
                fout.write('%3i' % (ndata[-1])) # number of data points
                fout.write(' | ')
                fout.write('%3i' % nfits) # number of fits in range
                fout.write(' | ')
                fout.write('%3i' % np.sum(stageI)) # number of stage I
                fout.write(' | ')
                fout.write('%3i' % np.sum(stageII)) # number of stage II
                fout.write(' | ')
                # Menv/Massc, scaled to 1e-2
                qavg, qmin, qmax = jj_avg_std(np.array(menvs[-1])/np.array(massc[-1]), 1./info.chi2)
                power = -2
                factor = 10.**power
                fout.write('%s' % write_val(qavg/factor) )
                fout.write(' | ')
                fout.write('(%s $-$ %s)' % (write_val(qmin/factor, qmax/factor)) )
                fout.write(' | ')
                # Mdot/Massc, scaled to 1e-6
                qavg, qmin, qmax = jj_avg_std(np.array(mdot[-1])/np.array(massc[-1]), 1./info.chi2)
                power = -6
                factor = 10.**power
                fout.write('%s' % write_val(qavg/factor))
                fout.write(' | ')
                fout.write('(%s $-$ %s)' % (write_val(qmin/factor,qmax/factor)) )
                fout.write(' | ')
                # Massc (stellar mass)
                qavg, qmin, qmax = jj_avg_std(massc[-1], 1./info.chi2)
                fout.write('%s' % write_val(qavg) )
                fout.write(' | ')
                fout.write('(%s $-$ %s)' % (write_val(qmin,qmax)) )
                fout.write(' | ')
                fout.write('\n')
            # ------------ [END] Print important parameters to file ----------- #
            # chi^2-weighted medians for the output table
            param['class'] = classification
            med, q1, q2 = jj_avg_std(info.av,1./info.chi2)
            param['AVLOS'] = med
            param['scale'] = wmedian(info.sc,1./info.chi2)
            for k in tkeys:
                med, q1, q2 = jj_avg_std(tsorted[k].__array__(),1./info.chi2)
                param[k] = med
        else:
            # unusable source: record NaNs and class 'U'
            param['Source ID'] = info.source.name
            param['av'] = np.float32(get_av(info.source.name, avs))
            param['scale'] = np.nan
            param['class'] = 'U'
            param['AVLOS'] = np.nan
            for k in tkeys:
                param[k] = np.nan
        output_table.add_row(param)
        for k in tkeys:
            params[k].append(param[k])
    # ----------- [END] Loop over fin -------------- #
    if output:
        fout.close()
    return output_table,source
def get_sed():
    """Placeholder for SED retrieval; not yet implemented."""
    return None
| |
#!/usr/bin/env python
##
## Licensed to the Apache Software Foundation (ASF) under one
## or more contributor license agreements. See the NOTICE file
## distributed with this work for additional information
## regarding copyright ownership. The ASF licenses this file
## to you under the Apache License, Version 2.0 (the
## "License"); you may not use this file except in compliance
## with the License. You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing,
## software distributed under the License is distributed on an
## "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
## KIND, either express or implied. See the License for the
## specific language governing permissions and limitations
## under the License.
##
import cgi
import cgitb
from proton import Messenger, Message, Timeout
from time import strftime, gmtime
# When False, the 'action' form parameter is ignored (read-only UI).
updates_enabled = True
# Query-log destination; must be writable by the web-server user.
logfile = '/var/log/qdweb'
# Render tracebacks as HTML instead of a blank 500 page.
cgitb.enable()
def qdw_start(ctype):
    """Emit the CGI Content-Type header and the blank line ending headers."""
    print "Content-Type: %s" % ctype
    print
def qdw_title(title):
    """Open the HTML document: head with title and stylesheet, then body."""
    print '<html>'
    print '<head>'
    print ' <title>%s</title>' % title
    print ' <link rel="stylesheet" type="text/css" href="/css/qdweb.css" />'
    print '</head>'
    print '<body>'
def qdw_close():
    """Emit the footer credit and close the body/html elements."""
    print '<center>'
    print 'Powered by <a href="http://qpid.apache.org/components/dispatch-router/index.html">Apache Qpid Dispatch</a> <img src="/feather.png" />'
    print '</center>'
    print '</body>'
    print '</html>'
def qdw_table(rows, heads=None, caption=None):
    """Print an HTML table: optional bold caption, optional header row,
    and alternating row styles for readability."""
    if caption:
        print "<center><b>%s</b></center>" % caption
    print '<center><table cellpadding="4">'
    if heads:
        print ' <tr class="header">'
        for h in heads:
            print " <td>%s</td>" % h
        print " </tr>"
    ordinal = 0
    for row in rows:
        # even rows plain, odd rows get the 'alt' style
        if (ordinal / 2) * 2 == ordinal:
            print ' <tr>'
        else:
            print ' <tr class="alt">'
        for r in row:
            print " <td>%s</td>" % str(r)
        print " </tr>"
        ordinal += 1
    print "</table></center>"
def qdw_yn(val, valid=True):
    """Render a boolean as Yes/No; '--' when the value is not valid."""
    if not valid:
        return "--"
    return "Yes" if val else "No"
def qdw_val(val, valid=True):
    """Pass a value through unchanged, or '--' when it is not valid."""
    return val if valid else "--"
def qdw_maybe(val):
    """Return the value, or '--' for falsy values."""
    return val or "--"
def qdw_first_line(text):
    """Return the text up to the first <br/<p tag (treated as line breaks)."""
    normalized = text.replace('<br ', '\n').replace('<p ', '\n')
    return normalized.split('\n', 1)[0]
def qdw_log_query(text):
    """Append one query record (plus newline) to the qdweb log file."""
    # with-statement guarantees the handle is closed even if a write fails
    with open(logfile, "a") as fd:
        fd.write(text)
        fd.write('\n')
def qdw_do_action(db, form):
    """Stub for state-changing requests.

    NOTE(review): the 'action' form value is read but nothing is done with
    it -- presumably a placeholder for future update support; confirm.
    """
    action = form['action'].value
def qdw_menu():
    """Emit the navigation bar linking the five views, plus a separator."""
    print '<center>'
    print '<table>'
    print ' <tr><td><font size="+2">'
    print ' <a href="qdweb.py">Main Page</a> '
    print ' <a href="qdweb.py?view=CONN">Connections</a> '
    print ' <a href="qdweb.py?view=LINK">Links</a> '
    print ' <a href="qdweb.py?view=ADDRESS">Addresses</a> '
    print ' <a href="qdweb.py?view=MEMORY">Memory</a> '
    print ' </td></tr>'
    print '</table></center><p /><hr width="85%"><p />'
def addr_class(addr):
    """Map an address's leading type character to a human-readable class."""
    if not addr:
        return "-"
    classes = {'M': "mobile", 'R': "router", 'A': "area", 'L': "local"}
    prefix = addr[0]
    if prefix in classes:
        return classes[prefix]
    return "unknown: %s" % prefix
def addr_text(addr):
    """Strip the one-character class prefix from an address."""
    return addr[1:] if addr else "-"
class GeneralPage:
    """Main page: overall router identity and counters."""
    def __init__(self, router, form):
        # router: BusManager for management queries; form: parsed CGI fields
        self.router = router
        self.form = form
    def display(self):
        """Render title, menu, and a router-information table."""
        qdw_title("Qpid Dispatch Router in Docker - Main Page")
        qdw_menu()
        # a single 'router' management entity describes this instance
        data = self.router.GetObject('org.apache.qpid.dispatch.router')[0]
        rows = []
        rows.append(["Mode", data.mode])
        rows.append(["Area", data.area])
        rows.append(["Router ID", data.router_id])
        rows.append(["Address Count", data.addr_count])
        rows.append(["Link Count", data.link_count])
        rows.append(["Node Count", data.node_count])
        qdw_table(rows, caption="Router Information")
        print '<hr width="85%"><p />'
class ConnPage:
    """Connections view: one table row per AMQP connection."""
    def __init__(self, router, form):
        self.router = router
        self.form = form
    def display(self):
        """Render title, menu, and the connections table."""
        qdw_title("Qpid Dispatch Router in Docker - Connections Page")
        qdw_menu()
        hdr = []
        hdr.append(Header("state"))
        hdr.append(Header("host"))
        hdr.append(Header("container"))
        hdr.append(Header("sasl-mechanisms"))
        hdr.append(Header("role"))
        hdr.append(Header("dir"))
        data = self.router.GetObject('org.apache.qpid.dispatch.connection')
        rows = []
        for conn in data:
            row = []
            row.append(conn.state)
            row.append(conn.host)
            row.append(conn.container)
            row.append(conn.sasl)
            row.append(conn.role)
            row.append(conn.dir)
            rows.append(row)
        qdw_table(rows, heads=hdr, caption="Connections")
        print '<hr width="85%"><p />'
class LinkPage:
    """Router-links view: one table row per link."""
    def __init__(self, router, form):
        self.router = router
        self.form = form
    def display(self):
        """Render title, menu, and the links table."""
        qdw_title("Qpid Dispatch Router in Docker - Links Page")
        qdw_menu()
        hdr = []
        hdr.append(Header("type"))
        hdr.append(Header("dir"))
        hdr.append(Header("rindex"))
        hdr.append(Header("class"))
        hdr.append(Header("addr"))
        data = self.router.GetObject('org.apache.qpid.dispatch.router.link')
        rows = []
        for link in data:
            row = []
            row.append(link.link_type)
            row.append(link.link_dir)
            # only inter-router links carry a routing index
            if link.link_type == "inter-router":
                row.append(link.index)
            else:
                row.append('-')
            row.append(addr_class(link.owning_addr))
            row.append(addr_text(link.owning_addr))
            rows.append(row)
        qdw_table(rows, heads=hdr, caption="Router Links")
        print '<hr width="85%"><p />'
class AddressPage:
def __init__(self, router, form):
self.router = router
self.form = form
def display(self):
qdw_title("Qpid Dispatch Router in Docker - Address Page")
qdw_menu()
hdr = []
hdr.append(Header("class"))
hdr.append(Header("address"))
hdr.append(Header("in-proc", Header.Y))
hdr.append(Header("local", Header.COMMAS))
hdr.append(Header("remote", Header.COMMAS))
hdr.append(Header("in", Header.COMMAS))
hdr.append(Header("out", Header.COMMAS))
hdr.append(Header("thru", Header.COMMAS))
hdr.append(Header("to-proc", Header.COMMAS))
hdr.append(Header("from-proc", Header.COMMAS))
data = router.GetObject('org.apache.qpid.dispatch.router.address')
rows = []
for addr in data:
row = []
row.append(addr_class(addr.addr))
row.append(addr_text(addr.addr))
row.append(addr.in_process)
row.append(addr.subscriber_count)
row.append(addr.remote_count)
row.append(addr.deliveries_ingress)
row.append(addr.deliveries_egress)
row.append(addr.deliveries_transit)
row.append(addr.deliveries_to_container)
row.append(addr.deliveries_from_container)
rows.append(row)
title = "Router Addresses"
sorter = Sorter(hdr, rows, 'address', 0, True)
dispRows = sorter.getSorted()
qdw_table(dispRows, heads=hdr, caption="Router Addresses")
print '<hr width="85%"><p />'
class MemoryPage:
def __init__(self, router, form):
self.router = router
self.form = form
def display(self):
qdw_title("Qpid Dispatch Router in Docker - Memory Page")
qdw_menu()
hdr = []
hdr.append(Header("type"))
hdr.append(Header("size", Header.COMMAS))
hdr.append(Header("batch"))
hdr.append(Header("thread-max", Header.COMMAS))
hdr.append(Header("total", Header.COMMAS))
hdr.append(Header("in-threads", Header.COMMAS))
hdr.append(Header("rebal-in", Header.COMMAS))
hdr.append(Header("rebal-out", Header.COMMAS))
data = router.GetObject('org.apache.qpid.dispatch.allocator')
rows = []
for t in data:
row = []
row.append(t.name)
row.append(t.type_size)
row.append(t.transfer_batch_size)
row.append(t.local_free_list_max)
row.append(t.total_alloc_from_heap)
row.append(t.held_by_threads)
row.append(t.batches_rebalanced_to_threads)
row.append(t.batches_rebalanced_to_global)
rows.append(row)
title = "Types"
sorter = Sorter(hdr, rows, 'type', 0, True)
dispRows = sorter.getSorted()
qdw_table(dispRows, heads=hdr, caption="Router Memory Statistics")
print '<hr width="85%"><p />'
def YN(val):
    """Render truthiness as 'Y' or 'N'."""
    return 'Y' if val else 'N'
def Commas(value):
    """Insert thousands separators into the string form of a value."""
    digits = str(value)
    groups = []
    # peel three characters at a time off the right-hand end
    while digits:
        groups.insert(0, digits[-3:])
        digits = digits[:-3]
    return ",".join(groups)
def TimeLong(value):
    """Format a nanoseconds-since-epoch timestamp as full date/time (UTC)."""
    return strftime("%c", gmtime(value / 1000000000))
def TimeShort(value):
    """Format a nanoseconds-since-epoch timestamp as time-of-day (UTC)."""
    return strftime("%X", gmtime(value / 1000000000))
class Header:
    """Column header carrying an output format for its cell values."""
    # format codes
    NONE = 1
    KMG = 2
    YN = 3
    Y = 4
    TIME_LONG = 5
    TIME_SHORT = 6
    DURATION = 7
    COMMAS = 8
    def __init__(self, text, format=NONE):
        self.text = text
        self.format = format
    def __repr__(self):
        return self.text
    def __str__(self):
        return self.text
    def formatted(self, value):
        """Render value per this column's format; '?' on any formatting error."""
        try:
            if value is None:  # was 'value == None'
                return ''
            if self.format == Header.NONE:
                return value
            if self.format == Header.KMG:
                return self.num(value)
            if self.format == Header.YN:
                if value:
                    return 'Y'
                return 'N'
            if self.format == Header.Y:
                if value:
                    return 'Y'
                return ''
            if self.format == Header.TIME_LONG:
                return TimeLong(value)
            if self.format == Header.TIME_SHORT:
                return TimeShort(value)
            if self.format == Header.DURATION:
                if value < 0: value = 0
                # explicit floor division: keeps integer semantics on both
                # Python 2 and Python 3
                sec = value // 1000000000
                min = sec // 60
                hour = min // 60
                day = hour // 24
                result = ""
                if day > 0:
                    result = "%dd " % day
                if hour > 0 or result != "":
                    result += "%dh " % (hour % 24)
                if min > 0 or result != "":
                    result += "%dm " % (min % 60)
                result += "%ds" % (sec % 60)
                return result
            if self.format == Header.COMMAS:
                return Commas(value)
        except Exception:  # narrowed from a bare except; still best-effort
            return "?"
    def numCell(self, value, tag):
        # render value/1000 with the unit tag, 4-character wide
        fp = float(value) / 1000.
        if fp < 10.0:
            return "%1.2f%c" % (fp, tag)
        if fp < 100.0:
            return "%2.1f%c" % (fp, tag)
        return "%4d%c" % (value // 1000, tag)
    def num(self, value):
        """Human-friendly k/m/g scaling of an integer."""
        if value < 1000:
            return "%4d" % value
        if value < 1000000:
            return self.numCell(value, 'k')
        value //= 1000
        if value < 1000000:
            return self.numCell(value, 'm')
        value //= 1000
        return self.numCell(value, 'g')
class Display:
    """ Display formatting: plain-text tables and timestamp helpers. """
    def __init__(self, spacing=2, prefix=" "):
        # spacing: minimum gap between columns; prefix: left margin for tables
        self.tableSpacing = spacing
        self.tablePrefix = prefix
        self.timestampFormat = "%X"
    def formattedTable(self, title, heads, rows):
        """Format every cell through its Header, then print the table."""
        fRows = []
        for row in rows:
            fRow = []
            col = 0
            for cell in row:
                fRow.append(heads[col].formatted(cell))
                col += 1
            fRows.append(fRow)
        headtext = []
        for head in heads:
            headtext.append(head.text)
        self.table(title, headtext, fRows)
    def table(self, title, heads, rows):
        """ Print a table with autosized columns """
        # Pad the rows to the number of heads
        for row in rows:
            diff = len(heads) - len(row)
            for idx in range(diff):
                row.append("")
        print title
        if len (rows) == 0:
            return
        # first pass: compute each column's width from header and cells
        colWidth = []
        col = 0
        line = self.tablePrefix
        for head in heads:
            width = len (head)
            for row in rows:
                text = row[col]
                # Python 2: decode byte strings so width counts characters
                if text.__class__ == str:
                    text = text.decode('utf-8')
                cellWidth = len(unicode(text))
                if cellWidth > width:
                    width = cellWidth
            colWidth.append (width + self.tableSpacing)
            line = line + head
            if col < len (heads) - 1:
                for i in range (colWidth[col] - len (head)):
                    line = line + " "
            col = col + 1
        print line
        # separator line of '=' under the headers
        line = self.tablePrefix
        for width in colWidth:
            for i in range (width):
                line = line + "="
        print line
        # second pass: emit each row padded to the column widths
        for row in rows:
            line = self.tablePrefix
            col = 0
            for width in colWidth:
                text = row[col]
                if text.__class__ == str:
                    text = text.decode('utf-8')
                line = line + unicode(text)
                if col < len (heads) - 1:
                    for i in range (width - len(unicode(text))):
                        line = line + " "
                col = col + 1
            print line
    def do_setTimeFormat (self, fmt):
        """ Select timestamp format: 'long' = full date, 'short' = time only """
        if fmt == "long":
            self.timestampFormat = "%c"
        elif fmt == "short":
            self.timestampFormat = "%X"
    def timestamp (self, nsec):
        """ Format a nanosecond-since-the-epoch timestamp for printing """
        return strftime (self.timestampFormat, gmtime (nsec / 1000000000))
    def duration(self, nsec):
        """Render a nanosecond interval as 'Nd Nh Nm Ns' (largest units first)."""
        if nsec < 0: nsec = 0
        # Python 2 integer division throughout
        sec = nsec / 1000000000
        min = sec / 60
        hour = min / 60
        day = hour / 24
        result = ""
        if day > 0:
            result = "%dd " % day
        if hour > 0 or result != "":
            result += "%dh " % (hour % 24)
        if min > 0 or result != "":
            result += "%dm " % (min % 60)
        result += "%ds" % (sec % 60)
        return result
class Sortable:
    """Row wrapper that compares by a fixed column index (Python 2 __cmp__)."""
    def __init__(self, row, sortIndex):
        self.row = row
        self.sortIndex = sortIndex
        if sortIndex >= len(row):
            raise Exception("sort index exceeds row boundary")
    def __cmp__(self, other):
        # Python 2 ordering protocol: compare the sort-column values
        return cmp(self.row[self.sortIndex], other.row[self.sortIndex])
    def getRow(self):
        # return the wrapped row unchanged
        return self.row
class Sorter:
    """Sorts table rows by a named column, with optional limit and direction."""
    def __init__(self, heads, rows, sortCol, limit=0, inc=True):
        # locate the index of the named sort column
        col = 0
        for head in heads:
            if head.text == sortCol:
                break
            col += 1
        if col == len(heads):
            raise Exception("sortCol '%s', not found in headers" % sortCol)
        # wrap each row so comparisons use the chosen column
        # (local renamed from 'list', which shadowed the builtin)
        sortables = []
        for row in rows:
            sortables.append(Sortable(row, col))
        sortables.sort()
        if not inc:
            sortables.reverse()
        # unwrap, truncating to 'limit' rows when limit is nonzero
        count = 0
        self.sorted = []
        for row in sortables:
            self.sorted.append(row.getRow())
            count += 1
            if count == limit:
                break
    def getSorted(self):
        """Return the rows in sorted order."""
        return self.sorted
class AmqpEntity(object):
    """Attribute-style wrapper over a management-response map.

    Dashes in keys are normalized to underscores so values can be read as
    Python attributes (e.g. entity.link_count for key 'link-count').
    """
    def __init__(self, values):
        self.values = dict(
            (key.replace('-', '_'), val) for key, val in values.items())
    def __getattr__(self, attr):
        try:
            return self.values[attr]
        except KeyError:
            raise Exception("Unknown attribute: %s" % attr)
    def __repr__(self):
        return "%r" % self.values
class BusManager:
    """Thin AMQP management client built on a Proton Messenger."""
    def __init__(self):
        pass
    def SetHost(self, host, router):
        """Connect to host; if router is given, target that router via _topo."""
        self.M = Messenger()
        self.M.start()
        # seconds to wait for a management reply
        self.M.timeout = 3
        # route every amqp:/... address through the given host
        self.M.route("amqp:/*", "amqp://%s/$1" % host)
        if router:
            self.address = "amqp:/_topo/0/%s/$management" % router
        else:
            self.address = "amqp:/_local/$management"
        # subscribe to a dynamic reply address for responses
        self.subscription = self.M.subscribe("amqp:/#")
        self.reply = self.subscription.address
    def Disconnect(self):
        """Stop the messenger and drop the connection."""
        self.M.stop()
    def GetObject(self, cls):
        """Issue a management GET for all entities of type cls.

        Returns a list of AmqpEntity wrappers; raises Exception when the
        agent replies with a non-200 status code.
        """
        request = Message()
        response = Message()
        request.address = self.address
        request.reply_to = self.reply
        request.correlation_id = 1
        request.properties = {u'operation':u'GET', u'type':cls}
        # empty attributeNames means "return all attributes"
        request.body = {'attributeNames': []}
        self.M.put(request)
        self.M.send()
        self.M.recv()
        self.M.get(response)
        if response.properties['status-code'] != 200:
            raise Exception("Agent reports: %d %s" % (response.properties['status-code'], response.properties['status-description']))
        entities = []
        results = response.body
        for e in results:
            entities.append(AmqpEntity(e))
        return entities
# ---- CGI entry point ----
qdw_start("text/html")
form = cgi.FieldStorage()
# connect to the local router's management endpoint
router = BusManager()
router.SetHost("0.0.0.0", None)
# apply any requested state change before rendering the page
if 'action' in form:
    if updates_enabled:
        qdw_do_action(router, form)
# choose the page object from the 'view' query parameter
page = None
if 'view' in form:
    view = form['view'].value
    if view == 'GENERAL':
        page = GeneralPage(router, form)
    elif view == 'CONN':
        page = ConnPage(router, form)
    elif view == 'LINK':
        page = LinkPage(router, form)
    elif view == 'ADDRESS':
        page = AddressPage(router, form)
    elif view == 'MEMORY':
        page = MemoryPage(router, form)
    else:
        # unknown view falls back to the main page
        page = GeneralPage(router, form)
else:
    page = GeneralPage(router, form)
page.display()
router.Disconnect()
qdw_close()
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
r"""Script for training model.
Simple command to get up and running:
python train.py --memory_size=8192 \
--batch_size=16 --validation_length=50 \
--episode_width=5 --episode_length=30
"""
import logging
import os
import random
import numpy as np
import tensorflow as tf
import data_utils
import model
FLAGS = tf.flags.FLAGS

# --- model / episode geometry ---
tf.flags.DEFINE_integer('rep_dim', 128,
                        'dimension of keys to use in memory')
tf.flags.DEFINE_integer('episode_length', 100, 'length of episode')
tf.flags.DEFINE_integer('episode_width', 5,
                        'number of distinct labels in a single episode')
tf.flags.DEFINE_integer('memory_size', None, 'number of slots in memory. '
                        'Leave as None to default to episode length')
tf.flags.DEFINE_integer('batch_size', 16, 'batch size')
# --- training schedule ---
tf.flags.DEFINE_integer('num_episodes', 100000, 'number of training episodes')
tf.flags.DEFINE_integer('validation_frequency', 20,
                        'every so many training episodes, '
                        'assess validation accuracy')
tf.flags.DEFINE_integer('validation_length', 10,
                        'number of episodes to use to compute '
                        'validation accuracy')
# --- misc ---
tf.flags.DEFINE_integer('seed', 888, 'random seed for training sampling')
tf.flags.DEFINE_string('save_dir', '', 'directory to save model to')
tf.flags.DEFINE_bool('use_lsh', False,
                     'use locality-sensitive hashing '
                     '(NOTE: not fully tested)')
class Trainer(object):
"""Class that takes care of training, validating, and checkpointing model."""
  def __init__(self, train_data, valid_data, input_dim, output_dim=None):
    """Store the datasets and pull hyperparameters from FLAGS.

    Args:
      train_data: dict mapping label -> list of examples (training split).
      valid_data: same structure, for validation.
      input_dim: dimensionality of a single example.
      output_dim: number of output classes; defaults to episode_width.
    """
    self.train_data = train_data
    self.valid_data = valid_data
    self.input_dim = input_dim

    self.rep_dim = FLAGS.rep_dim
    self.episode_length = FLAGS.episode_length
    self.episode_width = FLAGS.episode_width
    self.batch_size = FLAGS.batch_size
    # default memory size: one slot per example across a full batch of episodes
    self.memory_size = (self.episode_length * self.batch_size
                        if FLAGS.memory_size is None else FLAGS.memory_size)
    self.use_lsh = FLAGS.use_lsh

    self.output_dim = (output_dim if output_dim is not None
                       else self.episode_width)
  def get_model(self):
    """Construct the memory-augmented model for this trainer's dimensions."""
    # vocab size is the number of distinct values that
    # could go into the memory key-value storage
    vocab_size = self.episode_width * self.batch_size
    return model.Model(
        self.input_dim, self.output_dim, self.rep_dim, self.memory_size,
        vocab_size, use_lsh=self.use_lsh)
  def sample_episode_batch(self, data,
                           episode_length, episode_width, batch_size):
    """Generates a random batch for training or validation.

    Structures each element of the batch as an 'episode'.
    Each episode contains episode_length examples and
    episode_width distinct labels.

    Args:
      data: A dictionary mapping label to list of examples.
      episode_length: Number of examples in each episode.
      episode_width: Distinct number of labels in each episode.
      batch_size: Batch size (number of episodes).

    Returns:
      A tuple (x, y) where x is a list of batches of examples
      with size episode_length and y is a list of batches of labels.
    """
    episodes_x = [[] for _ in xrange(episode_length)]
    episodes_y = [[] for _ in xrange(episode_length)]
    assert len(data) >= episode_width
    keys = data.keys()
    for b in xrange(batch_size):
      episode_labels = random.sample(keys, episode_width)
      # distribute episode_length examples as evenly as possible over the
      # chosen labels; 'remainder' labels get one extra example
      remainder = episode_length % episode_width
      remainders = [0] * (episode_width - remainder) + [1] * remainder
      # Python 2 integer division yields the per-label example count
      episode_x = [
          random.sample(data[lab],
                        r + (episode_length - remainder) / episode_width)
          for lab, r in zip(episode_labels, remainders)]
      # flatten into (example, label index, showing index) triples
      episode = sum([[(x, i, ii) for ii, x in enumerate(xx)]
                     for i, xx in enumerate(episode_x)], [])
      random.shuffle(episode)
      # Arrange episode so that each distinct label is seen before moving to
      # 2nd showing
      episode.sort(key=lambda elem: elem[2])
      assert len(episode) == episode_length
      # labels are offset per batch element so label ids never collide
      for i in xrange(episode_length):
        episodes_x[i].append(episode[i][0])
        episodes_y[i].append(episode[i][1] + b * episode_width)

    return ([np.array(xx).astype('float32') for xx in episodes_x],
            [np.array(yy).astype('int32') for yy in episodes_y])
def compute_correct(self, ys, y_preds):
return np.mean(np.equal(y_preds, np.array(ys)))
def individual_compute_correct(self, y, y_pred):
return y_pred == y
  def run(self):
    """Performs training.

    Trains a model using episodic training.
    Every so often, runs some evaluations on validation data.
    """
    # Pull config into locals for logging and loop use below.
    train_data, valid_data = self.train_data, self.valid_data
    input_dim, output_dim = self.input_dim, self.output_dim
    rep_dim, episode_length = self.rep_dim, self.episode_length
    episode_width, memory_size = self.episode_width, self.memory_size
    batch_size = self.batch_size
    train_size = len(train_data)
    valid_size = len(valid_data)
    logging.info('train_size (number of labels) %d', train_size)
    logging.info('valid_size (number of labels) %d', valid_size)
    logging.info('input_dim %d', input_dim)
    logging.info('output_dim %d', output_dim)
    logging.info('rep_dim %d', rep_dim)
    logging.info('episode_length %d', episode_length)
    logging.info('episode_width %d', episode_width)
    logging.info('memory_size %d', memory_size)
    logging.info('batch_size %d', batch_size)
    # Every label must have enough examples to fill its share of an episode.
    assert all(len(v) >= float(episode_length) / episode_width
               for v in train_data.itervalues())
    assert all(len(v) >= float(episode_length) / episode_width
               for v in valid_data.itervalues())
    # NOTE(review): this rebinds only the local; self.output_dim (used by
    # get_model below) is unchanged.
    output_dim = episode_width
    self.model = self.get_model()
    self.model.setup()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(max_to_keep=10)
    # Resume from the latest checkpoint in save_dir, if one exists.
    ckpt = None
    if FLAGS.save_dir:
      ckpt = tf.train.get_checkpoint_state(FLAGS.save_dir)
    if ckpt and ckpt.model_checkpoint_path:
      logging.info('restoring from %s', ckpt.model_checkpoint_path)
      saver.restore(sess, ckpt.model_checkpoint_path)
    logging.info('starting now')
    losses = []
    # Seed both RNGs used by sample_episode_batch for reproducibility.
    random.seed(FLAGS.seed)
    np.random.seed(FLAGS.seed)
    for i in xrange(FLAGS.num_episodes):
      x, y = self.sample_episode_batch(
          train_data, episode_length, episode_width, batch_size)
      outputs = self.model.episode_step(sess, x, y, clear_memory=True)
      loss = outputs
      losses.append(loss)
      if i % FLAGS.validation_frequency == 0:
        logging.info('episode batch %d, avg train loss %f',
                     i, np.mean(losses))
        losses = []
        # validation
        correct = []
        # correct_by_shot[k] collects correctness of the (k+1)-th showing
        # of each label (0-shot through episode_width-shot).
        correct_by_shot = dict((k, []) for k in xrange(self.episode_width + 1))
        for _ in xrange(FLAGS.validation_length):
          # Validation episodes use batch size 1.
          x, y = self.sample_episode_batch(
              valid_data, episode_length, episode_width, 1)
          outputs = self.model.episode_predict(
              sess, x, y, clear_memory=True)
          y_preds = outputs
          correct.append(self.compute_correct(np.array(y), y_preds))
          # compute per-shot accuracies
          # NOTE(review): allocated with batch_size rows although the
          # episode above was sampled with batch size 1; only row 0 is
          # ever indexed, the extra rows are unused.
          seen_counts = [[0] * episode_width for _ in xrange(batch_size)]
          # loop over episode steps
          for yy, yy_preds in zip(y, y_preds):
            # loop over batch examples
            for k, (yyy, yyy_preds) in enumerate(zip(yy, yy_preds)):
              yyy, yyy_preds = int(yyy), int(yyy_preds)
              # How many times this label has been seen so far == shot.
              count = seen_counts[k][yyy % self.episode_width]
              if count in correct_by_shot:
                correct_by_shot[count].append(
                    self.individual_compute_correct(yyy, yyy_preds))
              seen_counts[k][yyy % self.episode_width] = count + 1
        logging.info('validation overall accuracy %f', np.mean(correct))
        # Log "<k>-shot: <acc>" pairs by repeating the format specifier.
        logging.info('%d-shot: %.3f, ' * (self.episode_width + 1),
                     *sum([[k, np.mean(correct_by_shot[k])]
                           for k in xrange(self.episode_width + 1)], []))
        if saver and FLAGS.save_dir:
          saved_file = saver.save(sess,
                                  os.path.join(FLAGS.save_dir, 'model.ckpt'),
                                  global_step=self.model.global_step)
          logging.info('saved model to %s', saved_file)
def main(unused_argv):
  """Entry point: loads the dataset and runs episodic training."""
  training_set, validation_set = data_utils.get_data()
  # Inputs are flattened square images, hence the squared edge length.
  input_dim = data_utils.IMAGE_NEW_SIZE ** 2
  trainer = Trainer(training_set, validation_set, input_dim)
  trainer.run()
# Script entry point: configure logging, then let TF parse flags and
# dispatch to main().
if __name__ == '__main__':
  logging.basicConfig(level=logging.INFO)
  tf.app.run()
| |
"""The tests for Cover device conditions."""
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.cover import DOMAIN
from homeassistant.const import (
CONF_PLATFORM,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automation_capabilities,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
from tests.components.blueprint.conftest import stub_blueprint_populate # noqa: F401
@pytest.fixture
def device_reg(hass):
    """Provide a fresh mock device registry for each test."""
    registry = mock_device_registry(hass)
    return registry
@pytest.fixture
def entity_reg(hass):
    """Provide a fresh mock entity registry for each test."""
    registry = mock_registry(hass)
    return registry
@pytest.fixture
def calls(hass):
    """Capture invocations of the test.automation mock service."""
    recorded = async_mock_service(hass, "test", "automation")
    return recorded
async def test_get_conditions(hass, device_reg, entity_reg):
    """Test we get the expected conditions from a cover."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    ent = platform.ENTITIES[0]
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(
        DOMAIN, "test", ent.unique_id, device_id=device_entry.id
    )
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    # A basic cover exposes exactly the four state conditions.
    expected_conditions = [
        {
            "condition": "device",
            "domain": DOMAIN,
            "type": condition_type,
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_{ent.unique_id}",
        }
        for condition_type in ("is_open", "is_closed", "is_opening", "is_closing")
    ]
    conditions = await async_get_device_automations(hass, "condition", device_entry.id)
    assert_lists_same(conditions, expected_conditions)
async def test_get_conditions_set_pos(hass, device_reg, entity_reg):
    """Test we get the expected conditions from a cover."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    ent = platform.ENTITIES[1]
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(
        DOMAIN, "test", ent.unique_id, device_id=device_entry.id
    )
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    # A cover with set_cover_position adds "is_position" to the four
    # state conditions.
    expected_conditions = [
        {
            "condition": "device",
            "domain": DOMAIN,
            "type": condition_type,
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_{ent.unique_id}",
        }
        for condition_type in (
            "is_open",
            "is_closed",
            "is_opening",
            "is_closing",
            "is_position",
        )
    ]
    conditions = await async_get_device_automations(hass, "condition", device_entry.id)
    assert_lists_same(conditions, expected_conditions)
async def test_get_conditions_set_tilt_pos(hass, device_reg, entity_reg):
    """Test we get the expected conditions from a cover."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    ent = platform.ENTITIES[2]
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(
        DOMAIN, "test", ent.unique_id, device_id=device_entry.id
    )
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    # A cover with tilt support adds "is_tilt_position" to the four
    # state conditions.
    expected_conditions = [
        {
            "condition": "device",
            "domain": DOMAIN,
            "type": condition_type,
            "device_id": device_entry.id,
            "entity_id": f"{DOMAIN}.test_{ent.unique_id}",
        }
        for condition_type in (
            "is_open",
            "is_closed",
            "is_opening",
            "is_closing",
            "is_tilt_position",
        )
    ]
    conditions = await async_get_device_automations(hass, "condition", device_entry.id)
    assert_lists_same(conditions, expected_conditions)
async def test_get_condition_capabilities(hass, device_reg, entity_reg):
    """Test we get the expected capabilities from a cover condition."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    ent = platform.ENTITIES[0]
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(
        DOMAIN, "test", ent.unique_id, device_id=device_entry.id
    )
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    conditions = await async_get_device_automations(hass, "condition", device_entry.id)
    assert len(conditions) == 4
    # A plain cover offers no extra configuration fields on any condition.
    for condition in conditions:
        capabilities = await async_get_device_automation_capabilities(
            hass, "condition", condition
        )
        assert capabilities == {"extra_fields": []}
async def test_get_condition_capabilities_set_pos(hass, device_reg, entity_reg):
    """Test we get the expected capabilities from a cover condition."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    ent = platform.ENTITIES[1]
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(
        DOMAIN, "test", ent.unique_id, device_id=device_entry.id
    )
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    # "is_position" takes optional above/below percentage bounds.
    expected_capabilities = {
        "extra_fields": [
            {
                "name": field_name,
                "optional": True,
                "type": "integer",
                "default": field_default,
                "valueMax": 100,
                "valueMin": 0,
            }
            for field_name, field_default in (("above", 0), ("below", 100))
        ]
    }
    conditions = await async_get_device_automations(hass, "condition", device_entry.id)
    assert len(conditions) == 5
    for condition in conditions:
        capabilities = await async_get_device_automation_capabilities(
            hass, "condition", condition
        )
        if condition["type"] == "is_position":
            assert capabilities == expected_capabilities
        else:
            assert capabilities == {"extra_fields": []}
async def test_get_condition_capabilities_set_tilt_pos(hass, device_reg, entity_reg):
    """Test we get the expected capabilities from a cover condition."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    ent = platform.ENTITIES[2]
    config_entry = MockConfigEntry(domain="test", data={})
    config_entry.add_to_hass(hass)
    device_entry = device_reg.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
    )
    entity_reg.async_get_or_create(
        DOMAIN, "test", ent.unique_id, device_id=device_entry.id
    )
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    # "is_tilt_position" takes optional above/below percentage bounds.
    expected_capabilities = {
        "extra_fields": [
            {
                "name": field_name,
                "optional": True,
                "type": "integer",
                "default": field_default,
                "valueMax": 100,
                "valueMin": 0,
            }
            for field_name, field_default in (("above", 0), ("below", 100))
        ]
    }
    conditions = await async_get_device_automations(hass, "condition", device_entry.id)
    assert len(conditions) == 5
    for condition in conditions:
        capabilities = await async_get_device_automation_capabilities(
            hass, "condition", condition
        )
        if condition["type"] == "is_tilt_position":
            assert capabilities == expected_capabilities
        else:
            assert capabilities == {"extra_fields": []}
async def test_if_state(hass, calls):
    """Test for turn_on and turn_off conditions."""
    hass.states.async_set("cover.entity", STATE_OPEN)
    # One automation per cover state condition, each fired by its own event.
    state_conditions = [
        ("test_event1", "is_open"),
        ("test_event2", "is_closed"),
        ("test_event3", "is_opening"),
        ("test_event4", "is_closing"),
    ]
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {"platform": "event", "event_type": event_type},
                    "condition": [
                        {
                            "condition": "device",
                            "domain": DOMAIN,
                            "device_id": "",
                            "entity_id": "cover.entity",
                            "type": condition_type,
                        }
                    ],
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": condition_type
                            + " - {{ trigger.platform }} - {{ trigger.event.event_type }}"
                        },
                    },
                }
                for event_type, condition_type in state_conditions
            ]
        },
    )
    # Only the automation whose condition matches the current state fires.
    hass.bus.async_fire("test_event1")
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert len(calls) == 1
    assert calls[0].data["some"] == "is_open - event - test_event1"
    hass.states.async_set("cover.entity", STATE_CLOSED)
    hass.bus.async_fire("test_event1")
    hass.bus.async_fire("test_event2")
    await hass.async_block_till_done()
    assert len(calls) == 2
    assert calls[1].data["some"] == "is_closed - event - test_event2"
    hass.states.async_set("cover.entity", STATE_OPENING)
    hass.bus.async_fire("test_event1")
    hass.bus.async_fire("test_event3")
    await hass.async_block_till_done()
    assert len(calls) == 3
    assert calls[2].data["some"] == "is_opening - event - test_event3"
    hass.states.async_set("cover.entity", STATE_CLOSING)
    hass.bus.async_fire("test_event1")
    hass.bus.async_fire("test_event4")
    await hass.async_block_till_done()
    assert len(calls) == 4
    assert calls[3].data["some"] == "is_closing - event - test_event4"
async def test_if_position(hass, calls):
    """Test for position conditions."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    ent = platform.ENTITIES[1]
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    # (trigger event, extra position bounds, message prefix)
    rule_specs = [
        ("test_event1", {"above": 45}, "is_pos_gt_45"),
        ("test_event2", {"below": 90}, "is_pos_lt_90"),
        ("test_event3", {"above": 45, "below": 90}, "is_pos_gt_45_lt_90"),
    ]
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {"platform": "event", "event_type": event_type},
                    "condition": [
                        dict(
                            {
                                "condition": "device",
                                "domain": DOMAIN,
                                "device_id": "",
                                "entity_id": ent.entity_id,
                                "type": "is_position",
                            },
                            **bounds,
                        )
                    ],
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": prefix
                            + " - {{ trigger.platform }} - {{ trigger.event.event_type }}"
                        },
                    },
                }
                for event_type, bounds, prefix in rule_specs
            ]
        },
    )
    for event in ("test_event1", "test_event2", "test_event3"):
        hass.bus.async_fire(event)
    await hass.async_block_till_done()
    assert len(calls) == 3
    assert calls[0].data["some"] == "is_pos_gt_45 - event - test_event1"
    assert calls[1].data["some"] == "is_pos_lt_90 - event - test_event2"
    assert calls[2].data["some"] == "is_pos_gt_45_lt_90 - event - test_event3"
    # Position exactly 45: only the strictly-below-90 condition holds.
    hass.states.async_set(
        ent.entity_id, STATE_CLOSED, attributes={"current_position": 45}
    )
    for event in ("test_event1", "test_event2", "test_event3"):
        hass.bus.async_fire(event)
    await hass.async_block_till_done()
    assert len(calls) == 4
    assert calls[3].data["some"] == "is_pos_lt_90 - event - test_event2"
    # Position exactly 90: only the strictly-above-45 condition holds.
    hass.states.async_set(
        ent.entity_id, STATE_CLOSED, attributes={"current_position": 90}
    )
    for event in ("test_event1", "test_event2", "test_event3"):
        hass.bus.async_fire(event)
    await hass.async_block_till_done()
    assert len(calls) == 5
    assert calls[4].data["some"] == "is_pos_gt_45 - event - test_event1"
async def test_if_tilt_position(hass, calls):
    """Test for tilt position conditions."""
    platform = getattr(hass.components, f"test.{DOMAIN}")
    platform.init()
    ent = platform.ENTITIES[2]
    assert await async_setup_component(hass, DOMAIN, {DOMAIN: {CONF_PLATFORM: "test"}})
    await hass.async_block_till_done()
    # (trigger event, extra tilt bounds, message prefix)
    rule_specs = [
        ("test_event1", {"above": 45}, "is_pos_gt_45"),
        ("test_event2", {"below": 90}, "is_pos_lt_90"),
        ("test_event3", {"above": 45, "below": 90}, "is_pos_gt_45_lt_90"),
    ]
    assert await async_setup_component(
        hass,
        automation.DOMAIN,
        {
            automation.DOMAIN: [
                {
                    "trigger": {"platform": "event", "event_type": event_type},
                    "condition": [
                        dict(
                            {
                                "condition": "device",
                                "domain": DOMAIN,
                                "device_id": "",
                                "entity_id": ent.entity_id,
                                "type": "is_tilt_position",
                            },
                            **bounds,
                        )
                    ],
                    "action": {
                        "service": "test.automation",
                        "data_template": {
                            "some": prefix
                            + " - {{ trigger.platform }} - {{ trigger.event.event_type }}"
                        },
                    },
                }
                for event_type, bounds, prefix in rule_specs
            ]
        },
    )
    for event in ("test_event1", "test_event2", "test_event3"):
        hass.bus.async_fire(event)
    await hass.async_block_till_done()
    assert len(calls) == 3
    assert calls[0].data["some"] == "is_pos_gt_45 - event - test_event1"
    assert calls[1].data["some"] == "is_pos_lt_90 - event - test_event2"
    assert calls[2].data["some"] == "is_pos_gt_45_lt_90 - event - test_event3"
    # Tilt exactly 45: only the strictly-below-90 condition holds.
    hass.states.async_set(
        ent.entity_id, STATE_CLOSED, attributes={"current_tilt_position": 45}
    )
    for event in ("test_event1", "test_event2", "test_event3"):
        hass.bus.async_fire(event)
    await hass.async_block_till_done()
    assert len(calls) == 4
    assert calls[3].data["some"] == "is_pos_lt_90 - event - test_event2"
    # Tilt exactly 90: only the strictly-above-45 condition holds.
    hass.states.async_set(
        ent.entity_id, STATE_CLOSED, attributes={"current_tilt_position": 90}
    )
    for event in ("test_event1", "test_event2", "test_event3"):
        hass.bus.async_fire(event)
    await hass.async_block_till_done()
    assert len(calls) == 5
    assert calls[4].data["some"] == "is_pos_gt_45 - event - test_event1"
| |
# Copyright 2018 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The training loop."""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import time
from absl import flags
from absl import logging
from lamb import corpus
from lamb import evaluation
from lamb import lamb_flags
from lamb import lm
from lamb import monitoring
from lamb import utils
from lamb.averaged import Averaged
from lamb.dyneval import Dyneval
import numpy as np
import six
import tensorflow.compat.v1 as tf
from tensorflow.contrib import framework as contrib_framework
nest = contrib_framework.nest
FLAGS = flags.FLAGS
def _load_checkpoint(checkpoint_filename, extra_vars, trainable_only=False):
  """Restores variables from a checkpoint into the default session.

  Args:
    checkpoint_filename: A checkpoint file, or a directory in which case
      the latest checkpoint within it is used.
    extra_vars: Variables excluded from the restore set when falling back
      to the old checkpoint format.
    trainable_only: If True, restrict restoration to trainable variables.
  """
  if tf.gfile.IsDirectory(checkpoint_filename):
    checkpoint_filename = tf.train.latest_checkpoint(checkpoint_filename)
  logging.info('Loading checkpoint %s', checkpoint_filename)
  saveables = (tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) +
               tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS))
  if trainable_only:
    saveables = list(set(saveables) & set(tf.trainable_variables()))
  # First attempt restores everything; on failure retry without extra_vars
  # (checkpoints written before those variables existed).
  try:
    tf.train.Saver(var_list=saveables).restore(
        tf.get_default_session(), checkpoint_filename)
  except (ValueError, tf.errors.NotFoundError):
    logging.info('Missing key in checkpoint. Trying old checkpoint format.')
    tf.train.Saver(var_list=list(set(saveables) - set(extra_vars))).restore(
        tf.get_default_session(), checkpoint_filename)
def train(tuner, data, vocab, config, experiment_dir, seed=None):
  """Main training loop.

  Args:
    tuner: Handed to the TrainingMonitor; presumably a hyperparameter
      tuner to report measures to -- TODO confirm.
    data: Dataset splits passed through to the training loop; the loop
      reads data['training'] from it.
    vocab: Vocabulary, passed through to batching and evaluation.
    config: A config object (see get_config()).
    experiment_dir: Path of a directory where to log training events.
    seed: suitable for tf.set_random_seed

  Returns:
    A tuple (metrics, turn): the monitor's metrics dict and its final
    turn number.
  """
  if FLAGS.save_config:
    config.save(os.path.join(experiment_dir, 'config'))
  session_config = tf.ConfigProto(
      log_device_placement=FLAGS.log_device_placement)
  with tf.Graph().as_default():
    tf.set_random_seed(seed)
    logging.info('Creating the model.')
    config = lamb_flags.handle_config_defaults(config, lm.LM.num_params)
    model = lm.LM(config)
    logging.info('Model created.')
    # Weight averaging is enabled by a non-negative trigger flag.
    if FLAGS.trigger_averaging_turns >= 0:
      averaged = Averaged(tf.trainable_variables())
    else:
      averaged = None
    # The monitor and the lr scheduler have some state that we need to
    # checkpoint in case of preemption. We do that by serializing them into the
    # graph.
    training_state = utils.TFSerializer('training_state')
    # NOTE: the two sync helpers close over `monitor` and `lr_scheduler`,
    # which are only bound further down; they must not be called before that.
    def sync_training_state_from_graph():
      state = training_state.retrieve()
      logging.info('Loaded training state: %s', state)
      if state.get('monitor_state', None):
        monitor.set_state(state['monitor_state'])
      if state.get('learning_rate_state', None):
        lr_scheduler.set_state(state['learning_rate_state'])
    def sync_training_state_to_graph():
      state = {
          # To help maintain backwards compatibility.
          'state_version': 1,
          'monitor_state': monitor.state(),
          'learning_rate_state': lr_scheduler.state()
      }
      training_state.store(state)
    # Checkpoint saving.
    logging.info('Creating savers.')
    best_turn_saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)
    last_turn_saver = tf.train.Saver(max_to_keep=1, save_relative_paths=True)
    best_checkpoint_dir = os.path.join(experiment_dir, 'best/')
    last_checkpoint_dir = os.path.join(experiment_dir, 'last/')
    best_checkpoint_filename = os.path.join(best_checkpoint_dir, 'model.ckpt')
    last_checkpoint_filename = os.path.join(last_checkpoint_dir, 'model.ckpt')
    # Upon resuming from a checkpoint the saver won't count the old checkpoints
    # against max_to_keep. Recover its state.
    best_checkpoint_states = tf.train.get_checkpoint_state(best_checkpoint_dir)
    last_checkpoint_states = tf.train.get_checkpoint_state(last_checkpoint_dir)
    if best_checkpoint_states is not None:
      logging.info('Previous best checkpoint paths: %s',
                   best_checkpoint_states.all_model_checkpoint_paths)
      best_turn_saver.recover_last_checkpoints(
          best_checkpoint_states.all_model_checkpoint_paths)
    if last_checkpoint_states is not None:
      logging.info('Previous last checkpoint paths: %s',
                   last_checkpoint_states.all_model_checkpoint_paths)
      last_turn_saver.recover_last_checkpoints(
          last_checkpoint_states.all_model_checkpoint_paths)
    # Serialize monitor/scheduler state into the graph before each save so
    # it is captured by the checkpoint.
    def maybe_save_checkpoint(saver, filename):
      if FLAGS.save_checkpoints:
        logging.info('Saving checkpoint %s', filename)
        sync_training_state_to_graph()
        saver.save(tf.get_default_session(), filename,
                   global_step=model.global_step())
    # Callback for monitor.
    def save_best_checkpoint():
      maybe_save_checkpoint(best_turn_saver, best_checkpoint_filename)
    # Callback for train_loop.
    def save_last_checkpoint():
      maybe_save_checkpoint(last_turn_saver, last_checkpoint_filename)
    # The monitor keeps track of the best result so far, does early stopping.
    monitor = monitoring.TrainingMonitor(
        max_turns=config.turns,
        tuner=tuner,
        new_best_fn=save_best_checkpoint,
        es_turns=FLAGS.early_stopping_turns,
        es_rampup_turns=FLAGS.early_stopping_rampup_turns,
        es_slowest_rate=FLAGS.early_stopping_slowest_rate)
    # Set up the learning rate scheduler
    lr_scheduler = monitoring.LearningRateScheduler(
        base_learning_rate=config.learning_rate,
        monitor=monitor,
        drop_multiplier=config.drop_learning_rate_multiplier,
        drop_turns=config.drop_learning_rate_turns,
        drop_at_turn_at_the_latest=config.drop_learning_rate_at_the_latest)
    with tf.Session(config=session_config) as sess:
      logging.info('Initializing model.')
      sess.run(tf.global_variables_initializer())
      # Load the checkpoint specified by the user or try to resume from last.
      if FLAGS.load_checkpoint:
        checkpoint_filename = os.path.join(experiment_dir,
                                           FLAGS.load_checkpoint)
        _load_checkpoint(checkpoint_filename, training_state.variables(),
                         not FLAGS.load_optimizer_state)
        if FLAGS.load_optimizer_state:
          sync_training_state_from_graph()
        if averaged and FLAGS.load_averaged:
          averaged.switch_to_average()
          averaged.reset()
      else:
        try:
          _load_checkpoint(last_checkpoint_dir, training_state.variables())
          sync_training_state_from_graph()
          # TODO(melisgl): The training iterator state and last_state are not
          # saved currently. They should be, of course, but failing that random
          # initialization of dataset iterators ensures that there is no bias
          # introduced if training is repeatedly interrupted and continued from
          # a checkpoint. So use a random seed in this case.
          random.seed()
          np.random.seed()
        except (ValueError, tf.errors.NotFoundError):
          # Fresh start: no previous checkpoint to resume from.
          logging.info('Last checkpoint file %s does not exist.',
                       last_checkpoint_filename)
      # Takes a lot of space. Disabled for now.
      # summary_writer = tf.summary.FileWriter(
      #     experiment_dir, graph=sess.graph,
      #     flush_secs=FLAGS.summary_flush_secs)
      summary_writer = None
      if FLAGS.dyneval:
        dyneval = Dyneval(model.clipped_grads_and_vars,
                          learning_rate=FLAGS.dyneval_learning_rate,
                          decay_rate=FLAGS.dyneval_decay_rate,
                          epsilon=FLAGS.dyneval_epsilon)
      else:
        dyneval = None
      # Zero turns means evaluation only.
      if config.turns > 0:
        logging.info('Starting training.')
      else:
        logging.info('Starting testing.')
      metrics = _train_loop(
          monitor, lr_scheduler, averaged, dyneval, model, data, vocab, config,
          summary_writer, save_last_checkpoint)
      logging.info('Training finished.')
      return metrics, monitor.turn()
def _train_loop(monitor, lr_scheduler, averaged, dyneval, model,
                data, vocab, config, summary_writer, save_last_checkpoint_fn):
  """Runs turns of training steps until the monitor says to stop.

  Returns the monitor's final metrics dict.
  """
  source_iterator = corpus.get_batches(
      data['training'], vocab,
      config.batch_size,
      config.max_time_steps,
      num_samples=config.num_training_samples,
      episodic=FLAGS.episodic,
      deterministic=False,
      conditioning_separator=config.conditioning_separator)
  last_state = None
  # Updated at the end of each turn; read by evaluate0 for logging.
  steps_per_sec = 0.0
  # -1 in the flag means "no limit" for the evaluation batch counts.
  def munge_max_batches_flag_value(max_batches):
    if max_batches == -1:
      return None
    else:
      return max_batches
  def evaluate0():
    # KLUDGE: This depends on monitor calling this function before using the
    # worst target.
    monitor.set_es_worst_target(es_worst_target())
    global_step = model.global_step()
    logging.info('turn: %s (eval), step: %d (opt) (%.2f/s)',
                 monitor.turn(), global_step, steps_per_sec)
    # With gradient accumulation, evaluation uses the smaller sub-batch size.
    if config.accum_batch_size == -1:
      eval_batch_size = config.batch_size
    else:
      eval_batch_size = config.accum_batch_size
    training_xe, valid_xe, test_xe = evaluation.evaluate_all(
        model, data, vocab, eval_batch_size, config.max_time_steps,
        FLAGS.min_non_episodic_eval_examples_per_stripe,
        munge_max_batches_flag_value(FLAGS.max_training_eval_batches),
        munge_max_batches_flag_value(FLAGS.max_eval_eval_batches),
        munge_max_batches_flag_value(FLAGS.max_test_eval_batches),
        FLAGS.episodic,
        config.eval_softmax_temperature,
        config.eval_softmax_temperature_estimation_num_tokens,
        config.eval_method,
        config.num_eval_samples,
        config.eval_power_mean_power,
        config.eval_dropout_multiplier,
        config.validation_prediction_file,
        dyneval,
        conditioning_separator=config.conditioning_separator)
    # Validation XE is the quantity the monitor optimizes; the rest is extra.
    return valid_xe, {'training_xe': training_xe,
                      'test_xe': test_xe,
                      'global_step': global_step}
  # Evaluate with the averaged weights swapped in once averaging triggered.
  def evaluate():
    if monitor.averaging_triggered():
      with averaged:
        logging.info('Evaluating with averaged parameters.')
        return evaluate0()
    else:
      return evaluate0()
  def add_summary(summary_str):
    if summary_writer is not None:
      summary_writer.add_summary(summary_str, model.global_step())
  def add_summaries_for_metrics():
    metrics = monitor.metrics()
    summary = tf.Summary()
    for key in metrics:
      summary.value.add(tag=key, simple_value=metrics[key])
    add_summary(summary)
  # Compute the early stopping worst target. It may change when the learning
  # rate is dropped.
  def es_worst_target():
    if FLAGS.early_stopping_worst_xe_target is None:
      return -1.0
    else:
      # The flag is a comma-separated list: one target per learning-rate drop.
      targets_for_lr_drops = [
          float(string) for string
          in FLAGS.early_stopping_worst_xe_target.split(',')
          if string
      ]
      num_drops = lr_scheduler.num_drops()
      if targets_for_lr_drops:
        return targets_for_lr_drops[min(num_drops, len(targets_for_lr_drops)-1)]
      else:
        return None
  def log_summaries(summary):
    utils.log_scalar_summaries(summary)
    add_summary(summary)
  # Each monitor turn: log the previous evaluation, maybe trigger averaging,
  # then run FLAGS.steps_per_turn optimization steps.
  while monitor.next_turn(evaluate):
    logging.info('metrics: %r', monitor.metrics())
    logging.info(
        'early stopping: turns: %s, worst xe target: %s, best expected xe: %s',
        monitor.effective_es_turns(), monitor.es_worst_target(),
        monitor.best_expected_xe())
    add_summaries_for_metrics()
    # If enough turns passed without improvement, turn on averaging.
    best_turn = monitor.best_xe_turn() or 0
    num_tuns_since_best = monitor.turn() - best_turn
    if (averaged and
        ((monitor.turn() > 0 and
          num_tuns_since_best >= FLAGS.trigger_averaging_turns) or
         (FLAGS.trigger_averaging_at_the_latest >= 0 and
          monitor.turn() >= FLAGS.trigger_averaging_at_the_latest))):
      monitor.set_averaging_triggered(True)
    start_time = time.time()
    sum_cost = 0.0
    sum_tokens = 0
    for _ in range(FLAGS.steps_per_turn):
      cost, summary, last_state, num_tokens = train_1(
          model, source_iterator, last_state,
          learning_rate=lr_scheduler.learning_rate(),
          accum_batch_size=model.config.accum_batch_size)
      if monitor.averaging_triggered():
        averaged.take_sample()
      sum_cost += cost
      sum_tokens += num_tokens
      # Log summaries at the very beginning of training to make it easier to
      # debug initialization problems.
      if (model.global_step() == 1 or
          (model.global_step()+1) %
          FLAGS.print_training_stats_every_num_steps == 1):
        log_summaries(summary)
        logging.info('avg training cost at step %d: %.5f',
                     model.global_step(), sum_cost / sum_tokens)
        sum_cost = 0.0
        sum_tokens = 0
    steps_per_sec = FLAGS.steps_per_turn / (time.time()-start_time)
    # TODO(melisgl): Is this the right frequency for saving?
    save_last_checkpoint_fn()
  metrics = monitor.metrics()
  logging.info('Finished at turn %d for reason: %s',
               monitor.turn(), monitor.finished_reason())
  logging.info('Best XE was %5.5f at turn %d',
               metrics['best_xe'], metrics['best_xe_turn'])
  return metrics
def train_1(model, source_iterator, last_state,
            learning_rate, extra_feed=None, accum_batch_size=-1):
  """Trains model for a single iteration.

  Args:
    model: Model object providing `fit`, `add_input_to_feed`, etc.
    source_iterator: Iterator yielding
      (cond, cond_len, source, source_len, target) batches.
    last_state: Recurrent state carried over from the previous iteration,
      or None.
    learning_rate: Learning rate to feed for this step.
    extra_feed: Optional dict of additional feed entries.
    accum_batch_size: If positive, the batch is split into sub-batches of
      this many examples and gradients are accumulated over them before a
      single update; -1 disables accumulation.

  Returns:
    Tuple of (summed cost, summary, new last_state, num_tokens).
  """
  if accum_batch_size == -1:
    cond, cond_len, source, source_len, target = next(source_iterator)
    feed = _make_train_feed(model, cond, cond_len, source, source_len, target,
                            last_state, learning_rate, extra_feed)
    batch_size = feed[model.source_len].shape[0]
    num_tokens = feed[model.source_len].sum()
    cost, summary, last_state = model.fit(feed)
    # Scale the (presumably per-example) cost by the batch size so callers
    # can average by dividing accumulated cost by accumulated num_tokens.
    return cost*batch_size, summary, last_state, num_tokens
  else:
    return _train_1_with_accum(model, source_iterator, last_state,
                               learning_rate, extra_feed, accum_batch_size)
def _train_1_with_accum(model, source_iterator, last_state,
                        learning_rate, extra_feed, accum_batch_size):
  """Trains model for a single iteration with gradient accumulation.

  The batch drawn from `source_iterator` is split into sub-batches of
  `accum_batch_size` examples; gradients are accumulated over all of them
  and applied once at the end.

  Returns:
    Tuple of (summed cost, summary from the last sub-batch, new last_state,
    num_tokens).
  """
  cond, cond_len, source, source_len, target = next(source_iterator)
  (conds, cond_lens, sources, source_lens,
   targets, last_states) = _maybe_split_batch(
       cond, cond_len, source, source_len, target, last_state,
       accum_batch_size)
  num_accum_batches = len(sources)
  cost = 0.0
  new_last_states = []
  batch_size = 0
  num_tokens = 0
  for i in range(num_accum_batches):
    # Test the split lists (not the loop-clobbered `cond`/`cond_len`
    # bindings) so the None-check stays well-defined on every iteration.
    cond = conds[i] if conds is not None else None
    cond_len = cond_lens[i] if cond_lens is not None else None
    source = sources[i]
    source_len = source_lens[i]
    target = targets[i]
    if last_states is not None:
      last_state = last_states[i]
    else:
      last_state = None
    feed = _make_train_feed(model, cond, cond_len, source, source_len, target,
                            last_state, learning_rate, extra_feed)
    batch_size1 = feed[model.source_len].shape[0]
    batch_size += batch_size1
    num_tokens += feed[model.source_len].sum()
    cost1, summary1, last_state1 = model.accumulate_gradients(feed)
    cost += cost1*batch_size1
    new_last_states.append(last_state1)
  # Apply the accumulated gradients once. `summary1` and `feed` come from
  # the last sub-batch; at least one sub-batch is guaranteed because
  # _maybe_split_batch asserts batch_size % accum_batch_size == 0.
  model.fit_accumulated(feed)
  last_state = _concat_last_states(new_last_states)
  return cost, summary1, last_state, num_tokens
def _make_train_feed(model, cond, cond_len, source, source_len, target,
                     last_state, learning_rate, extra_feed=None):
  """Builds the feed dict for one training step.

  Adds the batch inputs, dropout, the number of training samples and the
  learning rate; optionally merges `extra_feed` and feeds back the previous
  recurrent state (possibly with rows randomly zeroed out).
  """
  feed = {}
  model.add_input_to_feed(feed, cond, cond_len, source, source_len, target)
  model.add_dropout_to_feed(feed)
  feed.update({
      model.num_samples: model.config.num_training_samples,
      model.learning_rate: learning_rate
  })
  if extra_feed:
    feed.update(extra_feed)
  if not FLAGS.episodic and last_state is not None:
    # At test time we start from zero state, so let's forget the
    # current state during training too. Simply not feeding the
    # previous state back would be simpler, but it distorts the
    # objective too much.
    if model.config.drop_state_probability > 0.0:
      # One-element list so the nested function can lazily create the mask
      # on first use and share that same mask across every array in the
      # nested state structure (py2-compatible alternative to `nonlocal`).
      mask = [None]
      def ensure_mask(x):
        if mask[0] is None:
          # NOTE(review): mask shape is derived from the *first* state
          # array visited — assumes all state arrays share the same
          # leading dimension; confirm against the model's state layout.
          mask[0] = np.random.binomial(
              1, 1.0-model.config.drop_state_probability,
              size=[x.shape[0]*model.config.num_training_samples, 1])
        return mask[0]
      last_state = utils.map_nested(lambda x: ensure_mask(x)*x, last_state)
    feed.update({model.initial_state: last_state})
  return feed
def _maybe_split_batch(cond, cond_len, source, source_len, target, last_state,
                       accum_batch_size):
  """Splits one batch into equal sub-batches of `accum_batch_size` examples.

  Time-major arrays (cond, source, target) are split along axis 1, the
  per-example length arrays along axis 0. `cond`, `cond_len` and
  `last_state` may be None, in which case None is returned in their slot.

  Returns:
    A 6-tuple (conds, cond_lens, sources, source_lens, targets,
    last_states) of lists of sub-batch pieces (or Nones).
  """
  batch_size = source_len.shape[0]
  assert batch_size % accum_batch_size == 0
  num_pieces = batch_size // accum_batch_size
  def over_batch_axis(array):
    return None if array is None else np.split(array, num_pieces, axis=0)
  def over_time_major(array):
    return None if array is None else np.split(array, num_pieces, axis=1)
  if last_state is None:
    state_pieces = None
  else:
    state_pieces = _split_last_state(last_state, num_pieces)
  return (over_time_major(cond),
          over_batch_axis(cond_len),
          np.split(source, num_pieces, axis=1),
          np.split(source_len, num_pieces, axis=0),
          np.split(target, num_pieces, axis=1),
          state_pieces)
def _split_last_state(last_state, n):
  """Splits a nested state structure into `n` structures.

  Every array in `last_state` is split into `n` equal pieces along axis 0
  and the i-th pieces are repacked into the original nesting.
  """
  flat_arrays = nest.flatten(last_state)
  pieces_per_array = [np.split(array, n) for array in flat_arrays]
  # zip(*...) regroups: one tuple of flat pieces per resulting sub-state.
  return [nest.pack_sequence_as(last_state, flat_pieces)
          for flat_pieces in zip(*pieces_per_array)]
def _concat_last_states(last_states):
  """Inverse of _split_last_state: merges a list of nested state structures.

  Corresponding arrays are concatenated along axis 0 and repacked into the
  nesting of the first structure.
  """
  flattened = [nest.flatten(state) for state in last_states]
  merged_flat = [np.concatenate(corresponding, axis=0)
                 for corresponding in zip(*flattened)]
  return nest.pack_sequence_as(last_states[0], merged_flat)
| |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.auth.transport.requests import AuthorizedSession # type: ignore
import json # type: ignore
import grpc # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.api_core import exceptions as core_exceptions
from google.api_core import retry as retries
from google.api_core import rest_helpers
from google.api_core import rest_streaming
from google.api_core import path_template
from google.api_core import gapic_v1
from requests import __version__ as requests_version
import dataclasses
import re
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
import warnings
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.cloud.compute_v1.types import compute
from .base import SslPoliciesTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version,
grpc_version=None,
rest_version=requests_version,
)
class SslPoliciesRestInterceptor:
    """Interceptor for SslPolicies.
    Interceptors are used to manipulate requests, request metadata, and responses
    in arbitrary ways.
    Example use cases include:
    * Logging
    * Verifying requests according to service or custom semantics
    * Stripping extraneous information from responses
    These use cases and more can be enabled by injecting an
    instance of a custom subclass when constructing the SslPoliciesRestTransport.
    .. code-block:: python
        class MyCustomSslPoliciesInterceptor(SslPoliciesRestInterceptor):
            def pre_delete(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_delete(response):
                logging.log(f"Received response: {response}")
            def pre_get(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_get(response):
                logging.log(f"Received response: {response}")
            def pre_insert(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_insert(response):
                logging.log(f"Received response: {response}")
            def pre_list(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_list(response):
                logging.log(f"Received response: {response}")
            def pre_list_available_features(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_list_available_features(response):
                logging.log(f"Received response: {response}")
            def pre_patch(request, metadata):
                logging.log(f"Received request: {request}")
                return request, metadata
            def post_patch(response):
                logging.log(f"Received response: {response}")
        transport = SslPoliciesRestTransport(interceptor=MyCustomSslPoliciesInterceptor())
        client = SslPoliciesClient(transport=transport)
    """
    # All default implementations below are identity pass-throughs: each
    # pre_* returns (request, metadata) unchanged and each post_* returns
    # the response unchanged. Subclasses override only the hooks they need.
    # -- delete --
    def pre_delete(
        self,
        request: compute.DeleteSslPolicyRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.DeleteSslPolicyRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for delete
        Override in a subclass to manipulate the request or metadata
        before they are sent to the SslPolicies server.
        """
        return request, metadata
    def post_delete(self, response: compute.Operation) -> compute.Operation:
        """Post-rpc interceptor for delete
        Override in a subclass to manipulate the response
        after it is returned by the SslPolicies server but before
        it is returned to user code.
        """
        return response
    # -- get --
    def pre_get(
        self, request: compute.GetSslPolicyRequest, metadata: Sequence[Tuple[str, str]]
    ) -> Tuple[compute.GetSslPolicyRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for get
        Override in a subclass to manipulate the request or metadata
        before they are sent to the SslPolicies server.
        """
        return request, metadata
    def post_get(self, response: compute.SslPolicy) -> compute.SslPolicy:
        """Post-rpc interceptor for get
        Override in a subclass to manipulate the response
        after it is returned by the SslPolicies server but before
        it is returned to user code.
        """
        return response
    # -- insert --
    def pre_insert(
        self,
        request: compute.InsertSslPolicyRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.InsertSslPolicyRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for insert
        Override in a subclass to manipulate the request or metadata
        before they are sent to the SslPolicies server.
        """
        return request, metadata
    def post_insert(self, response: compute.Operation) -> compute.Operation:
        """Post-rpc interceptor for insert
        Override in a subclass to manipulate the response
        after it is returned by the SslPolicies server but before
        it is returned to user code.
        """
        return response
    # -- list --
    def pre_list(
        self,
        request: compute.ListSslPoliciesRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.ListSslPoliciesRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for list
        Override in a subclass to manipulate the request or metadata
        before they are sent to the SslPolicies server.
        """
        return request, metadata
    def post_list(self, response: compute.SslPoliciesList) -> compute.SslPoliciesList:
        """Post-rpc interceptor for list
        Override in a subclass to manipulate the response
        after it is returned by the SslPolicies server but before
        it is returned to user code.
        """
        return response
    # -- list_available_features --
    def pre_list_available_features(
        self,
        request: compute.ListAvailableFeaturesSslPoliciesRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[
        compute.ListAvailableFeaturesSslPoliciesRequest, Sequence[Tuple[str, str]]
    ]:
        """Pre-rpc interceptor for list_available_features
        Override in a subclass to manipulate the request or metadata
        before they are sent to the SslPolicies server.
        """
        return request, metadata
    def post_list_available_features(
        self, response: compute.SslPoliciesListAvailableFeaturesResponse
    ) -> compute.SslPoliciesListAvailableFeaturesResponse:
        """Post-rpc interceptor for list_available_features
        Override in a subclass to manipulate the response
        after it is returned by the SslPolicies server but before
        it is returned to user code.
        """
        return response
    # -- patch --
    def pre_patch(
        self,
        request: compute.PatchSslPolicyRequest,
        metadata: Sequence[Tuple[str, str]],
    ) -> Tuple[compute.PatchSslPolicyRequest, Sequence[Tuple[str, str]]]:
        """Pre-rpc interceptor for patch
        Override in a subclass to manipulate the request or metadata
        before they are sent to the SslPolicies server.
        """
        return request, metadata
    def post_patch(self, response: compute.Operation) -> compute.Operation:
        """Post-rpc interceptor for patch
        Override in a subclass to manipulate the response
        after it is returned by the SslPolicies server but before
        it is returned to user code.
        """
        return response
@dataclasses.dataclass
class SslPoliciesRestStub:
    # Bundles the per-transport objects every method stub needs to issue a
    # request: the authorized HTTP session, the API host, and the
    # interceptor providing the pre/post-RPC hooks.
    _session: AuthorizedSession
    _host: str
    _interceptor: SslPoliciesRestInterceptor
class SslPoliciesRestTransport(SslPoliciesTransport):
"""REST backend transport for SslPolicies.
The SslPolicies API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends JSON representations of protocol buffers over HTTP/1.1
"""
_STUBS: Dict[str, SslPoliciesRestStub] = {}
def __init__(
self,
*,
host: str = "compute.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
url_scheme: str = "https",
interceptor: Optional[SslPoliciesRestInterceptor] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
certificate to configure mutual TLS HTTP channel. It is ignored
if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you are developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
url_scheme: the protocol scheme for the API endpoint. Normally
"https", but for testing or local servers,
"http" can be specified.
"""
# Run the base constructor
# TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
# TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
# credentials object
maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
if maybe_url_match is None:
raise ValueError(
f"Unexpected hostname structure: {host}"
) # pragma: NO COVER
url_match_items = maybe_url_match.groupdict()
host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
super().__init__(
host=host,
credentials=credentials,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
self._session = AuthorizedSession(
self._credentials, default_host=self.DEFAULT_HOST
)
if client_cert_source_for_mtls:
self._session.configure_mtls_channel(client_cert_source_for_mtls)
self._interceptor = interceptor or SslPoliciesRestInterceptor()
self._prep_wrapped_messages(client_info)
class _Delete(SslPoliciesRestStub):
def __hash__(self):
return hash("Delete")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.DeleteSslPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the delete method over HTTP.
Args:
request (~.compute.DeleteSslPolicyRequest):
The request object. A request message for
SslPolicies.Delete. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "delete",
"uri": "/compute/v1/projects/{project}/global/sslPolicies/{ssl_policy}",
},
]
request, metadata = self._interceptor.pre_delete(request, metadata)
request_kwargs = compute.DeleteSslPolicyRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.DeleteSslPolicyRequest.to_json(
compute.DeleteSslPolicyRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_delete(resp)
return resp
class _Get(SslPoliciesRestStub):
def __hash__(self):
return hash("Get")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.GetSslPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.SslPolicy:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetSslPolicyRequest):
The request object. A request message for
SslPolicies.Get. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.SslPolicy:
Represents an SSL Policy resource.
Use SSL policies to control the SSL
features, such as versions and cipher
suites, offered by an HTTPS or SSL Proxy
load balancer. For more information,
read SSL Policy Concepts.
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/global/sslPolicies/{ssl_policy}",
},
]
request, metadata = self._interceptor.pre_get(request, metadata)
request_kwargs = compute.GetSslPolicyRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.GetSslPolicyRequest.to_json(
compute.GetSslPolicyRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.SslPolicy.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_get(resp)
return resp
class _Insert(SslPoliciesRestStub):
def __hash__(self):
return hash("Insert")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.InsertSslPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the insert method over HTTP.
Args:
request (~.compute.InsertSslPolicyRequest):
The request object. A request message for
SslPolicies.Insert. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "post",
"uri": "/compute/v1/projects/{project}/global/sslPolicies",
"body": "ssl_policy_resource",
},
]
request, metadata = self._interceptor.pre_insert(request, metadata)
request_kwargs = compute.InsertSslPolicyRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.SslPolicy.to_json(
compute.SslPolicy(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.InsertSslPolicyRequest.to_json(
compute.InsertSslPolicyRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_insert(resp)
return resp
class _List(SslPoliciesRestStub):
def __hash__(self):
return hash("List")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ListSslPoliciesRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.SslPoliciesList:
r"""Call the list method over HTTP.
Args:
request (~.compute.ListSslPoliciesRequest):
The request object. A request message for
SslPolicies.List. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.SslPoliciesList:
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/global/sslPolicies",
},
]
request, metadata = self._interceptor.pre_list(request, metadata)
request_kwargs = compute.ListSslPoliciesRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.ListSslPoliciesRequest.to_json(
compute.ListSslPoliciesRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.SslPoliciesList.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_list(resp)
return resp
class _ListAvailableFeatures(SslPoliciesRestStub):
def __hash__(self):
return hash("ListAvailableFeatures")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.ListAvailableFeaturesSslPoliciesRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.SslPoliciesListAvailableFeaturesResponse:
r"""Call the list available features method over HTTP.
Args:
request (~.compute.ListAvailableFeaturesSslPoliciesRequest):
The request object. A request message for
SslPolicies.ListAvailableFeatures. See
the method description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.SslPoliciesListAvailableFeaturesResponse:
"""
http_options: List[Dict[str, str]] = [
{
"method": "get",
"uri": "/compute/v1/projects/{project}/global/sslPolicies/listAvailableFeatures",
},
]
request, metadata = self._interceptor.pre_list_available_features(
request, metadata
)
request_kwargs = compute.ListAvailableFeaturesSslPoliciesRequest.to_dict(
request
)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.ListAvailableFeaturesSslPoliciesRequest.to_json(
compute.ListAvailableFeaturesSslPoliciesRequest(
transcoded_request["query_params"]
),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.SslPoliciesListAvailableFeaturesResponse.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_list_available_features(resp)
return resp
class _Patch(SslPoliciesRestStub):
def __hash__(self):
return hash("Patch")
__REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, str] = {}
@classmethod
def _get_unset_required_fields(cls, message_dict):
return {
k: v
for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items()
if k not in message_dict
}
def __call__(
self,
request: compute.PatchSslPolicyRequest,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the patch method over HTTP.
Args:
request (~.compute.PatchSslPolicyRequest):
The request object. A request message for
SslPolicies.Patch. See the method
description for details.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource. Google Compute Engine
has three Operation resources: \*
`Global </compute/docs/reference/rest/v1/globalOperations>`__
\*
`Regional </compute/docs/reference/rest/v1/regionOperations>`__
\*
`Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses. Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource. - For regional operations, use the
``regionOperations`` resource. - For zonal operations,
use the ``zonalOperations`` resource. For more
information, read Global, Regional, and Zonal Resources.
"""
http_options: List[Dict[str, str]] = [
{
"method": "patch",
"uri": "/compute/v1/projects/{project}/global/sslPolicies/{ssl_policy}",
"body": "ssl_policy_resource",
},
]
request, metadata = self._interceptor.pre_patch(request, metadata)
request_kwargs = compute.PatchSslPolicyRequest.to_dict(request)
transcoded_request = path_template.transcode(http_options, **request_kwargs)
# Jsonify the request body
body = compute.SslPolicy.to_json(
compute.SslPolicy(transcoded_request["body"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
uri = transcoded_request["uri"]
method = transcoded_request["method"]
# Jsonify the query params
query_params = json.loads(
compute.PatchSslPolicyRequest.to_json(
compute.PatchSslPolicyRequest(transcoded_request["query_params"]),
including_default_value_fields=False,
use_integers_for_enums=False,
)
)
query_params.update(self._get_unset_required_fields(query_params))
# Send the request
headers = dict(metadata)
headers["Content-Type"] = "application/json"
response = getattr(self._session, method)(
"{host}{uri}".format(host=self._host, uri=uri),
timeout=timeout,
headers=headers,
params=rest_helpers.flatten_query_params(query_params),
data=body,
)
# In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
# subclass.
if response.status_code >= 400:
raise core_exceptions.from_http_response(response)
# Return the response
resp = compute.Operation.from_json(
response.content, ignore_unknown_fields=True
)
resp = self._interceptor.post_patch(resp)
return resp
@property
def delete(self) -> Callable[[compute.DeleteSslPolicyRequest], compute.Operation]:
stub = self._STUBS.get("delete")
if not stub:
stub = self._STUBS["delete"] = self._Delete(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def get(self) -> Callable[[compute.GetSslPolicyRequest], compute.SslPolicy]:
stub = self._STUBS.get("get")
if not stub:
stub = self._STUBS["get"] = self._Get(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def insert(self) -> Callable[[compute.InsertSslPolicyRequest], compute.Operation]:
stub = self._STUBS.get("insert")
if not stub:
stub = self._STUBS["insert"] = self._Insert(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def list(
self,
) -> Callable[[compute.ListSslPoliciesRequest], compute.SslPoliciesList]:
stub = self._STUBS.get("list")
if not stub:
stub = self._STUBS["list"] = self._List(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def list_available_features(
self,
) -> Callable[
[compute.ListAvailableFeaturesSslPoliciesRequest],
compute.SslPoliciesListAvailableFeaturesResponse,
]:
stub = self._STUBS.get("list_available_features")
if not stub:
stub = self._STUBS["list_available_features"] = self._ListAvailableFeatures(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
@property
def patch(self) -> Callable[[compute.PatchSslPolicyRequest], compute.Operation]:
stub = self._STUBS.get("patch")
if not stub:
stub = self._STUBS["patch"] = self._Patch(
self._session, self._host, self._interceptor
)
# The return type is fine, but mypy isn't sophisticated enough to determine what's going on here.
# In C++ this would require a dynamic_cast
return stub # type: ignore
    def close(self):
        """Release the underlying HTTP session held by this transport."""
        self._session.close()
# Names exported when this module is imported with ``from ... import *``.
__all__ = ("SslPoliciesRestTransport",)
| |
"""Implementation of RootOf class and related tools. """
from __future__ import print_function, division
from sympy.core import (S, Expr, Integer, Float, I, Add, Lambda, symbols,
sympify, Rational)
from sympy.core.cache import cacheit
from sympy.core.function import AppliedUndef
from sympy.functions.elementary.miscellaneous import root as _root
from sympy.polys.polytools import Poly, PurePoly, factor
from sympy.polys.rationaltools import together
from sympy.polys.polyfuncs import symmetrize, viete
from sympy.polys.rootisolation import (
dup_isolate_complex_roots_sqf,
dup_isolate_real_roots_sqf,
ComplexInterval)
from sympy.polys.polyroots import (
roots_linear, roots_quadratic, roots_binomial,
preprocess_roots, roots)
from sympy.polys.polyerrors import (
MultivariatePolynomialError,
GeneratorsNeeded,
PolynomialError,
DomainError)
from sympy.polys.domains import QQ
from sympy.mpmath import mp, mpf, mpc, findroot, workprec
from sympy.mpmath.libmp.libmpf import prec_to_dps
from sympy.utilities import lambdify, public
from sympy.core.compatibility import xrange
from math import log as mathlog
def _ispow2(i):
v = mathlog(i, 2)
return v == int(v)
# Module-level caches mapping a PurePoly to the isolating intervals of its
# real roots (_reals_cache) and non-real roots (_complexes_cache); shared
# by all RootOf instances built over the same polynomial.
_reals_cache = {}
_complexes_cache = {}
@public
class RootOf(Expr):
    """Represents ``k``-th root of a univariate polynomial. """

    # poly: the PurePoly whose root this object denotes
    # index: position of the root in the canonical (reals first) ordering
    __slots__ = ['poly', 'index']
    is_complex = True

    def __new__(cls, f, x, index=None, radicals=True, expand=True):
        """Construct a new ``RootOf`` object for ``k``-th root of ``f``. """
        x = sympify(x)
        # Allow the two-argument form RootOf(f, k) where the generator is
        # implicit and the second argument is the root index.
        if index is None and x.is_Integer:
            x, index = None, x
        else:
            index = sympify(index)
            if index is not None and index.is_Integer:
                index = int(index)
            else:
                raise ValueError("expected an integer root index, got %s" % index)
        poly = PurePoly(f, x, greedy=False, expand=expand)
        if not poly.is_univariate:
            raise PolynomialError("only univariate polynomials are allowed")
        degree = poly.degree()
        if degree <= 0:
            raise PolynomialError("can't construct RootOf object for %s" % f)
        # Negative indices count from the end, as for sequences.
        if index < -degree or index >= degree:
            raise IndexError("root index out of [%d, %d] range, got %d" %
                             (-degree, degree - 1, index))
        elif index < 0:
            index += degree
        dom = poly.get_domain()
        if not dom.is_Exact:
            poly = poly.to_exact()
        # Degree-1, degree-2 and binomial polynomials have closed forms;
        # return those directly instead of a RootOf instance.
        roots = cls._roots_trivial(poly, radicals)
        if roots is not None:
            return roots[index]
        coeff, poly = preprocess_roots(poly)
        dom = poly.get_domain()
        if not dom.is_ZZ:
            raise NotImplementedError("RootOf is not supported over %s" % dom)
        root = cls._indexed_root(poly, index)
        return coeff*cls._postprocess_root(root, radicals)

    @classmethod
    def _new(cls, poly, index):
        """Construct new ``RootOf`` object from raw data. """
        obj = Expr.__new__(cls)
        obj.poly = PurePoly(poly)
        obj.index = index
        # Share any already-computed isolating intervals with the new key.
        try:
            _reals_cache[obj.poly] = _reals_cache[poly]
            _complexes_cache[obj.poly] = _complexes_cache[poly]
        except KeyError:
            pass
        return obj

    def _hashable_content(self):
        return (self.poly, self.index)

    @property
    def expr(self):
        """The polynomial as a plain Expr."""
        return self.poly.as_expr()

    @property
    def args(self):
        return (self.expr, Integer(self.index))

    @property
    def free_symbols(self):
        # RootOf currently only works with univariate expressions and although
        # the poly attribute is often a PurePoly, sometimes it is a Poly. In
        # either case no free symbols should be reported.
        return set()

    def _eval_is_real(self):
        """Return ``True`` if the root is real. """
        # Real roots are always indexed before complex ones.
        return self.index < len(_reals_cache[self.poly])

    @classmethod
    def real_roots(cls, poly, radicals=True):
        """Get real roots of a polynomial. """
        return cls._get_roots("_real_roots", poly, radicals)

    @classmethod
    def all_roots(cls, poly, radicals=True):
        """Get real and complex roots of a polynomial. """
        return cls._get_roots("_all_roots", poly, radicals)

    @classmethod
    def _get_reals_sqf(cls, factor):
        """Compute real root isolating intervals for a square-free polynomial. """
        if factor in _reals_cache:
            real_part = _reals_cache[factor]
        else:
            _reals_cache[factor] = real_part = \
                dup_isolate_real_roots_sqf(
                    factor.rep.rep, factor.rep.dom, blackbox=True)
        return real_part

    @classmethod
    def _get_complexes_sqf(cls, factor):
        """Compute complex root isolating intervals for a square-free polynomial. """
        if factor in _complexes_cache:
            complex_part = _complexes_cache[factor]
        else:
            _complexes_cache[factor] = complex_part = \
                dup_isolate_complex_roots_sqf(
                    factor.rep.rep, factor.rep.dom, blackbox=True)
        return complex_part

    @classmethod
    def _get_reals(cls, factors):
        """Compute real root isolating intervals for a list of factors. """
        reals = []
        for factor, k in factors:
            real_part = cls._get_reals_sqf(factor)
            reals.extend([ (root, factor, k) for root in real_part ])
        return reals

    @classmethod
    def _get_complexes(cls, factors):
        """Compute complex root isolating intervals for a list of factors. """
        complexes = []
        for factor, k in factors:
            complex_part = cls._get_complexes_sqf(factor)
            complexes.extend([ (root, factor, k) for root in complex_part ])
        return complexes

    @classmethod
    def _reals_sorted(cls, reals):
        """Make real isolating intervals disjoint and sort roots. """
        cache = {}
        # Pairwise refinement makes all intervals disjoint so sorting by the
        # left endpoint gives an unambiguous root order.
        for i, (u, f, k) in enumerate(reals):
            for j, (v, g, m) in enumerate(reals[i + 1:]):
                u, v = u.refine_disjoint(v)
                reals[i + j + 1] = (v, g, m)
            reals[i] = (u, f, k)
        reals = sorted(reals, key=lambda r: r[0].a)
        for root, factor, _ in reals:
            if factor in cache:
                cache[factor].append(root)
            else:
                cache[factor] = [root]
        for factor, roots in cache.items():
            _reals_cache[factor] = roots
        return reals

    @classmethod
    def _separate_imaginary_from_complex(cls, complexes):
        from sympy.utilities.iterables import sift

        def is_imag(c):
            '''
            return True if all roots are imaginary (ax**2 + b)
            return False if no roots are imaginary
            return None if 2 roots are imaginary (ax**N'''
            u, f, k = c
            deg = f.degree()
            if f.length() == 2:
                if deg == 2:
                    return True  # both imag
                elif _ispow2(deg):
                    if f.LC()*f.TC() < 0:
                        return None  # 2 are imag
            return False  # none are imag

        # separate according to the function
        sifted = sift(complexes, lambda c: c[1])
        del complexes
        imag = []
        complexes = []
        for f in sifted:
            isift = sift(sifted[f], lambda c: is_imag(c))
            imag.extend(isift.pop(True, []))
            complexes.extend(isift.pop(False, []))
            mixed = isift.pop(None, [])
            assert not isift
            if not mixed:
                continue
            # A "mixed" factor has exactly 2 imaginary roots among others;
            # refine until all non-imaginary intervals have moved off the
            # y-axis, leaving only the imaginary pair.
            while True:
                # the non-imaginary ones will be on one side or the other
                # of the y-axis
                i = 0
                while i < len(mixed):
                    u, f, k = mixed[i]
                    if u.ax*u.bx > 0:
                        complexes.append(mixed.pop(i))
                    else:
                        i += 1
                if len(mixed) == 2:
                    imag.extend(mixed)
                    break
                # refine
                for i, (u, f, k) in enumerate(mixed):
                    u = u._inner_refine()
                    mixed[i] = u, f, k
        return imag, complexes

    @classmethod
    def _refine_complexes(cls, complexes):
        """return complexes such that no bounding rectangles of non-conjugate
        roots would intersect if slid horizontally or vertically/
        """
        from sympy.utilities.iterables import sift
        while complexes:  # break when all are distinct
            # get the intervals pairwise-disjoint. If rectangles were drawn around
            # the coordinates of the bounding rectangles, no rectangles would
            # intersect after this procedure
            for i, (u, f, k) in enumerate(complexes):
                for j, (v, g, m) in enumerate(complexes[i + 1:]):
                    u, v = u.refine_disjoint(v)
                    complexes[i + j + 1] = (v, g, m)
                complexes[i] = (u, f, k)
            # Although there are no intersecting rectangles, a given rectangle
            # might intersect another when slid horizontally. We have to refine
            # intervals until this is not true so we can sort the roots
            # unambiguously. Since complex roots come in conjugate pairs, we
            # will always have 2 rectangles above each other but we should not
            # have more than that.
            N = len(complexes)//2 - 1
            # check x (real) parts: there must be N + 1 disjoint x ranges, i.e.
            # the first one must be different from N others
            uu = set([(u.ax, u.bx) for u, _, _ in complexes])
            u = uu.pop()
            if sum([u[1] <= v[0] or v[1] <= u[0] for v in uu]) < N:
                # refine
                for i, (u, f, k) in enumerate(complexes):
                    u = u._inner_refine()
                    complexes[i] = u, f, k
            else:
                # intervals with identical x-values have disjoint y-values or
                # else they would not be disjoint so there is no need for
                # further checks
                break
        return complexes

    @classmethod
    def _complexes_sorted(cls, complexes):
        """Make complex isolating intervals disjoint and sort roots. """
        if not complexes:
            return []
        cache = {}
        # imaginary roots can cause a problem in terms of sorting since
        # their x-intervals will never refine as distinct from others
        # so we handle them separately
        imag, complexes = cls._separate_imaginary_from_complex(complexes)
        complexes = cls._refine_complexes(complexes)

        # sort imaginary roots
        def key(c):
            '''return, for ax**n+b, +/-root(abs(b/a), b) according to the
            apparent sign of the imaginary interval, e.g. if the interval
            were (0, 3) the positive root would be returned.
            '''
            u, f, k = c
            r = _root(abs(f.TC()/f.LC()), f.degree())
            if u.ay < 0 or u.by < 0:
                return -r
            return r
        imag = sorted(imag, key=lambda c: key(c))
        # sort complexes and combine with imag
        if complexes:
            # key is (x1, y1) e.g. (1, 2)x(3, 4) -> (1,3)
            complexes = sorted(complexes, key=
                lambda c: c[0].a)
            # find insertion point for imaginary
            for i, c in enumerate(reversed(complexes)):
                if c[0].bx <= 0:
                    break
            i = len(complexes) - i - 1
            if i:
                i += 1
            complexes = complexes[:i] + imag + complexes[i:]
        else:
            complexes = imag
        # update cache
        for root, factor, _ in complexes:
            if factor in cache:
                cache[factor].append(root)
            else:
                cache[factor] = [root]
        for factor, roots in cache.items():
            _complexes_cache[factor] = roots
        return complexes

    @classmethod
    def _reals_index(cls, reals, index):
        """Map initial real root index to an index in a factor where the root belongs. """
        i = 0
        for j, (_, factor, k) in enumerate(reals):
            # k is the multiplicity of the factor; each root counts k times.
            if index < i + k:
                poly, index = factor, 0
                # Count how many earlier sorted roots belong to this factor.
                for _, factor, _ in reals[:j]:
                    if factor == poly:
                        index += 1
                return poly, index
            else:
                i += k

    @classmethod
    def _complexes_index(cls, complexes, index):
        """Map initial complex root index to an index in a factor where the root belongs. """
        index, i = index, 0
        for j, (_, factor, k) in enumerate(complexes):
            if index < i + k:
                poly, index = factor, 0
                for _, factor, _ in complexes[:j]:
                    if factor == poly:
                        index += 1
                # Complex roots of a factor are indexed after its real roots.
                index += len(_reals_cache[poly])
                return poly, index
            else:
                i += k

    @classmethod
    def _count_roots(cls, roots):
        """Count the number of real or complex roots including multiplicites. """
        return sum([ k for _, _, k in roots ])

    @classmethod
    def _indexed_root(cls, poly, index):
        """Get a root of a composite polynomial by index. """
        (_, factors) = poly.factor_list()
        reals = cls._get_reals(factors)
        reals_count = cls._count_roots(reals)
        if index < reals_count:
            reals = cls._reals_sorted(reals)
            return cls._reals_index(reals, index)
        else:
            complexes = cls._get_complexes(factors)
            complexes = cls._complexes_sorted(complexes)
            return cls._complexes_index(complexes, index - reals_count)

    @classmethod
    def _real_roots(cls, poly):
        """Get real roots of a composite polynomial. """
        (_, factors) = poly.factor_list()
        reals = cls._get_reals(factors)
        reals = cls._reals_sorted(reals)
        reals_count = cls._count_roots(reals)
        roots = []
        for index in xrange(0, reals_count):
            roots.append(cls._reals_index(reals, index))
        return roots

    @classmethod
    def _all_roots(cls, poly):
        """Get real and complex roots of a composite polynomial. """
        (_, factors) = poly.factor_list()
        reals = cls._get_reals(factors)
        reals = cls._reals_sorted(reals)
        reals_count = cls._count_roots(reals)
        roots = []
        for index in xrange(0, reals_count):
            roots.append(cls._reals_index(reals, index))
        complexes = cls._get_complexes(factors)
        complexes = cls._complexes_sorted(complexes)
        complexes_count = cls._count_roots(complexes)
        for index in xrange(0, complexes_count):
            roots.append(cls._complexes_index(complexes, index))
        return roots

    @classmethod
    @cacheit
    def _roots_trivial(cls, poly, radicals):
        """Compute roots in linear, quadratic and binomial cases. """
        if poly.degree() == 1:
            return roots_linear(poly)
        if not radicals:
            return None
        if poly.degree() == 2:
            return roots_quadratic(poly)
        elif poly.length() == 2 and poly.TC():
            return roots_binomial(poly)
        else:
            return None

    @classmethod
    def _preprocess_roots(cls, poly):
        """Take heroic measures to make ``poly`` compatible with ``RootOf``. """
        dom = poly.get_domain()
        if not dom.is_Exact:
            poly = poly.to_exact()
        coeff, poly = preprocess_roots(poly)
        dom = poly.get_domain()
        if not dom.is_ZZ:
            raise NotImplementedError(
                "sorted roots not supported over %s" % dom)
        return coeff, poly

    @classmethod
    def _postprocess_root(cls, root, radicals):
        """Return the root if it is trivial or a ``RootOf`` object. """
        poly, index = root
        roots = cls._roots_trivial(poly, radicals)
        if roots is not None:
            return roots[index]
        else:
            return cls._new(poly, index)

    @classmethod
    def _get_roots(cls, method, poly, radicals):
        """Return postprocessed roots of specified kind. """
        if not poly.is_univariate:
            raise PolynomialError("only univariate polynomials are allowed")
        coeff, poly = cls._preprocess_roots(poly)
        roots = []
        for root in getattr(cls, method)(poly):
            roots.append(coeff*cls._postprocess_root(root, radicals))
        return roots

    def _get_interval(self):
        """Internal function for retrieving isolation interval from cache. """
        if self.is_real:
            return _reals_cache[self.poly][self.index]
        else:
            reals_count = len(_reals_cache[self.poly])
            return _complexes_cache[self.poly][self.index - reals_count]

    def _set_interval(self, interval):
        """Internal function for updating isolation interval in cache. """
        if self.is_real:
            _reals_cache[self.poly][self.index] = interval
        else:
            reals_count = len(_reals_cache[self.poly])
            _complexes_cache[self.poly][self.index - reals_count] = interval

    def _eval_evalf(self, prec):
        """Evaluate this complex root to the given precision. """
        with workprec(prec):
            func = lambdify(self.poly.gen, self.expr)
            interval = self._get_interval()
            if not self.is_real:
                # For complex intervals, we need to keep refining until the
                # imaginary interval is disjunct with other roots, that is,
                # until both ends get refined.
                ay = interval.ay
                by = interval.by
                while interval.ay == ay or interval.by == by:
                    interval = interval.refine()
            while True:
                if self.is_real:
                    x0 = mpf(str(interval.center))
                else:
                    x0 = mpc(*map(str, interval.center))
                try:
                    root = findroot(func, x0)
                    # If the (real or complex) root is not in the 'interval',
                    # then keep refining the interval. This happens if findroot
                    # accidentally finds a different root outside of this
                    # interval because our initial estimate 'x0' was not close
                    # enough.
                    if self.is_real:
                        a = mpf(str(interval.a))
                        b = mpf(str(interval.b))
                        if not (a < root < b):
                            raise ValueError("Root not in the interval.")
                    else:
                        ax = mpf(str(interval.ax))
                        bx = mpf(str(interval.bx))
                        ay = mpf(str(interval.ay))
                        by = mpf(str(interval.by))
                        if not (ax < root.real < bx and ay < root.imag < by):
                            raise ValueError("Root not in the interval.")
                except ValueError:
                    interval = interval.refine()
                    continue
                else:
                    break
        return Float._new(root.real._mpf_, prec) + I*Float._new(root.imag._mpf_, prec)

    def eval_rational(self, tol):
        """
        Returns a Rational approximation to ``self`` with the tolerance ``tol``.

        This method uses bisection, which is very robust and it will always
        converge. The returned Rational instance will be at most 'tol' from the
        exact root.

        The following example first obtains Rational approximation to 1e-7
        accuracy for all roots of the 4-th order Legendre polynomial, and then
        evaluates it to 5 decimal digits (so all digits will be correct
        including rounding):

        >>> from sympy import S, legendre_poly, Symbol
        >>> x = Symbol("x")
        >>> p = legendre_poly(4, x, polys=True)
        >>> roots = [r.eval_rational(S(1)/10**7) for r in p.real_roots()]
        >>> roots = [str(r.n(5)) for r in roots]
        >>> roots
        ['-0.86114', '-0.33998', '0.33998', '0.86114']

        """
        if not self.is_real:
            raise NotImplementedError("eval_rational() only works for real polynomials so far")
        func = lambdify(self.poly.gen, self.expr)
        interval = self._get_interval()
        a = Rational(str(interval.a))
        b = Rational(str(interval.b))
        return bisect(func, a, b, tol)

    def _eval_Eq(self, other):
        # RootOf represents a Root, so if other is that root, it should set
        # the expression to zero *and* it should be in the interval of the
        # RootOf instance. It must also be a number that agrees with the
        # is_real value of the RootOf instance.
        if type(self) == type(other):
            return sympify(self.__eq__(other))
        if not (other.is_number and not other.has(AppliedUndef)):
            return S.false
        if not other.is_finite:
            return S.false
        z = self.expr.subs(self.expr.free_symbols.pop(), other).is_zero
        if z is False:  # all roots will make z True but we don't know
            return S.false
        o = other.is_real, other.is_imaginary
        s = self.is_real, self.is_imaginary
        if o != s and None not in o and None not in s:
            return S.false
        if z:
            i = self._get_interval()
            was = i.a, i.b
            need = [1, 1]
            # make sure it would be distinct from others
            while any(need):
                i = i.refine()
                a, b = i.a, i.b
                if need[0] and a != was[0]:
                    need[0] = 0
                if need[1] and b != was[1]:
                    need[1] = 0
            if self.is_real:
                a, b = [Rational(str(i)) for i in (a, b)]
                return sympify(a < other and other < b)
            re, im = other.as_real_imag()
            z = r1, r2, i1, i2 = [Rational(str(j)) for j in (
                i.ax, i.bx, i.ay, i.by)]
            return sympify((
                r1 < re and re < r2) and (
                i1 < im and im < i2))
@public
class RootSum(Expr):
    """Represents a sum of all roots of a univariate polynomial. """

    # poly: the underlying polynomial; fun: the Lambda applied to each root;
    # auto: whether the rational-function fast path may be taken.
    __slots__ = ['poly', 'fun', 'auto']

    def __new__(cls, expr, func=None, x=None, auto=True, quadratic=False):
        """Construct a new ``RootSum`` instance carrying all roots of a polynomial. """
        coeff, poly = cls._transform(expr, x)
        if not poly.is_univariate:
            raise MultivariatePolynomialError(
                "only univariate polynomials are allowed")
        if func is None:
            # Default to summing the roots themselves.
            func = Lambda(poly.gen, poly.gen)
        else:
            try:
                is_func = func.is_Function
            except AttributeError:
                is_func = False
            if is_func and 1 in func.nargs:
                if not isinstance(func, Lambda):
                    func = Lambda(poly.gen, func(poly.gen))
            else:
                raise ValueError(
                    "expected a univariate function, got %s" % func)
        var, expr = func.variables[0], func.expr
        if coeff is not S.One:
            expr = expr.subs(var, coeff*var)
        deg = poly.degree()
        # A constant function contributes deg copies of itself.
        if not expr.has(var):
            return deg*expr
        # Split off additive and multiplicative constants so only the
        # variable-dependent core is summed over the roots.
        if expr.is_Add:
            add_const, expr = expr.as_independent(var)
        else:
            add_const = S.Zero
        if expr.is_Mul:
            mul_const, expr = expr.as_independent(var)
        else:
            mul_const = S.One
        func = Lambda(var, expr)
        rational = cls._is_func_rational(poly, func)
        (_, factors), terms = poly.factor_list(), []
        for poly, k in factors:
            if poly.is_linear:
                term = func(roots_linear(poly)[0])
            elif quadratic and poly.is_quadratic:
                term = sum(map(func, roots_quadratic(poly)))
            else:
                if not rational or not auto:
                    term = cls._new(poly, func, auto)
                else:
                    term = cls._rational_case(poly, func)
            terms.append(k*term)
        return mul_const*Add(*terms) + deg*add_const

    @classmethod
    def _new(cls, poly, func, auto=True):
        """Construct new raw ``RootSum`` instance. """
        obj = Expr.__new__(cls)
        obj.poly = poly
        obj.fun = func
        obj.auto = auto
        return obj

    @classmethod
    def new(cls, poly, func, auto=True):
        """Construct new ``RootSum`` instance. """
        if not func.expr.has(*func.variables):
            return func.expr
        rational = cls._is_func_rational(poly, func)
        if not rational or not auto:
            return cls._new(poly, func, auto)
        else:
            return cls._rational_case(poly, func)

    @classmethod
    def _transform(cls, expr, x):
        """Transform an expression to a polynomial. """
        poly = PurePoly(expr, x, greedy=False)
        return preprocess_roots(poly)

    @classmethod
    def _is_func_rational(cls, poly, func):
        """Check if a lambda is a rational function. """
        var, expr = func.variables[0], func.expr
        return expr.is_rational_function(var)

    @classmethod
    def _rational_case(cls, poly, func):
        """Handle the rational function case. """
        roots = symbols('r:%d' % poly.degree())
        var, expr = func.variables[0], func.expr
        # Sum func over symbolic roots, then rewrite the symmetric
        # coefficients via Viete's formulas.
        f = sum(expr.subs(var, r) for r in roots)
        p, q = together(f).as_numer_denom()
        domain = QQ[roots]
        p = p.expand()
        q = q.expand()
        try:
            p = Poly(p, domain=domain, expand=False)
        except GeneratorsNeeded:
            p, p_coeff = None, (p,)
        else:
            p_monom, p_coeff = zip(*p.terms())
        try:
            q = Poly(q, domain=domain, expand=False)
        except GeneratorsNeeded:
            q, q_coeff = None, (q,)
        else:
            q_monom, q_coeff = zip(*q.terms())
        coeffs, mapping = symmetrize(p_coeff + q_coeff, formal=True)
        formulas, values = viete(poly, roots), []
        for (sym, _), (_, val) in zip(mapping, formulas):
            values.append((sym, val))
        for i, (coeff, _) in enumerate(coeffs):
            coeffs[i] = coeff.subs(values)
        n = len(p_coeff)
        p_coeff = coeffs[:n]
        q_coeff = coeffs[n:]
        if p is not None:
            p = Poly(dict(zip(p_monom, p_coeff)), *p.gens).as_expr()
        else:
            (p,) = p_coeff
        if q is not None:
            q = Poly(dict(zip(q_monom, q_coeff)), *q.gens).as_expr()
        else:
            (q,) = q_coeff
        return factor(p/q)

    def _hashable_content(self):
        return (self.poly, self.fun)

    @property
    def expr(self):
        """The polynomial as a plain Expr."""
        return self.poly.as_expr()

    @property
    def args(self):
        return (self.expr, self.fun, self.poly.gen)

    @property
    def free_symbols(self):
        return self.poly.free_symbols | self.fun.free_symbols

    @property
    def is_commutative(self):
        return True

    def doit(self, **hints):
        if not hints.get('roots', True):
            return self
        _roots = roots(self.poly, multiple=True)
        # Only expand when every root could be expressed explicitly.
        if len(_roots) < self.poly.degree():
            return self
        else:
            return Add(*[ self.fun(r) for r in _roots ])

    def _eval_evalf(self, prec):
        try:
            _roots = self.poly.nroots(n=prec_to_dps(prec))
        except (DomainError, PolynomialError):
            return self
        else:
            return Add(*[ self.fun(r) for r in _roots ])

    def _eval_derivative(self, x):
        # d/dx sum(f(r)) = sum(f'(r)) since the roots do not depend on x.
        var, expr = self.fun.args
        func = Lambda(var, expr.diff(x))
        return self.new(self.poly, func, self.auto)
def bisect(f, a, b, tol):
    """
    Implements bisection. This function is used in RootOf.eval_rational() and
    it needs to be robust.

    ``f`` is the function, ``[a, b]`` the bracketing interval and ``tol`` the
    maximal distance between the returned value and the exact root.

    Examples
    ========

    >>> from sympy import S
    >>> from sympy.polys.rootoftools import bisect
    >>> bisect(lambda x: x**2-1, -10, 0, S(1)/10**2)
    -1025/1024
    >>> bisect(lambda x: x**2-1, -10, 0, S(1)/10**4)
    -131075/131072

    """
    a = sympify(a)
    b = sympify(b)
    fa = f(a)
    fb = f(b)
    # An endpoint that is already an exact root is a valid answer; the
    # previous `fa * fb >= 0` test rejected this case with a ValueError.
    if fa == 0:
        return a
    if fb == 0:
        return b
    if fa * fb > 0:
        raise ValueError("bisect: f(a) and f(b) must have opposite signs")
    while (b - a > tol):
        c = (a + b)/2
        fc = f(c)
        if (fc == 0):
            return c  # We need to make sure f(c) is not zero below
        if (fa * fc < 0):
            b = c
            fb = fc
        else:
            a = c
            fa = fc
    return (a + b)/2
| |
# UserHandlers: Code for retrieving, storing and showing Profiles
#
# Date Time Prog Note
# 31-Aug-2013 2:18 AM ATC
# ATC = Ali Taylan Cemgil,
# Department of Computer Engineering, Bogazici University
# e-mail : taylan.cemgil@boun.edu.tr
# TODO: Having separate code for a single and multiple profiles is not necessary
# store_*_profile functions can be merged
import json
import bson.json_util
import tornado.ioloop
import tornado.web
from tornado import gen
from tornado.gen import Return
from tornado.web import HTTPError
from tornado.web import MissingArgumentError
import utils.drnj_time as drnj_time
from drenaj_api.utils.drenaj_collection_templates import *
from schedulerMainHandler import markProtected
# Hard cap on how many profiles a single 'view' request may return.
MAX_LIM_TO_VIEW_PROFILES = 10000
# route: (r"/profiles/(store|view)", UserProfilesHandler),
class UserProfilesHandler(tornado.web.RequestHandler):
def set_default_headers(self):
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Methods", "GET, POST, DELETE, PUT, OPTIONS")
self.set_header('Access-Control-Allow-Headers',
'Origin, X-Requested-With, Content-Type, Accept')
def get(self, *args):
self.post(*args)
# self.write("not implemented yet")
@tornado.web.asynchronous
@gen.coroutine
def post(self, *args):
"""
Note: OG: I chose to handle all options at once, using only POST requests
for API requests. GET requests will be used for browser examination.
"""
motor_column = self.application.db.motor_column
store_or_view = args[0]
print 'UserProfilesHandler. Command:', store_or_view
if ((store_or_view is not None and store_or_view == 'view') or (store_or_view is None)):
try:
limit = self.get_argument('limit', 20)
if limit > MAX_LIM_TO_VIEW_PROFILES:
limit = MAX_LIM_TO_VIEW_PROFILES
campaign_or_user = args[1]
tmp = []
if (campaign_or_user == 'campaign'):
campaign_id = self.get_argument('campaign_id', None)
cursor = motor_column.tweets. \
find({'campaign_id': campaign_id,
'tweet.user.history': False}). \
sort('record_retrieved_at', -1). \
limit(limit)
for record in (yield cursor.to_list(length=100)):
user = record['tweet']['user']
user['present'] = True
user['record_retrieved_at'] = record['record_retrieved_at']
# id_str = user['id_str']
# user['known_followers_count'] = graph_coll.find({'friend_id_str': id_str}).count();
# user['known_friends_count'] = graph_coll.find({'id_str': id_str}).count();
id = user['id']
user['known_followers_count'] = \
yield motor_column.graph.find({'friend_id': id}).count()
user['known_friends_count'] = \
yield motor_column.graph.find({'id': id}).count()
tmp.append(user)
elif (campaign_or_user == 'user'):
user_id_list = self.get_argument('user_id_list', '')
user_id_array = user_id_list.split(',')
for user_id_str in user_id_array:
record = None
print "USER " + user_id_str
if user_id_str:
record = yield motor_column.tweets. \
find_one({'tweet.user.id_str': user_id_str,
'tweet.user.history': False})
else:
continue
user = dict()
if record:
user = record['tweet']['user']
user['present'] = True
user['record_retrieved_at'] = record['record_retrieved_at']
# id_str = user['id_str']
# user['known_followers_count'] = graph_coll.find({'friend_id_str': id_str}).count();
# user['known_friends_count'] = graph_coll.find({'id_str': id_str}).count();
id = user['id']
else:
user['present'] = False
user['id_str'] = user_id_str
user['known_followers_count'] = \
yield motor_column.graph.find({'friend_id_str': user_id_str}).count()
user['known_friends_count'] = \
yield motor_column.graph.find({'id_str': user_id_str}).count()
tmp.append(user)
else:
raise MissingArgumentError('campaign_id or user_id_list')
result = bson.json_util.dumps(tmp)
self.write(result)
### else:
### # TODO: View the specified user ID profiles
### result = json_encode({'message': 'Not implemented yet, View the specified user ID profiles'})
### self.write(result)
except MissingArgumentError as e:
# TODO: implement logging.
raise HTTPError(500, 'You didn''t supply %s as an argument' % e.arg_name)
elif (store_or_view == 'store'):
try:
json_user_id = self.get_argument('user_id')
ids = json.loads(json_user_id)
auth_user_id = self.get_argument('auth_user_id')
campaign_id = self.get_argument('campaign_id', 'default')
json_data = self.get_argument('v', None)
S = bson.json_util.loads(json_data)
try:
self.store_multiple_profiles(ids, S, drnjID=auth_user_id,
campaign_id=campaign_id)
except Return, r:
nids = r.value
from drenaj_api.handlers.schedulerMainHandler import markProtected
if len(nids) > 0:
for i in range(len(nids)):
markProtected(motor_column.queue, nids[i], True, auth_user_id)
print "User not Found, Removing from queue: ",
print nids[i]
# Returns profile ids that could not be retrieved
print nids
self.write(bson.json_util.dumps(nids))
except MissingArgumentError as e:
# TODO: implement logging.
raise HTTPError(500, 'You didn''t supply %s as an argument' % e.arg_name)
@gen.coroutine
def store_multiple_profiles(self, ids, S, drnjID, campaign_id):
"""
"""
# print "Received recent profile of ", v['name'], ' a.k.a. ', v['screen_name']
db = self.application.db
queue_coll = db.motor_column.queue
print S
for i in range(len(S)):
status = None
if 'status' in S[i]:
status = S[i]['status']
del S[i]['status']
else:
status = {}
status['text'] = None
status['user'] = S[i]
status['user']['history'] = False
DB_TEST_VERSION = 0.2
tweet_dat = validate_document(new_tweet_template(), {
"tweet": status,
# TODO: Replace this DB_TEST_VERSION with source code
# version later
"drenaj_service_version": DB_TEST_VERSION,
"campaign_id": campaign_id,
"record_retrieved_at": drnj_time.now_in_drnj_time(),
"retrieved_by": drnjID,
}, fail=False)
print tweet_dat
user_id = S[i]['id_str']
# print profile_dat
# Check Queue
now = drnj_time.now_in_drnj_time()
queue_query = {"id": user_id}
queue_document = validate_document(new_queue_document(), {
"id": int(user_id),
"id_str": user_id,
"profile_retrieved_at": now,
"$setOnInsert": {
"friends_retrieved_at": 0,
"followers_retrieved_at": 0,
},
"retrieved_by": drnjID
})
# creates entry if query does not exist
yield queue_coll.update(queue_query, queue_document, upsert=True)
# Insert to profiles
## profiles_query = {"profile.id": user_id}
## prof = profiles_collection.find_and_modify(profiles_query, remove=True)
## if prof is not None:
## profiles_history_collection.insert(prof)
##
## profiles_collection.insert(profile_dat)
# this call marks the current entries as history
# maybe we won't need this for certain queries
db.move_to_history(user_id)
db.insert_tweet(tweet_dat)
# tweets_collection.insert(tweet_dat)
ids.remove(int(user_id))
raise Return(ids)
| |
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cloudtrail import exceptions
from boto.compat import json
class CloudTrailConnection(AWSQueryConnection):
    """
    AWS Cloud Trail
    This is the CloudTrail API Reference. It provides descriptions of
    actions, data types, common parameters, and common errors for
    CloudTrail.
    CloudTrail is a web service that records AWS API calls for your
    AWS account and delivers log files to an Amazon S3 bucket. The
    recorded information includes the identity of the user, the start
    time of the AWS API call, the source IP address, the request
    parameters, and the response elements returned by the service.
    As an alternative to using the API, you can use one of the AWS
    SDKs, which consist of libraries and sample code for various
    programming languages and platforms (Java, Ruby, .NET, iOS,
    Android, etc.). The SDKs provide a convenient way to create
    programmatic access to AWSCloudTrail. For example, the SDKs take
    care of cryptographically signing requests, managing errors, and
    retrying requests automatically. For information about the AWS
    SDKs, including how to download and install them, see the `Tools
    for Amazon Web Services page`_.
    See the CloudTrail User Guide for information about the data that
    is included with each AWS API call listed in the log files.
    """
    # Service metadata.  The wire protocol is JSON over POST; each call is
    # routed by an X-Amz-Target header built from TargetPrefix + action
    # (see make_request below).
    APIVersion = "2013-11-01"
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "cloudtrail.us-east-1.amazonaws.com"
    ServiceName = "CloudTrail"
    TargetPrefix = "com.amazonaws.cloudtrail.v20131101.CloudTrail_20131101"
    ResponseError = JSONResponseError
    # Maps the '__type' field of a JSON error response body to the concrete
    # boto exception class raised by make_request().
    _faults = {
        "InvalidSnsTopicNameException": exceptions.InvalidSnsTopicNameException,
        "InvalidS3BucketNameException": exceptions.InvalidS3BucketNameException,
        "TrailAlreadyExistsException": exceptions.TrailAlreadyExistsException,
        "InsufficientSnsTopicPolicyException": exceptions.InsufficientSnsTopicPolicyException,
        "InvalidTrailNameException": exceptions.InvalidTrailNameException,
        "TrailNotProvidedException": exceptions.TrailNotProvidedException,
        "TrailNotFoundException": exceptions.TrailNotFoundException,
        "S3BucketDoesNotExistException": exceptions.S3BucketDoesNotExistException,
        "InvalidS3PrefixException": exceptions.InvalidS3PrefixException,
        "MaximumNumberOfTrailsExceededException": exceptions.MaximumNumberOfTrailsExceededException,
        "InsufficientS3BucketPolicyException": exceptions.InsufficientS3BucketPolicyException,
    }
    def __init__(self, **kwargs):
        # Fall back to the default region/endpoint unless the caller
        # supplied an explicit 'region' or 'host' keyword argument.
        region = kwargs.pop('region', None)
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
        if 'host' not in kwargs:
            kwargs['host'] = region.endpoint
        super(CloudTrailConnection, self).__init__(**kwargs)
        self.region = region
    def _required_auth_capability(self):
        # CloudTrail requests are signed with AWS Signature Version 4.
        return ['hmac-v4']
    def create_trail(self, name=None, s3_bucket_name=None,
                     s3_key_prefix=None, sns_topic_name=None,
                     include_global_service_events=None, trail=None):
        """
        From the command line, use `create-subscription`.
        Creates a trail that specifies the settings for delivery of
        log data to an Amazon S3 bucket.
        Support for passing Trail as a parameter ends as early as
        February 25, 2014. The request and response examples in this
        topic show the use of parameters as well as a Trail object.
        Until Trail is removed, you can use either Trail or the
        parameter list.
        :type name: string
        :param name: Specifies the name of the trail.
        :type s3_bucket_name: string
        :param s3_bucket_name: Specifies the name of the Amazon S3 bucket
            designated for publishing log files.
        :type s3_key_prefix: string
        :param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes
            the name of the bucket you have designated for log file delivery.
        :type sns_topic_name: string
        :param sns_topic_name: Specifies the name of the Amazon SNS topic
            defined for notification of log file delivery.
        :type include_global_service_events: boolean
        :param include_global_service_events: Specifies whether the trail is
            publishing events from global services such as IAM to the log
            files.
        :type trail: dict
        :param trail: Support for passing a Trail object in the CreateTrail or
            UpdateTrail actions will end as early as February 15, 2014. Instead
            of the Trail object and its members, use the parameters listed for
            these actions.
        """
        # Only include parameters the caller actually supplied.
        params = {}
        if name is not None:
            params['Name'] = name
        if s3_bucket_name is not None:
            params['S3BucketName'] = s3_bucket_name
        if s3_key_prefix is not None:
            params['S3KeyPrefix'] = s3_key_prefix
        if sns_topic_name is not None:
            params['SnsTopicName'] = sns_topic_name
        if include_global_service_events is not None:
            params['IncludeGlobalServiceEvents'] = include_global_service_events
        if trail is not None:
            params['trail'] = trail
        return self.make_request(action='CreateTrail',
                                 body=json.dumps(params))
    def delete_trail(self, name):
        """
        Deletes a trail.
        :type name: string
        :param name: The name of a trail to be deleted.
        """
        params = {'Name': name, }
        return self.make_request(action='DeleteTrail',
                                 body=json.dumps(params))
    def describe_trails(self, trail_name_list=None):
        """
        Retrieves the settings for some or all trails associated with
        an account.
        :type trail_name_list: list
        :param trail_name_list: The list of trails.
        """
        params = {}
        if trail_name_list is not None:
            params['trailNameList'] = trail_name_list
        return self.make_request(action='DescribeTrails',
                                 body=json.dumps(params))
    def get_trail_status(self, name):
        """
        Returns a JSON-formatted list of information about the
        specified trail. Fields include information on delivery
        errors, Amazon SNS and Amazon S3 errors, and start and stop
        logging times for each trail.
        The CloudTrail API is currently undergoing revision. This
        action currently returns both new fields and fields slated for
        removal from the API. The following lists indicate the plans
        for each field:
        **List of Members Planned for Ongoing Support**
        + IsLogging
        + LatestDeliveryTime
        + LatestNotificationTime
        + StartLoggingTime
        + StopLoggingTime
        + LatestNotificationError
        + LatestDeliveryError
        **List of Members Scheduled for Removal**
        + **LatestDeliveryAttemptTime**: Use LatestDeliveryTime
          instead.
        + **LatestNotificationAttemptTime**: Use
          LatestNotificationTime instead.
        + **LatestDeliveryAttemptSucceeded**: No replacement. See the
          note following this list.
        + **LatestNotificationAttemptSucceeded**: No replacement. See
          the note following this list.
        + **TimeLoggingStarted**: Use StartLoggingTime instead.
        + **TimeLoggingStopped**: Use StopLoggingtime instead.
        No replacements have been created for
        LatestDeliveryAttemptSucceeded and
        LatestNotificationAttemptSucceeded . Use LatestDeliveryError
        and LatestNotificationError to evaluate success or failure of
        log delivery or notification. Empty values returned for these
        fields indicate success. An error in LatestDeliveryError
        generally indicates either a missing bucket or insufficient
        permissions to write to the bucket. Similarly, an error in
        LatestNotificationError indicates either a missing topic or
        insufficient permissions.
        :type name: string
        :param name: The name of the trail for which you are requesting the
            current status.
        """
        params = {'Name': name, }
        return self.make_request(action='GetTrailStatus',
                                 body=json.dumps(params))
    def start_logging(self, name):
        """
        Starts the recording of AWS API calls and log file delivery
        for a trail.
        :type name: string
        :param name: The name of the trail for which CloudTrail logs AWS API
            calls.
        """
        params = {'Name': name, }
        return self.make_request(action='StartLogging',
                                 body=json.dumps(params))
    def stop_logging(self, name):
        """
        Suspends the recording of AWS API calls and log file delivery
        for the specified trail. Under most circumstances, there is no
        need to use this action. You can update a trail without
        stopping it first. This action is the only way to stop
        recording.
        :type name: string
        :param name: Communicates to CloudTrail the name of the trail for which
            to stop logging AWS API calls.
        """
        params = {'Name': name, }
        return self.make_request(action='StopLogging',
                                 body=json.dumps(params))
    def update_trail(self, name=None, s3_bucket_name=None,
                     s3_key_prefix=None, sns_topic_name=None,
                     include_global_service_events=None, trail=None):
        """
        From the command line, use `update-subscription`.
        Updates the settings that specify delivery of log files.
        Changes to a trail do not require stopping the CloudTrail
        service. Use this action to designate an existing bucket for
        log delivery. If the existing bucket has previously been a
        target for CloudTrail log files, an IAM policy exists for the
        bucket.
        Support for passing Trail as a parameter ends as early as
        February 25, 2014. The request and response examples in this
        topic show the use of parameters as well as a Trail object.
        Until Trail is removed, you can use either Trail or the
        parameter list.
        :type name: string
        :param name: Specifies the name of the trail.
        :type s3_bucket_name: string
        :param s3_bucket_name: Specifies the name of the Amazon S3 bucket
            designated for publishing log files.
        :type s3_key_prefix: string
        :param s3_key_prefix: Specifies the Amazon S3 key prefix that precedes
            the name of the bucket you have designated for log file delivery.
        :type sns_topic_name: string
        :param sns_topic_name: Specifies the name of the Amazon SNS topic
            defined for notification of log file delivery.
        :type include_global_service_events: boolean
        :param include_global_service_events: Specifies whether the trail is
            publishing events from global services such as IAM to the log
            files.
        :type trail: dict
        :param trail: Support for passing a Trail object in the CreateTrail or
            UpdateTrail actions will end as early as February 15, 2014. Instead
            of the Trail object and its members, use the parameters listed for
            these actions.
        """
        # Only include parameters the caller actually supplied.
        params = {}
        if name is not None:
            params['Name'] = name
        if s3_bucket_name is not None:
            params['S3BucketName'] = s3_bucket_name
        if s3_key_prefix is not None:
            params['S3KeyPrefix'] = s3_key_prefix
        if sns_topic_name is not None:
            params['SnsTopicName'] = sns_topic_name
        if include_global_service_events is not None:
            params['IncludeGlobalServiceEvents'] = include_global_service_events
        if trail is not None:
            params['trail'] = trail
        return self.make_request(action='UpdateTrail',
                                 body=json.dumps(params))
    def make_request(self, action, body):
        """POST *body* (a JSON string) to the service for *action* and
        return the decoded JSON response (None for an empty 200 body).
        On a non-200 status, raises the exception class mapped from the
        error payload's '__type' field via _faults.
        """
        # NOTE(review): vendored boto SDK code -- the header names and the
        # JSON-over-POST wire format below must match the service exactly.
        headers = {
            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
            'Host': self.region.endpoint,
            'Content-Type': 'application/x-amz-json-1.1',
            'Content-Length': str(len(body)),
        }
        http_request = self.build_base_http_request(
            method='POST', path='/', auth_path='/', params={},
            headers=headers, data=body)
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read().decode('utf-8')
        boto.log.debug(response_body)
        if response.status == 200:
            if response_body:
                return json.loads(response_body)
        else:
            # Map the JSON error payload to a concrete boto exception.
            json_body = json.loads(response_body)
            fault_name = json_body.get('__type', None)
            exception_class = self._faults.get(fault_name, self.ResponseError)
            raise exception_class(response.status, response.reason,
                                  body=json_body)
| |
# http://blog.shayanjaved.com/2012/01/14/binary-search-tree-in-python/
# http://interactivepython.org/XSKWZ/LpOMZ/courselib/static/pythonds/Trees/bst.html
import timeit
import random
'''subprocess import commented out: the dot-graph rendering is only useful when its output is directed to a file, so it is disabled by default.'''
# import subprocess
from collections import deque
class BSTNode(object):
    '''Single node of a binary search tree.

    Stores a value, optional parent/child links and a cached subtree
    height plus balance factor (maintained by update_height()) for the
    parent-pointer AVL routines in BST.
    '''
    def __init__(self, val, parent=None, left_child=None, right_child=None):
        self.val = val
        self.parent = parent
        self.left = left_child
        self.right = right_child
        # Cached subtree height / balance factor; see update_height().
        self.height = 0
        self.balance = 0
    # Alias properties: parts of this module refer to the children as
    # ``left_child``/``right_child`` (matching the __init__ parameter
    # names) while others use ``left``/``right``.  The originals only set
    # ``left``/``right``, so every ``left_child`` access raised
    # AttributeError; exposing both names keeps all callers working
    # against the same underlying attributes.
    @property
    def left_child(self):
        return self.left
    @left_child.setter
    def left_child(self, node):
        self.left = node
    @property
    def right_child(self):
        return self.right
    @right_child.setter
    def right_child(self, node):
        self.right = node
    def is_root(self):
        '''Return True when this node has no parent.'''
        return not self.parent
    def is_leaf(self):
        '''Return True when this node has no children.'''
        # Fix: the original read self.right_child/self.left_child, which
        # did not exist as attributes.
        return not (self.right or self.left)
    def is_left(self):
        '''Return True/False when this node is a left/right child of its
        parent, or None when it has no parent (i.e. it is the root).'''
        if self.parent is None:
            return self.parent
        else:
            return self is self.parent.left
    def update_height(self, bubble_up=True):
        '''Recompute this node's cached height and balance factor.

        If *bubble_up* is True and the height changed, propagate the
        correction up through the parents.
        '''
        # Fix: use the real left/right attributes (left_child/right_child
        # did not exist on instances in the original).
        left_height = 0 if self.left is None else self.left.height + 1
        right_height = 0 if self.right is None else self.right.height + 1
        # Balance is recomputed even when the height is unchanged.
        self.balance = left_height - right_height
        height = max(left_height, right_height)
        if self.height != height:
            self.height = height
            # Only bubble up when the height actually changed.
            if self.parent is not None and bubble_up:
                self.parent.update_height()
    def _get_dot(self):
        """recursively prepare a dot graph entry for this node."""
        if self.left is not None:
            yield "\t%s -> %s;" % (self.val, self.left.val)
            for i in self.left._get_dot():
                yield i
        elif self.right is not None:
            # Invisible placeholder preserves left/right ordering in graphviz.
            # Fix: randint requires integer bounds (1e9 is a float).
            r = random.randint(0, 10 ** 9)
            yield "\tnull%s [shape=point];" % r
            yield "\t%s -> null%s;" % (self.val, r)
        if self.right is not None:
            yield "\t%s -> %s;" % (self.val, self.right.val)
            for i in self.right._get_dot():
                yield i
        elif self.left is not None:
            r = random.randint(0, 10 ** 9)
            yield "\tnull%s [shape=point];" % r
            yield "\t%s -> null%s;" % (self.val, r)
class BST(object):
    '''Binary search tree with a plain (unbalanced) insert() plus an
    AVL-style self-balancing insert (put()/_put()).

    Duplicate values are silently ignored by insert().
    '''
    def __init__(self, vals=None):
        """Create a tree; optionally insert every value in iterable *vals*.

        Fix: *vals* was previously accepted but silently ignored.
        """
        self.root = None
        self._size = 0
        if vals is not None:
            for val in vals:
                self.insert(val)
    def size(self):
        '''Will return integer size of BST'''
        return self._size
    def insert(self, val):
        '''Inserts the data in val into BST (duplicates are ignored).'''
        if self.root is None:
            self.root = BSTNode(val)
            self._size += 1
            return
        current_node = self.root
        while True:
            if current_node.val > val:
                if current_node.left:
                    current_node = current_node.left
                else:
                    # Fix: record the parent pointer so the parent-based
                    # helpers (find_next/find_prev/rotate_*) can walk up.
                    current_node.left = BSTNode(val, parent=current_node)
                    self._size += 1
                    break
            elif current_node.val < val:
                if current_node.right:
                    current_node = current_node.right
                else:
                    current_node.right = BSTNode(val, parent=current_node)
                    self._size += 1
                    break
            else:
                # Duplicate value: nothing to do.
                break
    def contains(self, val):
        '''Returns true if data in val is in BST'''
        if self.root is None:
            return False
        current_node = self.root
        while True:
            if current_node.val > val:
                if current_node.left:
                    current_node = current_node.left
                else:
                    return False
            elif current_node.val < val:
                if current_node.right:
                    current_node = current_node.right
                else:
                    return False
            else:
                return True
    def depth(self):
        '''Returns total number of levels in BST as integer'''
        if self.root is None:
            return 0
        return self._depth(1, self.root)
    def _depth(self, curr_depth, local_root):
        '''Helper function for depth'''
        l_depth = r_depth = 0
        if local_root.left:
            l_depth = self._depth(curr_depth + 1, local_root.left)
        if local_root.right:
            r_depth = self._depth(curr_depth + 1, local_root.right)
        return max(curr_depth, l_depth, r_depth)
    def is_balanced(self):
        '''Return positive or negative integer to represent tree balance'''
        ret_val = 0
        if self.root is None:
            return ret_val
        if self.root.left:
            ret_val += self._depth(1, self.root.left)
        if self.root.right:
            ret_val -= self._depth(1, self.root.right)
        return ret_val
    def height(self, node):
        '''Cached height of *node*'s subtree; -1 for an empty subtree.'''
        if node is None:
            return -1
        else:
            return node.height
    def get_dot(self):
        """return the tree with root 'self' as a dot graph for visualization"""
        # Fix: guard the empty tree (the original dereferenced
        # self.root.val and crashed when root was None).
        if self.root is None or self.root.val is None:
            return "digraph G{\n}"
        return "digraph G{\n%s}" % (
            "\t%s;\n%s\n" % (
                self.root.val,
                "\n".join(self.root._get_dot())
            )
        )
    def in_order(self):
        '''Yield values in ascending order.'''
        return self._in_order(self.root)
    def _in_order(self, leaf):
        if leaf is None:
            return
        for val in self._in_order(leaf.left):
            yield val
        yield leaf.val
        for val in self._in_order(leaf.right):
            yield val
    def pre_order(self):
        '''Yield values in root-left-right order.'''
        return self._pre_order(self.root)
    def _pre_order(self, leaf):
        if leaf is None:
            return
        yield leaf.val
        for val in self._pre_order(leaf.left):
            yield val
        for val in self._pre_order(leaf.right):
            yield val
    def post_order(self):
        '''Yield values in left-right-root order.'''
        return self._post_order(self.root)
    def _post_order(self, leaf):
        if leaf is None:
            return
        for val in self._post_order(leaf.left):
            yield val
        for val in self._post_order(leaf.right):
            yield val
        yield leaf.val
    def breadth_traversal(self):
        '''Yield values level by level, left to right.'''
        # Fix: an empty tree previously enqueued None and crashed.
        if self.root is None:
            return
        queue = deque()
        queue.append(self.root)
        while queue:
            leaf = queue.popleft()
            yield leaf.val
            if leaf.left:
                queue.append(leaf.left)
            if leaf.right:
                queue.append(leaf.right)
    def delete(self, val):
        '''Remove *val* from the tree if present.'''
        self.root = self._delete(val, self.root)
        return None
    def _delete(self, val, leaf):
        '''Delete *val* from the subtree rooted at *leaf*; return the new
        subtree root.'''
        def _successor_val(node):
            # Smallest value in *node*'s subtree (leftmost descendant).
            if node.left:
                return _successor_val(node.left)
            else:
                return node.val
        if not leaf:
            return None
        if leaf.val == val:
            if leaf.left and leaf.right:
                # Two children: overwrite with the in-order successor and
                # delete that successor from the right subtree.  The size
                # decrement happens in that recursive call (the original
                # decremented here as well, shrinking _size by 2).
                leaf.val = _successor_val(leaf.right)
                leaf.right = self._delete(leaf.val, leaf.right)
                return leaf
            # One or zero children: splice the child (or None) in.
            self._size -= 1
            return leaf.left or leaf.right
        elif leaf.val < val:
            if leaf.right:
                leaf.right = self._delete(val, leaf.right)
            # Fix: always return *leaf* -- the original returned None when
            # the value was absent, detaching the whole subtree (and
            # clearing the root for a top-level miss).
            return leaf
        else:
            if leaf.left:
                leaf.left = self._delete(val, leaf.left)
            return leaf
    def l_rotate(self, node):
        '''Single rotation promoting *node*'s left child (height-based AVL).'''
        x = node.left
        node.left = x.right
        x.right = node
        node.height = max(self.height(node.right), self.height(node.left)) + 1
        x.height = max(self.height(x.left), node.height) + 1
        return x
    def r_rotate(self, node):
        '''Single rotation promoting *node*'s right child (height-based AVL).'''
        x = node.right
        node.right = x.left
        x.left = node
        node.height = max(self.height(node.right), self.height(node.left)) + 1
        x.height = max(self.height(x.right), node.height) + 1
        return x
    def ll_rotate(self, node):
        '''Double rotation for the left-right imbalance case.'''
        node.left = self.r_rotate(node.left)
        return self.l_rotate(node)
    def rr_rotate(self, node):
        '''Double rotation for the right-left imbalance case.'''
        node.right = self.l_rotate(node.right)
        return self.r_rotate(node)
    def put(self, val):
        '''AVL insert: inserts *val* and rebalances on the way back up.'''
        if not self.root:
            self.root = BSTNode(val)
        else:
            self.root = self._put(val, self.root)
    def _put(self, val, node):
        '''Recursive helper for put(); returns the (possibly rotated)
        subtree root.'''
        if node is None:
            node = BSTNode(val)
        elif val < node.val:
            node.left = self._put(val, node.left)
            if (self.height(node.left) - self.height(node.right)) == 2:
                if val < node.left.val:
                    node = self.l_rotate(node)
                else:
                    node = self.ll_rotate(node)
        elif val > node.val:
            node.right = self._put(val, node.right)
            if (self.height(node.right) - self.height(node.left)) == 2:
                if val < node.right.val:
                    node = self.rr_rotate(node)
                else:
                    node = self.r_rotate(node)
        node.height = max(self.height(node.right), self.height(node.left)) + 1
        return node
    def rotate_left(self, root):
        '''Left rotation for the parent-pointer AVL variant; updates
        heights but not (further) balances.'''
        left = root.is_left()  # remember which side of its parent root was on
        pivot = root.right
        if pivot is None:
            return
        root.right = pivot.left
        if pivot.left is not None:
            root.right.parent = root
        pivot.left = root
        pivot.parent = root.parent
        root.parent = pivot
        if left is None:
            # root was the tree root.
            self.root = pivot
        elif left:
            pivot.parent.left = pivot
        else:
            pivot.parent.right = pivot
        root.update_height(False)
        pivot.update_height(False)
    def rotate_right(self, root):
        '''Right rotation for the parent-pointer AVL variant.'''
        left = root.is_left()
        pivot = root.left
        if pivot is None:
            return
        root.left = pivot.right
        if pivot.right is not None:
            root.left.parent = root
        pivot.right = root
        pivot.parent = root.parent
        root.parent = pivot
        if left is None:
            self.root = pivot
        elif left:
            pivot.parent.left = pivot
        else:
            pivot.parent.right = pivot
        root.update_height(False)
        pivot.update_height(False)
    def find_leftmost(self, node):
        '''Smallest-valued node in *node*'s subtree.'''
        if node.left is None:
            return node
        else:
            return self.find_leftmost(node.left)
    def find_rightmost(self, node):
        '''Largest-valued node in *node*'s subtree.'''
        if node.right is None:
            return node
        else:
            return self.find_rightmost(node.right)
    def find_next(self, val):
        '''In-order successor value of *val*, or None.'''
        node = self.find(val)
        if (node is None) or (node.val != val):
            return None
        right = node.right
        if right is not None:
            node = self.find_leftmost(right)
        else:
            # Walk up until we leave a left subtree.
            parent = node.parent
            while parent is not None:
                if node is parent.left:
                    break
                node = parent
                parent = node.parent
            node = parent
        if node is None:
            return node
        else:
            return node.val
    def find_prev(self, val):
        '''In-order predecessor value of *val*, or None.'''
        node = self.find(val)
        if (node is None) or (node.val != val):
            return None
        left = node.left
        if left is not None:
            # Fix: the predecessor is the *rightmost* node of the left
            # subtree (the original descended leftward).
            node = self.find_rightmost(left)
        else:
            # Walk up until we leave a right subtree.
            parent = node.parent
            while parent is not None:
                if node is parent.right:
                    break
                node = parent
                parent = node.parent
            node = parent
        if node is None:
            return node
        else:
            return node.val
    def find(self, val, node=None):
        '''Return the node holding *val*, or the last node visited while
        searching (callers must check node.val), or None for an empty tree.'''
        if node is None:
            node = self.root
            if self.root is None:
                return None
            else:
                return self.find(val, self.root)
        elif node.val == val:
            return node
        elif val < node.val:
            if node.left is None:
                return node
            else:
                return self.find(val, node.left)
        else:
            if node.right is None:
                return node
            else:
                return self.find(val, node.right)
    def balance(self, node):
        ''' There are four possibilities for rotation
            left-left=LL    right-right=RR
            left-right=LR   right-left=RL'''
        node.update_height(False)
        if node.balance == 2:
            if node.left.balance != -1:
                # LL rotation
                self.rotate_right(node)
                if node.parent.parent is not None:
                    self.balance(node.parent.parent)
            else:
                # LR rotation
                self.rotate_left(node.left)
                self.balance(node)
        elif node.balance == -2:
            if node.right.balance != 1:
                # RR rotation
                self.rotate_left(node)
                if node.parent.parent is not None:
                    self.balance(node.parent.parent)
            else:
                # RL rotation
                self.rotate_right(node.right)
                self.balance(node)
        else:
            if node.parent is not None:
                self.balance(node.parent)
    def sort(self, tree_maker, ascending=True):
        '''Return the unique values of iterable *tree_maker* sorted
        ascending (or descending when *ascending* is False).'''
        b = BST()
        for item in tree_maker:
            b.insert(item)
        # Fix: the original ended with ``return ret_value`` (a NameError)
        # and walked successor/predecessor links; an in-order traversal
        # yields the same ascending sequence.
        ret_val = list(b.in_order())
        if not ascending:
            ret_val.reverse()
        return ret_val
if __name__ == '__main__':
    # Example (disabled): render the tree with graphviz via `dot -Tpng`.
    #    x = range(100)
    #    bst = BST()
    #    for i in x:
    #        bst.put(i)
    #    dot_graph = bst.get_dot()
    #    t = subprocess.Popen(["dot", "-Tpng"], stdin=subprocess.PIPE)
    #    t.communicate(dot_graph)
    def easy_tree():
        # Random insertion order keeps the unbalanced BST reasonably shallow.
        x = random.sample(range(100), 100)
        bst = BST()
        bst.insert(50)
        for i in x:
            bst.insert(i)
        bst.insert(42.1)
        bst.contains(42.1)
    def hard_tree():
        # Sorted insertion order degenerates the unbalanced BST into a
        # linked list (worst case for search).
        x = range(100)
        bst = BST()
        for i in x:
            bst.insert(i)
        bst.insert(42.1)
        bst.contains(42.1)
    # Compare search timings on the well-shaped vs degenerate tree.
    print(timeit.Timer("easy_tree()", setup="from __main__ import easy_tree").timeit(number=1000))
    print(timeit.Timer("hard_tree()", setup="from __main__ import hard_tree").timeit(number=1000))
| |
#!/usr/bin/python
"""program to populate the omero_id into the imageObservation table so we can then index them with pure java from the database and solr experiment index"""
import os
import sys
import os.path
import argparse
import psycopg2
import csv
from OmeroPropertiesParser import OmeroPropertiesParser
def main(argv):
print "running main method of get_omero_ids - using postgresQL directly!!"
parser = argparse.ArgumentParser(
description='Populate omero_ids into a csv file so they can be included in the images core. This version uses postgresQl directly and was implemented for DR12 (30/06/2020)'
)
parser.add_argument('-i', '--input-file', dest='inputFilePath',
required=True,
help='Path to CSV file contiaining images info'
)
parser.add_argument('-o', '--output-file', dest='outputFilePath',
required=True,
help='Path to write CSV file with omero ids'
)
parser.add_argument('--omeroDbUser', dest='omeroDbUser',
help='name of the omero postgres database')
parser.add_argument('--omeroDbPass', dest='omeroDbPass',
help='Password for the omero postgress database')
parser.add_argument('--omeroDbName', dest='omeroDbName',
help='Name of the postgres database omero uses')
parser.add_argument('--omeroDbHost', dest='omeroDbHost',
help='Hostname for the server hosting the omero postgres database')
parser.add_argument('--omeroDbPort', dest='omeroDbPort',
help='Port to connect on the postgres server hosting the omero database')
parser.add_argument('--profile', dest='profile', default='dev',
help='Name of profile from which to read config: ' + \
'dev, prod, live, ... Assumed to be present ' + \
'in configfiles/profilename/application.properties'
)
parser.add_argument('--profile-path', dest='profilePath',
help='Explicit path to file from which to read ' + \
'profile e.g. ' + \
'/home/kola/configfiles/dev/application.properties'
)
parser.add_argument('-d', '--rootDestinationDir', dest='rootDestinationDir',
help='Root directory for destination files were downloaded to'
)
args = parser.parse_args()
# Get values from property file and use as defaults that can be overridden
# by command line parameters
if args.profilePath is not None:
try:
pp = OmeroPropertiesParser()
omeroProps = pp.getOmeroProps(args.profilePath)
except Exception as e:
print "Could not read application properties file from " + args.profilePath
print "Error was: " + str(e)
return
else:
try:
pp = OmeroPropertiesParser(args.profile)
omeroProps = pp.getOmeroProps()
except Exception as e:
print "Could not read application properties file for profile " + args.profile
print "Error was: " + str(e)
return
try:
root_dir = args.rootDestinationDir if args.rootDestinationDir<>None else omeroProps['rootdestinationdir']
# Remove initial '/' if it exists. This is not present in omero db
if root_dir[0] == '/':
root_dir = root_dir[1:]
except Exception as e:
print "Could not assign root_dir from either command line or properties file. Did you specify the right profile? Error message was: " + str(e)
return
# Get Postgres connection for directly querying omero database
try:
print "Attempting to connect directly to Postgres DB"
omeroDbUser = args.omeroDbUser if args.omeroDbUser is not None else omeroProps['omerodbuser']
omeroDbPass = args.omeroDbPass if args.omeroDbPass is not None else omeroProps['omerodbpass']
omeroDbName = args.omeroDbName if args.omeroDbName is not None else omeroProps['omerodbname']
omeroDbHost = args.omeroDbHost if args.omeroDbHost is not None else omeroProps['omerodbhost']
if args.omeroDbPort is not None:
omeroDbPort = args.omeroDbPort
elif 'omerodbport' in omeroProps:
omeroDbPort = omeroProps['omerodbport']
else:
omeroDbPort = '5432'
psqlConn = psycopg2.connect(database=omeroDbName, user=omeroDbUser,
password=omeroDbPass, host=omeroDbHost,
port=omeroDbPort)
print "Connected to Postgres DB"
except KeyError as e:
print "Could not connect to omero postgres database. Key " + str(e) + \
" not present in omero properties file. Aborting!"
sys.exit()
except Exception as e:
print "Could not connect to omero postgres database. Error: " + str(e)
sys.exit()
# Get project and Dataset ids for querying omero image records
project_dict = get_project_and_dataset_ids(psqlConn)
# Count number of rows to process
with open(args.inputFilePath, 'rb') as fid:
csv_reader = csv.reader(fid)
n_rows = sum(1 for row in csv_reader)
str_n_rows = str(n_rows-1)
# Get handle to csv file and update records
rows_processed = 0
omero_ids_obtained = 0
with open(args.inputFilePath, 'rb') as fid:
pg_cur = psqlConn.cursor()
csv_reader = csv.reader(fid)
# Process header
# For Omero 5.6.3 (Python3 - use pandas dataframe)
header = csv_reader.next()
try:
download_file_path_idx = header.index("download_file_path")
phenotyping_center_idx = header.index("phenotyping_center")
pipeline_stable_idx = header.index("pipeline_stable_id")
procedure_stable_idx = header.index("procedure_stable_id")
parameter_stable_idx = header.index("parameter_stable_id")
except ValueError as e:
print "Fatal Error:"
print str(e), header
print "Exiting"
sys.exit(-1)
header += ["omero_id",]
# Get handle for writing updated records
with open(args.outputFilePath, 'wb') as fid_out:
csv_writer = csv.writer(fid_out)
csv_writer.writerow(header)
# Update omero ids for each line in input
for row in csv_reader:
rows_processed += 1
if rows_processed % 1000 == 0:
print "Processed " + str(rows_processed) + " of " + str_n_rows
download_file_path=row[download_file_path_idx].lower()
if (download_file_path.find('mousephenotype.org') < 0 and \
download_file_path.find('file:') < 0) or \
download_file_path.endswith('.mov') or \
download_file_path.endswith('.fcs') or \
download_file_path.endswith('.nrrd') or \
download_file_path.endswith('.bz2'):
row.append("-1")
csv_writer.writerow(row)
continue
project_name = row[phenotyping_center_idx]
pipeline_stable_id = row[pipeline_stable_idx]
procedure_stable_id = row[procedure_stable_idx]
parameter_stable_id = row[parameter_stable_idx]
imagename = os.path.split(download_file_path)[-1]
image_nfs_path = os.path.join(root_dir, project_name,pipeline_stable_id,procedure_stable_id,parameter_stable_id,imagename)
dataset_name = "-".join([project_name, pipeline_stable_id, procedure_stable_id, parameter_stable_id])
try:
project_ids = project_dict[project_name].keys()
except KeyError as e:
message = "ERROR: Could not get project details for image " + imagename + " in dataset " + dataset_name + ". KeyError was: " + str(e)
print message
row.append("-1")
csv_writer.writerow(row)
continue
# In the following loop we search for the omero ID using the
# project name and dataset name. This is complicated by
# some project names and/or dataset names being duplicated
# in omero. We therefore map the project names and dataset
# names to their respective keys and loop through both
# project ids and dataset ids. We exit
# the loop(s) once a valid omero_id has been found.
omero_id = -1
error_message = ""
for project_id in project_ids:
# If we cannot find the project ID we don't even bother
# going further.
try:
dataset_ids = project_dict[project_name][project_id][dataset_name]
except KeyError as e:
error_message += "ERROR: Could not get dataset details for image " + imagename + " in dataset " + dataset_name + ". KeyError was: " + str(e) + "\n"
continue
for dataset_id in dataset_ids:
if imagename.endswith('pdf'):
image_nfs_dir = os.path.join("/",root_dir, project_name,pipeline_stable_id,procedure_stable_id,parameter_stable_id)
query = "SELECT a.id FROM annotation a " + \
"INNER JOIN datasetannotationlink dsal ON " +\
"a.id=dsal.child " + \
"INNER JOIN originalfile of ON a.file=of.id " + \
"WHERE dsal.parent=" + str(dataset_id) + \
" AND of.path='" + image_nfs_dir + "'" + \
" AND of.name='" + imagename + "'"
# Special case for 3i Ear epidemis images
elif imagename.find('.lif')>0 or imagename.find('.lei')>0:
query = "SELECT DISTINCT i.id, i.name FROM " + \
"image i INNER JOIN " + \
"datasetimagelink dsil ON i.id=dsil.child " + \
"INNER JOIN filesetentry fse ON " + \
"i.fileset=fse.fileset " + \
"WHERE LOWER(i.name)='" + imagename + "'"
else:
query = "SELECT i.id FROM image i INNER JOIN " + \
"datasetimagelink dsil ON i.id=dsil.child " + \
"INNER JOIN filesetentry fse ON " + \
"i.fileset=fse.fileset " + \
"WHERE dsil.parent=" + str(dataset_id) + \
" AND LOWER(fse.clientpath)=LOWER('" + image_nfs_path + \
"') AND LOWER(i.name)='" + imagename + "'"
pg_cur.execute(query)
omero_ids = pg_cur.fetchall()
n_omero_ids = len(omero_ids)
if n_omero_ids == 0:
error_message += "ERROR: Got 0 omero_ids instead of 1. Not updating omero_id for " + image_nfs_path + "\n"
elif n_omero_ids > 1:
error_message = "WARNING: Got " + str(n_omero_ids) + " omero_ids instead of 1 - using last in list for " + image_nfs_path + "\n"
omero_id = omero_ids[-1][0]
omero_ids_obtained += 1
break
else:
# We have found a valid omero_id - exit the loop
error_message = ""
omero_id = omero_ids[0][0]
omero_ids_obtained += 1
break
# If we have a valid omero ID this record has been
# successfully processed - move to next one
if omero_id != -1:
break
if len(error_message) > 0:
print error_message
row.append(str(omero_id))
csv_writer.writerow(row)
psqlConn.close()
print "Got " + str(omero_ids_obtained) + " omero ids from " + str_n_rows + " records"
def get_project_and_dataset_ids(psqlConn):
    """
    Return a dict with all projects and dataset ids in omero.

    Structure: {project_name: {project_id: {dataset_name: [dataset_id, ...]}}}
    Project and dataset names can be duplicated in omero, hence the
    nested id maps and the lists of dataset ids.

    :param psqlConn: open psycopg2-style connection to the omero database.
    """
    pg_cur = psqlConn.cursor()
    # All relevant projects are owned by the omero 'root' experimenter.
    query = "SELECT id FROM experimenter WHERE lastname='root'"
    pg_cur.execute(query)
    my_expId = str(pg_cur.fetchone()[0])
    query = "SELECT id, name FROM project WHERE owner_id=" + my_expId
    pg_cur.execute(query)
    projects = pg_cur.fetchall()
    project_dict = {}
    for project_id, project_name in projects:
        if len(project_name) == 0:
            # Skip unnamed projects.
            continue
        # Fix/idiom: dict.setdefault replaces the has_key() calls (removed
        # in Python 3) and handles duplicated project names in one step.
        project_dict.setdefault(project_name, {})[project_id] = {}
        query = "Select ds.id, ds.name from dataset ds inner join projectdatasetlink pdsl on ds.id=pdsl.child where pdsl.parent="+str(project_id)
        pg_cur.execute(query)
        datasets = pg_cur.fetchall()
        dataset_map = project_dict[project_name][project_id]
        for dataset_id, dataset_name in datasets:
            dataset_map.setdefault(dataset_name, []).append(dataset_id)
    return project_dict
# Script entry point: forward the command-line arguments (minus the
# program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
| |
import sys
import logging
from time import time
import traceback
import anyjson
from django.http import HttpResponse
from django.contrib.admin.models import LogEntry
from django.db.models.aggregates import Max
from django.core.urlresolvers import reverse
from ella_newman import models
from ella_newman.conf import newman_settings
from ella.core.models import Category
# Module-level logger shared by the newman admin utilities below.
log = logging.getLogger('newman')
class Profiler:
    "Provides measurement of time spent in named blocks of code."

    def __init__(self):
        # Timers are reported (and totalled) in creation order.
        self.timers = []

    def create_timer(self, name):
        """Create, register and return a new (not yet started) ProfilerTimer."""
        t = ProfilerTimer(name)
        self.timers.append(t)
        return t

    def create_started_timer(self, name):
        """Create, register and return a ProfilerTimer that is already running."""
        t = self.create_timer(name)
        t.start()
        return t

    def log_summary(self, logger_callback):
        """Emit one line per timer plus a TOTAL line via logger_callback(str)."""
        total = 0.0
        for t in self.timers:
            logger_callback(
                '%05.03f msec elapsed in %s' %
                (
                    t.get_msec(),
                    t.name,
                )
            )
            total += t.get_msec()
        logger_callback('TOTAL: %05.03f msec.' % total)

    @property
    def has_data(self):
        """True when at least one timer has been registered."""
        return len(self.timers) > 0

    def reset(self):
        """Discard all registered timers.

        Clears the list in place (same effect as the original pop-loop:
        the list object itself is kept, so external references stay valid).
        """
        del self.timers[:]
# This shared instance should be used for creating timers and for
# profiling blocks of code; profiled_section() records its timers here.
PROFILER = Profiler()
class ProfilerTimer:
    "Measures one named block of code."

    def __init__(self, name):
        self.name = name
        # elapsed_time holds the last measured duration in seconds;
        # begin == 0.0 doubles as the "not started" sentinel.
        self.elapsed_time = 0.0
        self.begin = 0.0

    def get_sec(self):
        """Measured duration in seconds."""
        return self.elapsed_time

    def get_msec(self):
        """Measured duration in milliseconds."""
        return 1000 * self.elapsed_time

    def start(self):
        """Record the start timestamp; a second call raises AttributeError."""
        if self.begin:
            raise AttributeError('Timer already started!')
        self.begin = time()

    def stop(self):
        """Store the elapsed time since start(); requires a prior start()."""
        if not self.begin:
            raise AttributeError('Timer not started!')
        self.elapsed_time = time() - self.begin
def profiled_section(func):
    """Decorator: time every call of ``func`` on the shared PROFILER.

    The timer name combines the decorated function's name with the call
    site (caller function, file and line) taken from the current stack.
    """
    def decorated(*args, **kwargs):
        # One frame above ``decorated`` is the actual caller of ``func``.
        filename, lineno, caller_name = traceback.extract_stack()[-2][:3]
        timer_name = '[%s] called from [%s] (%s:%d)' % (
            func.__name__, caller_name, filename, lineno)
        timer = PROFILER.create_started_timer(timer_name)
        result = func(*args, **kwargs)
        # NB: intentionally not in a finally-block, matching the original:
        # the timer is only stopped when ``func`` returns normally.
        timer.stop()
        return result
    return decorated
def json_encode(data):
    """ Encode python data into JSON. Try faster cjson first. """
    # anyjson delegates to whichever JSON backend it selected at import time.
    return anyjson.serialize(data)
def json_decode(data):
    """Decode a JSON string into python data.

    The parameter was renamed from ``str`` (which shadowed the builtin);
    all in-module callers pass it positionally, so this is call-compatible.
    """
    return anyjson.deserialize(data)
def JsonResponse(message, data=None, errors=None, status=newman_settings.STATUS_OK, http_status=newman_settings.HTTP_OK):
    """ Return JSON response in newman's standard format.

    Args:
        message: human readable status message.
        data: payload dict, or a JSON string (decoded before embedding).
        errors: validation errors; any truthy value forces HTTP status 405.
        status: application-level status code.
        http_status: HTTP status code of the response.

    Note: the ``data``/``errors`` defaults changed from mutable ``{}``
    literals to None (same falsy behaviour, no shared-state risk).
    """
    out_dict = {
        'status': status,
        'message': message,
    }
    if data:
        # if data contains JSON data, first try to decode them
        if isinstance(data, (str, unicode)):
            try:
                data = json_decode(data)
            except ValueError as e:
                # Keep the raw string in the payload when it is not valid JSON.
                log.info('%s, data=[%s]' % (e, data))
        out_dict['data'] = data
    if errors:
        # Validation errors are reported as "method not allowed".
        http_status = 405
        out_dict['errors'] = errors
    out = json_encode(out_dict)
    return HttpResponse(out, mimetype='text/plain;charset=utf-8', status=http_status)
def JsonResponseError(message, status=newman_settings.STATUS_GENERIC_ERROR):
    """Return an error JSON response when a single message describes the
    error well; delegates to JsonResponse with the error HTTP status."""
    return JsonResponse(message, status=status, http_status=newman_settings.HTTP_ERROR)
def JsonResponseRedirect(location):
    """Return an HTTP 200 response whose JSON dict carries a redirect_to
    field pointing at ``location``."""
    payload = json_encode({
        'status': newman_settings.STATUS_JSON_REDIRECT,
        'redirect_to': location,
    })
    response = HttpResponse(payload, mimetype='text/plain;charset=utf-8',
                            status=newman_settings.HTTP_OK)
    # The target is duplicated in a header so clients can read it cheaply.
    response['Redirect-To'] = location
    return response
def decode_category_filter_json(data):
    """Decode a JSON array of category ids, coercing every id to int.

    A list comprehension replaces ``map(lambda cid: int(cid), ...)`` so a
    real list is returned on both Python 2 and Python 3.
    """
    return [int(cid) for cid in json_decode(data)]
def set_user_config(user, key, value):
    """Store one user-defined configuration entry on ``user``.

    The entry goes into the dict held in the attribute named by
    newman_settings.USER_CONFIG, creating that dict on first use.
    """
    attr = newman_settings.USER_CONFIG
    if not hasattr(user, attr):
        setattr(user, attr, {})
    getattr(user, attr)[key] = value
def set_user_config_db(user, key, value):
    """Persist one user-configuration entry as an AdminSetting row.

    The value is stored JSON-encoded in the ``val`` column; an existing
    row for (user, key) is updated, otherwise a new one is created.
    """
    # set AdminSetting data
    obj, status = models.AdminSetting.objects.get_or_create(
        user = user,
        var = key
    )
    # NOTE(review): the '%s' wrap looks redundant -- json_encode already
    # returns a string; confirm before simplifying.
    obj.val = '%s' % json_encode(value)
    obj.save()
def set_user_config_session(session, key, value):
    """Store one user-configuration entry in the (Django-style) session.

    Values whose key is listed in newman_settings.JSON_CONVERSIONS are kept
    in their decoded form so later reads need no conversion.
    """
    # set session data
    if newman_settings.USER_CONFIG not in session:
        session[newman_settings.USER_CONFIG] = dict()
    conf = session[newman_settings.USER_CONFIG]
    callback = _get_decoder(key)
    if not callback:
        conf[key] = value
    else:
        # As there is JSON decode callback, keep data in session decoded.
        conf[key] = callback(json_encode(value))
    # NOTE(review): this stores the whole config dict under ``key``;
    # ``session[newman_settings.USER_CONFIG] = conf`` looks like the intent
    # (e.g. to mark the session modified) -- confirm before changing.
    session[key] = conf
def _get_decoder(key):
    """Return the module-level decode callback registered for ``key`` in
    newman_settings.JSON_CONVERSIONS, or None when there is none."""
    for conv_key, func_name in newman_settings.JSON_CONVERSIONS:
        if conv_key != key:
            continue
        # The conversion value names a function defined in this module.
        return getattr(sys.modules[__name__], func_name)
def get_user_config(user, key):
    """
    Returns user defined configuration from user.config with fallback to AdminSetting.
    If AdminSetting is reached data_decode_callback is used to transform saved data
    from JSON to proper format (i.e. all list items convert to int). Default
    data_decode_callback only decodes data from JSON.

    Returns None when the key is neither cached on the user nor stored in
    the database.
    """
    cfg = getattr(user, newman_settings.USER_CONFIG, {})
    if key not in cfg:
        try:
            db_data = models.AdminSetting.objects.get(user=user, var=key)
        except models.AdminSetting.DoesNotExist:
            return None
        # find appropriate callback to convert JSON data.
        callback = _get_decoder(key)
        if not callback:
            callback = json_decode
        # Read the same column set_user_config_db() writes ('val', not
        # 'value') -- the original attribute name looked like a typo.
        return callback(db_data.val)
    return cfg[key]
def flag_queryset(queryset, flag, value):
    """Attach (or overwrite) a bookkeeping flag on ``queryset``.

    Flags live in an ad-hoc ``_filter_flags`` dict created on first use.
    """
    if not hasattr(queryset, '_filter_flags'):
        queryset._filter_flags = {}
    queryset._filter_flags[flag] = value
def get_queryset_flag(queryset, flag):
    """Return the value stored for ``flag`` by flag_queryset(), or False
    when the flag (or the flag dict itself) is absent."""
    return getattr(queryset, '_filter_flags', {}).get(flag, False)
def copy_queryset_flags(qs_dest, qs_src):
    """Carry the ``_filter_flags`` bookkeeping dict over from qs_src to
    qs_dest (an empty dict when qs_src has no flags)."""
    qs_dest._filter_flags = getattr(qs_src, '_filter_flags', {})
def user_category_filter(queryset, user):
    """
    Returns Queryset containing only user's prefered content (filtering based on categories).
    If queryset.model has no relation to ella.core.models.Category, original queryset is returned.
    """
    # Local import avoids a circular dependency with ella_newman.permission.
    from ella_newman.permission import model_category_fk, is_category_model
    qs = queryset
    category_fk = model_category_fk(qs.model)
    if not category_fk:
        # Model has no category foreign key at all - nothing to filter on.
        return qs
    root_category_ids = get_user_config(user, newman_settings.CATEGORY_FILTER)
    if not root_category_ids: # user has no custom category filter set or his filter set is empty.
        return qs
    if not user.is_superuser:
        # Non-superusers: restrict to the categories their roles grant
        # beneath the selected root categories (denormalized role table).
        helper = models.DenormalizedCategoryUserRole.objects.filter(
            user_id=user.pk,
            root_category_id__in=root_category_ids
        ).values('category_id')
        user_categories = [c['category_id'] for c in helper]
        if is_category_model(qs.model):
            lookup = 'id__in'
        else:
            lookup = '%s__in' % category_fk.name
        out = qs.filter(**{lookup: user_categories})
    else:
        # Superusers: filter by the sites of the selected root categories
        # rather than by role-granted categories.
        cats = Category.objects.filter(pk__in=root_category_ids).values('site__pk')
        user_sites = [c['site__pk'] for c in cats]
        if is_category_model(qs.model):
            lookup = 'site__id__in'
        else:
            lookup = '%s__site__id__in' % category_fk.name
        out = qs.filter(**{lookup: user_sites})
    # Mark the result so callers can detect the narrowing
    # (see is_user_category_filtered()).
    flag_queryset(out, 'user_category_filtered', True)
    return out
def is_user_category_filtered(queryset):
    """True when user_category_filter() has already narrowed this queryset."""
    return get_queryset_flag(queryset, 'user_category_filtered')
def get_log_entries(limit=15, filters=None):
    """Return the newest admin LogEntry per edited object.

    Groups log entries by (object_id, content_type) and keeps the entry
    with the highest id of each group, ordered by last edit time.

    Args:
        limit: maximum number of objects to report on.
        filters: optional dict of extra queryset field lookups.

    ``filters=None`` replaces a shared mutable ``{}`` default argument.
    """
    if filters is None:
        filters = {}
    entry_ids = LogEntry.objects.values('object_id', 'content_type_id').annotate(last_edit=Max('action_time'), max_id=Max('id')).filter(**filters).order_by('-last_edit')[:limit]
    return LogEntry.objects.filter(pk__in=[i['max_id'] for i in entry_ids])
# newman url for object for other apps, FEs...
def get_newman_url(obj):
"""return valid admin edit page url"""
model = obj.__class__
info = model._meta.app_label, model._meta.module_name
return reverse('newman:%s_%s_change' % info, args=(obj._get_pk_val(),))
# from text.py (ella.utils)
import unicodedata
def __unicode_to_ascii(text):
    """Discard diacritical (combining) marks from unicode ``text``."""
    # NFKD decomposition splits accented characters into base character
    # plus combining mark; dropping the marks leaves the plain letters.
    decomposed = unicodedata.normalize('NFKD', text)
    kept = [ch for ch in decomposed if not unicodedata.combining(ch)]
    return ''.join(kept)
def cz_compare(a, b):
    """ a, b parameters should be strings or unicodes. """
    # Compare with diacritics stripped so accented (Czech) strings sort
    # next to their ASCII counterparts.
    # NOTE: Python 2 only -- relies on the ``unicode`` and ``cmp`` builtins.
    ma = __unicode_to_ascii(unicode(a))
    mb = __unicode_to_ascii(unicode(b))
    # Classic comparator contract: negative / zero / positive.
    return cmp(ma, mb)
| |
import numpy as np
from pyne import material
from pyne.simplesim import cards
from pyne.simplesim import definition
from pyne.simplesim import inputfile
import pyne.simplesim.nestedgeom as ng
class InfLattice(object):
    """Creates an MCNP input file named `ex_simplesim_inflattice` for a 2-cell
    (fuel and moderator) infinite lattice reactor. The user executes this code
    by instantiating an object of this class and calling :py:meth:`write`::

        inflat = InfLattice()
        inflat.write()

    as is done below. The user can then manipulate the input, and observe the
    change in the output::

        inflat.pin.radius = 0.45
        inflat.write()
        inflat.sim.source['criticality'].keff_guess = 1.5
        inflat.write()

    """
    def __init__(self):
        """Build the complete system definition (materials, surfaces, cells)
        and the simulation definition (source, tallies, misc cards)."""
        ## Define the system: materials, surfaces, regions, cells.
        self.sys = definition.SystemDefinition(verbose=False)
        ## Materials.
        # Must provide a name as a keyword argument for material cards. See the
        # documentation for :py:mod:`pyne.material` for more information.
        uo2 = material.from_atom_frac({'U235': 0.05, 'U238': 0.95, 'O16' : 2.00})
        self.uo2 = cards.Material(uo2, name='UO2')
        # NOTE(review): the name is passed via ``attrs`` here but via the
        # ``name`` keyword for UO2 above -- confirm both routes are equivalent.
        h2o = material.from_atom_frac({'H1' : 2.0, 'O16': 1.0}, attrs={'name': 'H2O'})
        self.h2o = cards.Material(h2o)
        ## Surfaces.
        # There are two surfaces: one for the pin and one for the unit cell
        # boundary.
        radius = 0.40
        # This creates an axis-aligned and axis-centered cylinder along the z
        # axis, with radius 0.40 cm.
        self.pin = cards.AxisCylinder('pin', 'Z', radius)
        # The Parallelepiped is a macrobody. The surface is reflecting,
        # creating an infinte geometry. The surface is infinite in the z
        # direction.
        pitch = 1.2
        self.cellbound = cards.Parallelepiped('bound',
                -pitch / 2, pitch / 2, -pitch / 2, pitch / 2, 0, 0,
                reflecting=True)
        ## Cells.
        # We combine the materials and surfaces above into cells. We use MCNP
        # cells in order to specify particle importances and volumes directly
        # on the cell card. We could alternatively use the
        # :py:class:`Importance` and :py:class:`Volume` cards.
        # fuel cell.
        # The fuel is the region of space inside the pin, pin.neg.
        self.fuelregion = self.pin.neg
        # The neutron importance is 1, and the user-provided volume is 1 cm^3.
        self.fuel = cards.CellMCNP('fuel', self.fuelregion, self.uo2,
                11.0, 'g/cm^3',
                importance=('neutron', 1),
                volume=1)
        # coolant cell.
        # The region is between the pin and the unit cell boundary.
        # NOTE(review): union (|) is used although the prose says "between";
        # the pseudo-code notes later in this file use & -- confirm the
        # intended operator for the macrobody region.
        self.coolantregion = self.pin.pos | self.cellbound.neg
        self.coolant = cards.CellMCNP('coolant', self.coolantregion, self.h2o,
                1.0, 'g/cm^3',
                importance=('neutron', 1),
                volume=1)
        # graveyard cell: where particles go to die.
        # The region is everything beyond the unit cell boundary.
        self.graveyardregion = self.cellbound.pos
        # This is a void cell, meaning it does not have a material.
        self.graveyard = cards.CellMCNP('graveyard', self.graveyardregion,
                importance=('neutron', 0))
        # We add the cells to the system. The order we add them is the order
        # they are printed in the input file.
        self.sys.add_cell(self.fuel)
        # We can add multiple cells at once.
        self.sys.add_cell(self.coolant, self.graveyard)
        ## Define the simulation: sources, tallies, misc. Don't clutter the
        # command window.
        self.sim = definition.MCNPSimulation(self.sys, verbose=False)
        # Specify a thermal scattering law for the H2O material. This is a
        # unique card per material.
        self.sim.add_misc(cards.ScatteringLaw('H2O', {'H1': 'lwtr'}))
        # Add a criticality source, use default values. This is a unique card,
        # so we do not provide a card name.
        self.sim.add_source(cards.Criticality())
        # Add points at which to start neutrons; use default point (0, 0, 0).
        self.sim.add_source(cards.CriticalityPoints())
        # Tally neutron flux in both the fuel and coolant cells.
        self.sim.add_tally(cards.CellFlux('flux', 'neutron',
                ['fuel', 'coolant']))
        # The energy grid on which to tally neutrons, applied to all tallies.
        self.sim.add_misc(cards.EnergyGrid('egrid0', None,
                10**np.arange(-9.9, 1.1, 0.1)))

    def write(self):
        """Writes the input to 'ex_simplesim_inflattice'."""
        # Create input file, specifying the title of the input.
        self.inp = inputfile.MCNPInput(self.sim, title="Infinite lattice.")
        self.inp.write('ex_simplesim_inflattice')
# Script usage: build the example problem and emit the MCNP input file.
if __name__ == '__main__':
    # Create all relevant objects for the infinite lattice example.
    inflat = InfLattice()
    # Write to a file.
    inflat.write()
###################################
#### Some example Notes follow! ###
###################################
"""
# super brief
rxr = simplesim.definition.SystemDefinition()
pinsurf = cards.AxisCylinder('fuelpin', 'x', 0.40)
rxr.add_cell(cards.CellMCNP('fuel', pinsurf.neg,
material.from_atom_frac({'U235': 0.05, 'U238': 0.95, 'O16': 2.0},
name='UO2'),
neutron_imp=1))
pitch = 1.2
boundsurf = cards.Parallelepiped('bound',
-pitch / 2, pitch / 2, -pitch / 2, pitch / 2,
0, 0, reflecting=True),
rxr.add_cell(cards.CellMCNP('coolant', pinsurf.pos & boundsurf.neg,
material.from_atom_frac({'H1': 2.0, 'O16': 1.0}, name='H2O'),
neutron_imp=1))
rxr.add_cell(cards.CellVoidMCNP('graveyard', boundsurf.pos, neutron_imp=0))
"""
"""
# Geometry and materials
# Materials.
uo2 = material.from_atom_frac({'U235': 0.05,
'U238': 0.95,
'O16' : 2.0}, name='UO2')
h2o = material.from_atom_frac({'H1' : 2.0,
'O16': 1.0}, name='H2O')
# Surfaces.
radius = 0.40 # cm
pin = cards.AxisCylinder('fuelpin', 'X', radius)
pitch = 1.2 # cm
cellbound = cards.Parallelepiped('bound',
-pitch / 2, pitch / 2, -pitch / 2, pitch / 2, 0, 0,
reflecting=True)
# Cells.
fuel = cards.CellMCNP('fuel', pin.neg, uo2, 11.0, 'g/cm^3',
neutron_imp=1)
coolant = cards.CellMCNP('coolant', pin.pos & cellbound.neg, h2o,
1.0, 'g/cm^3',
neutron_imp=1)
graveyard = cards.CellVoidMCNP('graveyard', cellbound.pos, neutron_imp=0)
# Create system definition from the cards above.
rxr = definition.SystemDefinition()
rxr.add_cell(fuel)
rxr.add_cell(coolant)
rxr.add_cell(graveyard)
# The system definition is complete.
# Simulation definition.
sim = definition.MCNPSimulation(rxr)
sim.add_source(cards.Criticality())
sim.add_source(cards.CriticalityPoints())
fueltally = cards.CellFlux('fuel', 'neutron', fuel)
coolanttally = cards.CellFlux('coolant', 'neutron', coolant)
egrid = cards.EnergyGrid('grid0', None, 10**np.arange(-9.9, 1.1, .1))
sim.add_tally(fueltally)
sim.add_tally(coolanttally)
sim.add_misc(egrid)
inp = inputfile.MCNPInput("input1", sim)
print fuel.mcnp(sim)
#rxr.save('test')
"""
"""
#
opts = definition.SimulationDefinition(rxr)
opts.add_card(cards.CriticalitySource(1000, 1, 1, 1))
opts.add_card(cards.CriticalityPoints([[0, 0, 0]]))
rxr.save("system1")
opts.save("options1")
enrichments = [0.01, 0.02, 0.03]
for this_enrich in enrichments:
rxr.material['UO2'] = material(blah blah)
inp = MCNPInput("input1", rxr, opts)
inp.write()
inp.system.material['UO2'] = material(blah blah)
inp.options.card[crit_source_name].cycles = 1000
inp.write()
"""
"""
# Create cards.
channel = cards.AxisCylinder("channel", 'X', 2.54)
leftbound = cards.AxisPlane("leftbound", 'X', -500.0)
rightbound = cards.AxisPlane("rightbound", 'X', 500.0)
polycyl = cards.AxisCylinder("polycyl", 'X', 17.54)
tungstencyl = cards.AxisCylinder("tungstencyl", 'X', 27.54)
coppercyl = cards.AxisCylinder("coppercyl", 'X', 28.04)
shieldleft = cards.AxisPlane("shieldleft", 'X', -25.0)
shieldright = cards.AxisPlane("shieldright", 'X', 25.0)
aperturecyl = cards.AxisCylinder("aperturecyl", 'Z', 0.25)
half = cards.AxisPlane("half", 'Z', 0.0)
gravecyl = cards.AxisCylinder("gravecyl", 'X', 33.04)
# Make regions.
pipemid = leftbound.pos & rightbound.neg # & channel.neg
polyshield = (shieldleft.pos & shieldright.neg &
channel.pos & polycyl.neg &
aperturecyl.pos )
tungstenshield = (shieldleft.pos & shieldright.neg &
polycyl.pos & tungstencyl.neg &
aperturecyl.pos)
coppershield = (shieldleft.pos & shieldright.neg &
tungstencyl.pos & coppercyl.neg &
aperturecyl.pos)
aperture = aperturecyl.neg & channel.pos & coppercyl.neg
usefulvoid = ((channel.pos & gravecyl.neg &
leftbound.pos & shieldleft.neg)
|
(channel.pos & gravecyl.neg &
shieldright.pos & rightbound.neg)
|
(tungstencyl.pos & gravecyl.neg &
shieldleft.pos & shieldright.neg))
uselessvoid = (leftbound.neg | rightbound.pos |
(leftbound.pos & leftbound.neg &
gravecyl.pos))
print pipemid.comment()
print polyshield.comment()
"""
"""
rxr.add_lattice
rxr.add_universe
cards.CellbyUniverse
perhaps add all surfaces and materials at once.
"""
# TODO show a bunch of ways to do a single simulation.
| |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/logging/v2/logging.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.logging.v2 LoggingServiceV2 API."""
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.api import monitored_resource_pb2
from google.logging.v2 import log_entry_pb2
from google.logging.v2 import logging_pb2
_PageDesc = google.gax.PageDescriptor
class LoggingServiceV2Api(object):
    """Service for ingesting and querying logs."""
    # NOTE: generated GAPIC client -- per the file header, hand edits
    # should be limited to method and file documentation.

    SERVICE_ADDRESS = 'logging.googleapis.com'
    """The default address of the service."""

    DEFAULT_SERVICE_PORT = 443
    """The default port of the service."""

    _CODE_GEN_NAME_VERSION = 'gapic/0.1.0'

    _GAX_VERSION = pkg_resources.get_distribution('google-gax').version

    # Paging wiring per method: (request token field, response token field,
    # repeated response field holding one page of results).
    _PAGE_DESCRIPTORS = {
        'list_log_entries': _PageDesc('page_token', 'next_page_token',
                                      'entries'),
        'list_monitored_resource_descriptors':
        _PageDesc('page_token', 'next_page_token', 'resource_descriptors')
    }

    # The scopes needed to make gRPC calls to all of the methods defined in
    # this service
    _ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform',
                   'https://www.googleapis.com/auth/cloud-platform.read-only',
                   'https://www.googleapis.com/auth/logging.admin',
                   'https://www.googleapis.com/auth/logging.read',
                   'https://www.googleapis.com/auth/logging.write', )

    # Templates used to build and parse resource name strings.
    _PROJECT_PATH_TEMPLATE = path_template.PathTemplate('projects/{project}')
    _LOG_PATH_TEMPLATE = path_template.PathTemplate(
        'projects/{project}/logs/{log}')
    @classmethod
    def project_path(cls, project):
        """Returns a fully-qualified project resource name string.

        Args:
          project (string): The project id.
        Returns:
          A string of the form ``projects/<project>``.
        """
        return cls._PROJECT_PATH_TEMPLATE.render({'project': project, })

    @classmethod
    def log_path(cls, project, log):
        """Returns a fully-qualified log resource name string.

        Args:
          project (string): The project id.
          log (string): The log id.
        Returns:
          A string of the form ``projects/<project>/logs/<log>``.
        """
        return cls._LOG_PATH_TEMPLATE.render({
            'project': project,
            'log': log,
        })

    @classmethod
    def match_project_from_project_name(cls, project_name):
        """Parses the project from a project resource.

        Args:
          project_name (string): A fully-qualified path representing a project
            resource.
        Returns:
          A string representing the project.
        """
        return cls._PROJECT_PATH_TEMPLATE.match(project_name).get('project')

    @classmethod
    def match_project_from_log_name(cls, log_name):
        """Parses the project from a log resource.

        Args:
          log_name (string): A fully-qualified path representing a log
            resource.
        Returns:
          A string representing the project.
        """
        return cls._LOG_PATH_TEMPLATE.match(log_name).get('project')

    @classmethod
    def match_log_from_log_name(cls, log_name):
        """Parses the log from a log resource.

        Args:
          log_name (string): A fully-qualified path representing a log
            resource.
        Returns:
          A string representing the log.
        """
        return cls._LOG_PATH_TEMPLATE.match(log_name).get('log')
    def __init__(self,
                 service_path=SERVICE_ADDRESS,
                 port=DEFAULT_SERVICE_PORT,
                 channel=None,
                 metadata_transformer=None,
                 ssl_creds=None,
                 scopes=None,
                 client_config=None,
                 app_name='gax',
                 app_version=_GAX_VERSION):
        """Constructor.

        Args:
          service_path (string): The domain name of the API remote host.
          port (int): The port on which to connect to the remote host.
          channel (:class:`grpc.beta.implementations.Channel`): A ``Channel``
            object through which to make calls.
          ssl_creds (:class:`grpc.beta.implementations.ClientCredentials`):
            A `ClientCredentials` for use with an SSL-enabled channel.
          client_config (dict):
            A dictionary for call options for each method. See
            :func:`google.gax.construct_settings` for the structure of
            this data. Falls back to the default config if not specified
            or the specified config is missing data points.
          metadata_transformer (Callable[[], list]): A function that creates
            the metadata for requests.
          app_name (string): The codename of the calling service.
          app_version (string): The version of the calling service.
        Returns:
          A LoggingServiceV2Api object.
        """
        # Default to the full scope set this service requires.
        if scopes is None:
            scopes = self._ALL_SCOPES
        if client_config is None:
            client_config = {}
        # Identify the calling library/runtime in the request metadata.
        goog_api_client = '{}/{} {} gax/{} python/{}'.format(
            app_name, app_version, self._CODE_GEN_NAME_VERSION,
            self._GAX_VERSION, platform.python_version())
        metadata = [('x-goog-api-client', goog_api_client)]
        # Merge the bundled per-method defaults (timeouts, retries, paging)
        # with any user-supplied overrides.
        default_client_config = json.loads(
            pkg_resources.resource_string(
                __name__, 'logging_service_v2_client_config.json').decode())
        defaults = api_callable.construct_settings(
            'google.logging.v2.LoggingServiceV2',
            default_client_config,
            client_config,
            config.STATUS_CODE_NAMES,
            kwargs={'metadata': metadata},
            page_descriptors=self._PAGE_DESCRIPTORS)
        # Create the gRPC stub over a supplied or newly created channel.
        self.stub = config.create_stub(
            logging_pb2.beta_create_LoggingServiceV2_stub,
            service_path,
            port,
            ssl_creds=ssl_creds,
            channel=channel,
            metadata_transformer=metadata_transformer,
            scopes=scopes)
        # Wrap each stub method in a GAX callable bound to its settings.
        self._delete_log = api_callable.create_api_call(
            self.stub.DeleteLog, settings=defaults['delete_log'])
        self._write_log_entries = api_callable.create_api_call(
            self.stub.WriteLogEntries, settings=defaults['write_log_entries'])
        self._list_log_entries = api_callable.create_api_call(
            self.stub.ListLogEntries, settings=defaults['list_log_entries'])
        self._list_monitored_resource_descriptors = api_callable.create_api_call(
            self.stub.ListMonitoredResourceDescriptors,
            settings=defaults['list_monitored_resource_descriptors'])
    # Service calls
    def delete_log(self, log_name, options=None):
        """
        Deletes a log and all its log entries.
        The log will reappear if it receives new entries.

        Example:
          >>> from google.cloud.logging.v2.logging_service_v2_api import LoggingServiceV2Api
          >>> api = LoggingServiceV2Api()
          >>> log_name = api.log_path('[PROJECT]', '[LOG]')
          >>> api.delete_log(log_name)

        Args:
          log_name (string): Required. The resource name of the log to delete. Example:
            ``"projects/my-project/logs/syslog"``.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.

        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
        """
        # Build the protobuf request and invoke the configured unary call.
        request = logging_pb2.DeleteLogRequest(log_name=log_name)
        self._delete_log(request, options)
    def write_log_entries(self,
                          entries,
                          log_name='',
                          resource=None,
                          labels=None,
                          partial_success=False,
                          options=None):
        """
        Writes log entries to Stackdriver Logging. All log entries are
        written by this method.

        Example:
          >>> from google.cloud.logging.v2.logging_service_v2_api import LoggingServiceV2Api
          >>> from google.logging.v2 import logging_pb2
          >>> api = LoggingServiceV2Api()
          >>> entries = []
          >>> response = api.write_log_entries(entries)

        Args:
          log_name (string): Optional. A default log resource name for those log entries in ``entries``
            that do not specify their own ``logName``. Example:
            ``"projects/my-project/logs/syslog"``. See
            ``LogEntry``.
          resource (:class:`google.api.monitored_resource_pb2.MonitoredResource`): Optional. A default monitored resource for those log entries in ``entries``
            that do not specify their own ``resource``.
          labels (dict[string -> :class:`google.logging.v2.logging_pb2.WriteLogEntriesRequest.LabelsEntry`]): Optional. User-defined ``key:value`` items that are added to
            the ``labels`` field of each log entry in ``entries``, except when a log
            entry specifies its own ``key:value`` item with the same key.
            Example: ``{ "size": "large", "color":"red" }``
          entries (list[:class:`google.logging.v2.log_entry_pb2.LogEntry`]): Required. The log entries to write. The log entries must have values for
            all required fields.
            To improve throughput and to avoid exceeding the quota limit for calls
            to ``entries.write``, use this field to write multiple log entries at once
            rather than // calling this method for each log entry.
          partial_success (bool): Optional. Whether valid entries should be written even if some other
            entries fail due to INVALID_ARGUMENT or PERMISSION_DENIED errors. If any
            entry is not written, the response status will be the error associated
            with one of the failed entries and include error details in the form of
            WriteLogEntriesPartialErrors.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.

        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
        """
        if resource is None:
            resource = monitored_resource_pb2.MonitoredResource()
        if labels is None:
            # NOTE(review): ``labels`` is documented as a dict; the empty-list
            # default relies on the protobuf map field accepting either --
            # confirm with logging_pb2 before changing.
            labels = []
        request = logging_pb2.WriteLogEntriesRequest(
            entries=entries,
            log_name=log_name,
            resource=resource,
            labels=labels,
            partial_success=partial_success)
        self._write_log_entries(request, options)
    def list_log_entries(self,
                         project_ids,
                         filter_='',
                         order_by='',
                         page_size=0,
                         options=None):
        """
        Lists log entries. Use this method to retrieve log entries from Cloud
        Logging. For ways to export log entries, see
        `Exporting Logs <https://cloud.google.com/logging/docs/export>`_.

        Example:
          >>> from google.cloud.logging.v2.logging_service_v2_api import LoggingServiceV2Api
          >>> from google.gax import CallOptions, INITIAL_PAGE
          >>> api = LoggingServiceV2Api()
          >>> project_ids = []
          >>>
          >>> # Iterate over all results
          >>> for element in api.list_log_entries(project_ids):
          >>>   # process element
          >>>   pass
          >>>
          >>> # Or iterate over results one page at a time
          >>> for page in api.list_log_entries(project_ids, options=CallOptions(page_token=INITIAL_PAGE)):
          >>>   for element in page:
          >>>     # process element
          >>>     pass

        Args:
          project_ids (list[string]): Required. One or more project IDs or project numbers from which to retrieve
            log entries. Examples of a project ID: ``"my-project-1A"``, ``"1234567890"``.
          filter_ (string): Optional. An `advanced logs filter <https://cloud.google.com/logging/docs/view/advanced_filters>`_.
            The filter is compared against all log entries in the projects specified by
            ``projectIds``. Only entries that match the filter are retrieved. An empty
            filter matches all log entries.
          order_by (string): Optional. How the results should be sorted. Presently, the only permitted
            values are ``"timestamp asc"`` (default) and ``"timestamp desc"``. The first
            option returns entries in order of increasing values of
            ``LogEntry.timestamp`` (oldest first), and the second option returns entries
            in order of decreasing timestamps (newest first). Entries with equal
            timestamps are returned in order of ``LogEntry.insertId``.
          page_size (int): The maximum number of resources contained in the
            underlying API response. If page streaming is performed per-
            resource, this parameter does not affect the return value. If page
            streaming is performed per-page, this determines the maximum number
            of resources in a page.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.

        Returns:
          A :class:`google.gax.PageIterator` instance. By default, this
          is an iterable of :class:`google.logging.v2.log_entry_pb2.LogEntry` instances.
          This object can also be configured to iterate over the pages
          of the response through the `CallOptions` parameter.

        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
        """
        # ``filter_`` avoids shadowing the builtin; the proto field is
        # named ``filter``.
        request = logging_pb2.ListLogEntriesRequest(
            project_ids=project_ids,
            filter=filter_,
            order_by=order_by,
            page_size=page_size)
        return self._list_log_entries(request, options)
    def list_monitored_resource_descriptors(self, page_size=0, options=None):
        """
        Lists the monitored resource descriptors used by Stackdriver Logging.

        Example:
          >>> from google.cloud.logging.v2.logging_service_v2_api import LoggingServiceV2Api
          >>> from google.gax import CallOptions, INITIAL_PAGE
          >>> api = LoggingServiceV2Api()
          >>>
          >>> # Iterate over all results
          >>> for element in api.list_monitored_resource_descriptors():
          >>>   # process element
          >>>   pass
          >>>
          >>> # Or iterate over results one page at a time
          >>> for page in api.list_monitored_resource_descriptors(options=CallOptions(page_token=INITIAL_PAGE)):
          >>>   for element in page:
          >>>     # process element
          >>>     pass

        Args:
          page_size (int): The maximum number of resources contained in the
            underlying API response. If page streaming is performed per-
            resource, this parameter does not affect the return value. If page
            streaming is performed per-page, this determines the maximum number
            of resources in a page.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.

        Returns:
          A :class:`google.gax.PageIterator` instance. By default, this
          is an iterable of :class:`google.api.monitored_resource_pb2.MonitoredResourceDescriptor` instances.
          This object can also be configured to iterate over the pages
          of the response through the `CallOptions` parameter.

        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
        """
        # Paged call: the GAX settings turn the response into a PageIterator.
        request = logging_pb2.ListMonitoredResourceDescriptorsRequest(
            page_size=page_size)
        return self._list_monitored_resource_descriptors(request, options)
| |
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
from datetime import datetime, timedelta
from awscli.formatter import get_formatter
from awscli.arguments import CustomArgument
from awscli.customizations.commands import BasicCommand
from awscli.customizations.datapipeline import translator
from awscli.customizations.datapipeline.createdefaultroles \
import CreateDefaultRoles
from awscli.customizations.datapipeline.listrunsformatter \
import ListRunsFormatter
# Shared --help text fragments for the custom datapipeline arguments
# registered below (runtime strings -- kept byte-identical).
DEFINITION_HELP_TEXT = """\
The JSON pipeline definition. If the pipeline definition
is in a file you can use the file://<filename> syntax to
specify a filename.
"""

PARAMETER_OBJECTS_HELP_TEXT = """\
The JSON parameter objects. If the parameter objects are
in a file you can use the file://<filename> syntax to
specify a filename. You can optionally provide these in
pipeline definition as well. Parameter objects provided
on command line would replace the one in definition.
"""

PARAMETER_VALUES_HELP_TEXT = """\
The JSON parameter values. If the parameter values are
in a file you can use the file://<filename> syntax to
specify a filename. You can optionally provide these in
pipeline definition as well. Parameter values provided
on command line would replace the one in definition.
"""

INLINE_PARAMETER_VALUES_HELP_TEXT = """\
The JSON parameter values. You can specify these as
key-value pairs in the key=value format. Multiple parameters
are separated by a space. For list type parameter values
you can use the same key name and specify each value as
a key value pair. e.g. arrayValue=value1 arrayValue=value2
"""
class DocSectionNotFoundError(Exception):
    """Raised when the "Output" section of a help document cannot be found."""
    pass
class ParameterDefinitionError(Exception):
    """Raised when an inline parameter value cannot be parsed.

    The original (unwrapped) message is kept on ``self.msg``.
    """
    def __init__(self, msg):
        super(ParameterDefinitionError, self).__init__(
            "Error in parameter: %s\n" % msg)
        self.msg = msg
def register_customizations(cli):
    """Wire the datapipeline customizations into the CLI event system."""
    cli.register(
        'building-argument-table.datapipeline.put-pipeline-definition',
        add_pipeline_definition)
    cli.register(
        'building-argument-table.datapipeline.activate-pipeline',
        activate_pipeline_definition)
    cli.register(
        'after-call.datapipeline.GetPipelineDefinition',
        translate_definition)
    cli.register(
        'building-command-table.datapipeline',
        register_commands)
    # Registered last so every other doc handler has already written its
    # output before document_translation rewrites the "Output" section.
    cli.register_last(
        'doc-output.datapipeline.get-pipeline-definition',
        document_translation)
def register_commands(command_table, session, **kwargs):
    """Add the custom high-level subcommands to the datapipeline table."""
    command_table['list-runs'] = ListRunsCommand(session)
    command_table['create-default-roles'] = CreateDefaultRoles(session)
def document_translation(help_command, **kwargs):
    """Replace the auto-generated "Output" doc section with a pointer to
    the pipeline definition file syntax documentation.

    Raises DocSectionNotFoundError if the section marker is never found.
    """
    # Remove all the writes until we get to the output.
    # I don't think this is the ideal way to do this, we should
    # improve our plugin/doc system to make this easier.
    doc = help_command.doc
    current = ''
    while current != '======\nOutput\n======':
        try:
            # pop_write() discards the most recent write; we unwind until
            # we hit the section header itself.
            current = doc.pop_write()
        except IndexError:
            # This should never happen, but in the rare case that it does
            # we should be raising something with a helpful error message.
            raise DocSectionNotFoundError(
                'Could not find the "output" section for the command: %s'
                % help_command)
    doc.write('======\nOutput\n======')
    doc.write(
        '\nThe output of this command is the pipeline definition, which'
        ' is documented in the '
        '`Pipeline Definition File Syntax '
        '<http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/'
        'dp-writing-pipeline-definition.html>`__')
def add_pipeline_definition(argument_table, **kwargs):
    """Customize the argument table for put-pipeline-definition.

    Replaces the raw --pipeline-objects argument with JSON-document and
    inline parameter arguments.
    """
    argument_table['pipeline-definition'] = PipelineDefinitionArgument(
        'pipeline-definition', required=True,
        help_text=DEFINITION_HELP_TEXT)
    argument_table['parameter-objects'] = ParameterObjectsArgument(
        'parameter-objects', required=False,
        help_text=PARAMETER_OBJECTS_HELP_TEXT)
    argument_table['parameter-values-uri'] = ParameterValuesArgument(
        'parameter-values-uri',
        required=False,
        help_text=PARAMETER_VALUES_HELP_TEXT)
    # Need to use an argument model for inline parameters to accept a list
    argument_table['parameter-values'] = ParameterValuesInlineArgument(
        'parameter-values',
        required=False,
        nargs='+',
        help_text=INLINE_PARAMETER_VALUES_HELP_TEXT)
    # The pipeline-objects is no longer needed required because
    # a user can provide a pipeline-definition instead.
    # get-pipeline-definition also displays the output in the
    # translated format.
    del argument_table['pipeline-objects']
def activate_pipeline_definition(argument_table, **kwargs):
    """Customize the argument table for activate-pipeline by adding the
    parameter-value arguments (URI-based and inline)."""
    argument_table['parameter-values-uri'] = ParameterValuesArgument(
        'parameter-values-uri', required=False,
        help_text=PARAMETER_VALUES_HELP_TEXT)
    # Need to use an argument model for inline parameters to accept a list
    argument_table['parameter-values'] = ParameterValuesInlineArgument(
        'parameter-values',
        required=False,
        nargs='+',
        help_text=INLINE_PARAMETER_VALUES_HELP_TEXT,
    )
def translate_definition(parsed, **kwargs):
    """after-call hook: rewrite the raw GetPipelineDefinition response in
    place into the user-facing pipeline definition format."""
    translator.api_to_definition(parsed)
def convert_described_objects(api_describe_objects, sort_key_func=None):
    """Flatten DescribeObjects results into plain dicts.

    Each API object carries a ``fields`` list of
    ``{'key': ..., 'stringValue'/'refValue': ...}`` entries; this turns
    every object into a single ``{key: value}`` dict (plus ``@id`` and
    ``name``), optionally sorted by *sort_key_func*.
    """
    results = []
    for api_object in api_describe_objects:
        flattened = {
            '@id': api_object['id'],
            'name': api_object['name'],
        }
        # Prefer the string value; fall back to the ref value.
        flattened.update(
            (field['key'], field.get('stringValue', field.get('refValue')))
            for field in api_object['fields'])
        results.append(flattened)
    if sort_key_func is not None:
        results.sort(key=sort_key_func)
    return results
class QueryArgBuilder(object):
    """
    Convert CLI arguments to Query arguments used by QueryObject.
    """

    _TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'

    def __init__(self, current_time=None):
        # Capture "now" once at construction so the implicit 4-day
        # window is stable for the whole query build.
        if current_time is None:
            current_time = datetime.utcnow()
        self.current_time = current_time

    def build_query(self, parsed_args):
        """Return a ``{'selectors': [...]}`` query dict for parsed_args."""
        selectors = []
        no_intervals = (parsed_args.start_interval is None and
                        parsed_args.schedule_interval is None)
        if no_intervals:
            # If no intervals are specified, default to a start time of
            # 4 days ago and an end time of right now.
            end_dt = self.current_time
            start_dt = end_dt - timedelta(days=4)
            selectors.append(self._between(
                '@actualStartTime',
                start_dt.strftime(self._TIME_FORMAT),
                end_dt.strftime(self._TIME_FORMAT)))
        else:
            self._build_schedule_times(selectors, parsed_args)
        if parsed_args.status is not None:
            self._build_status(selectors, parsed_args)
        return {'selectors': selectors}

    def _between(self, field_name, start_str, end_str):
        # One BETWEEN selector over a time field.
        return {
            'fieldName': field_name,
            'operator': {
                'type': 'BETWEEN',
                'values': [start_str, end_str]
            }
        }

    def _build_schedule_times(self, selectors, parsed_args):
        # Each interval is a [start, end] pair of preformatted strings.
        if parsed_args.start_interval is not None:
            interval = parsed_args.start_interval
            selectors.append(
                self._between('@actualStartTime', interval[0], interval[1]))
        if parsed_args.schedule_interval is not None:
            interval = parsed_args.schedule_interval
            selectors.append(
                self._between('@scheduledStartTime', interval[0], interval[1]))

    def _build_status(self, selectors, parsed_args):
        # Statuses are matched uppercased, as the API expects.
        selectors.append({
            'fieldName': '@status',
            'operator': {
                'type': 'EQ',
                'values': [status.upper() for status in parsed_args.status]
            }
        })
class PipelineDefinitionArgument(CustomArgument):
    def add_to_params(self, parameters, value):
        """Translate the JSON definition document into API parameters.

        Fills in ``pipelineObjects`` and, if not already supplied by the
        dedicated CLI arguments, ``parameterObjects``/``parameterValues``
        extracted from the definition itself.
        """
        if value is None:
            return
        parsed = json.loads(value)
        api_objects = translator.definition_to_api_objects(parsed)
        parameter_objects = translator.definition_to_api_parameters(parsed)
        parameter_values = translator.definition_to_parameter_values(parsed)
        parameters['pipelineObjects'] = api_objects
        # Use Parameter objects and values from def if not already provided
        if 'parameterObjects' not in parameters \
                and parameter_objects is not None:
            parameters['parameterObjects'] = parameter_objects
        if 'parameterValues' not in parameters \
                and parameter_values is not None:
            parameters['parameterValues'] = parameter_values
class ParameterObjectsArgument(CustomArgument):
    def add_to_params(self, parameters, value):
        """Parse the JSON parameter-objects document into
        ``parameters['parameterObjects']``."""
        if value is None:
            return
        parsed = json.loads(value)
        parameter_objects = translator.definition_to_api_parameters(parsed)
        parameters['parameterObjects'] = parameter_objects
class ParameterValuesArgument(CustomArgument):
    def add_to_params(self, parameters, value):
        """Parse the JSON parameter-values document into
        ``parameters['parameterValues']``.

        Raises if parameter values were already provided, since only one
        of --parameter-values / --parameter-values-uri may be used.
        """
        if value is None:
            return
        if parameters.get('parameterValues', None) is not None:
            raise Exception(
                "Only parameter-values or parameter-values-uri is allowed"
            )
        parsed = json.loads(value)
        parameter_values = translator.definition_to_parameter_values(parsed)
        parameters['parameterValues'] = parameter_values
class ParameterValuesInlineArgument(CustomArgument):
    def add_to_params(self, parameters, value):
        """Parse inline ``key=value`` parameter values into
        ``parameters['parameterValues']``.

        A key that appears more than once collects its values into a flat
        list (list-type pipeline parameters).  Raises
        ParameterDefinitionError for an argument with no ``=`` and a
        generic error if parameter values were already supplied via
        --parameter-values-uri.
        """
        if value is None:
            return
        if parameters.get('parameterValues', None) is not None:
            raise Exception(
                "Only parameter-values or parameter-values-uri is allowed"
            )
        parameter_object = {}
        # Break each argument at the first '=' into key and value.
        for argument in value:
            try:
                key, arg_value = argument.split('=', 1)
            except ValueError:
                raise ParameterDefinitionError(
                    "Invalid inline parameter format: %s" % argument
                )
            if key in parameter_object:
                existing = parameter_object[key]
                # Bug fix: a key repeated three or more times used to
                # produce a *nested* list ([[v1, v2], v3]); append to the
                # existing flat list instead.
                if isinstance(existing, list):
                    existing.append(arg_value)
                else:
                    parameter_object[key] = [existing, arg_value]
            else:
                parameter_object[key] = arg_value
        parsed = {'values': parameter_object}
        parameter_values = translator.definition_to_parameter_values(parsed)
        parameters['parameterValues'] = parameter_values
class ListRunsCommand(BasicCommand):
    """Custom ``datapipeline list-runs`` subcommand."""
    NAME = 'list-runs'
    DESCRIPTION = (
        'Lists the times the specified pipeline has run. '
        'You can optionally filter the complete list of '
        'results to include only the runs you are interested in.')
    ARG_TABLE = [
        {'name': 'pipeline-id', 'help_text': 'The identifier of the pipeline.',
         'action': 'store', 'required': True, 'cli_type_name': 'string', },
        {'name': 'status',
         'help_text': (
             'Filters the list to include only runs in the '
             'specified statuses. '
             'The valid statuses are as follows: waiting, pending, cancelled, '
             'running, finished, failed, waiting_for_runner, '
             'and waiting_on_dependencies.'),
         'action': 'store'},
        {'name': 'start-interval',
         'help_text': (
             'Filters the list to include only runs that started '
             'within the specified interval.'),
         'action': 'store', 'required': False, 'cli_type_name': 'string', },
        {'name': 'schedule-interval',
         'help_text': (
             'Filters the list to include only runs that are scheduled to '
             'start within the specified interval.'),
         'action': 'store', 'required': False, 'cli_type_name': 'string', },
    ]
    # NOTE(review): this list also accepts 'shutting_down', which the
    # --status help text above does not mention -- confirm which is
    # authoritative and sync the two.
    VALID_STATUS = ['waiting', 'pending', 'cancelled', 'running',
                    'finished', 'failed', 'waiting_for_runner',
                    'waiting_on_dependencies', 'shutting_down']

    def _run_main(self, parsed_args, parsed_globals, **kwargs):
        """Entry point: create the client, normalize args, run the query."""
        self._set_client(parsed_globals)
        self._parse_type_args(parsed_args)
        self._list_runs(parsed_args, parsed_globals)

    def _set_client(self, parsed_globals):
        # This is called from _run_main and is used to ensure that we have
        # a service/endpoint object to work with.
        self.client = self._session.create_client(
            'datapipeline',
            region_name=parsed_globals.region,
            endpoint_url=parsed_globals.endpoint_url,
            verify=parsed_globals.verify_ssl)

    def _parse_type_args(self, parsed_args):
        """Split the comma separated interval/status strings into lists
        and validate the requested statuses."""
        # TODO: give good error messages!
        # Parse the start/schedule times.
        # Parse the status csv.
        if parsed_args.start_interval is not None:
            parsed_args.start_interval = [
                arg.strip() for arg in
                parsed_args.start_interval.split(',')]
        if parsed_args.schedule_interval is not None:
            parsed_args.schedule_interval = [
                arg.strip() for arg in
                parsed_args.schedule_interval.split(',')]
        if parsed_args.status is not None:
            parsed_args.status = [
                arg.strip() for arg in
                parsed_args.status.split(',')]
            self._validate_status_choices(parsed_args.status)

    def _validate_status_choices(self, statuses):
        # Raises ValueError on the first status not in VALID_STATUS.
        for status in statuses:
            if status not in self.VALID_STATUS:
                raise ValueError("Invalid status: %s, must be one of: %s" %
                                 (status, ', '.join(self.VALID_STATUS)))

    def _list_runs(self, parsed_args, parsed_globals):
        """Query matching instance objects, flatten them, and render with
        the chosen formatter."""
        query = QueryArgBuilder().build_query(parsed_args)
        object_ids = self._query_objects(parsed_args.pipeline_id, query)
        objects = self._describe_objects(parsed_args.pipeline_id, object_ids)[
            'pipelineObjects']
        converted = convert_described_objects(
            objects,
            sort_key_func=lambda x: (x.get('@scheduledStartTime'),
                                     x.get('name')))
        formatter = self._get_formatter(parsed_globals)
        formatter(self.NAME, converted)

    def _describe_objects(self, pipeline_id, object_ids):
        # Single DescribeObjects call; pagination is not handled here.
        parsed = self.client.describe_objects(
            pipelineId=pipeline_id, objectIds=object_ids)
        return parsed

    def _query_objects(self, pipeline_id, query):
        # Paginate QueryObjects over the INSTANCE sphere and collect ids.
        paginator = self.client.get_paginator('query_objects').paginate(
            pipelineId=pipeline_id,
            sphere='INSTANCE', query=query)
        parsed = paginator.build_full_result()
        return parsed['ids']

    def _get_formatter(self, parsed_globals):
        # Default (no --output) uses the custom tabular list-runs format.
        output = parsed_globals.output
        if output is None:
            return ListRunsFormatter(parsed_globals)
        else:
            return get_formatter(output, parsed_globals)
| |
#
# Pan.py -- Pan plugin for fits viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import sys
import traceback
import math
from ginga.misc import Widgets, CanvasTypes, Bunch
from ginga import GingaPlugin
class Pan(GingaPlugin.GlobalPlugin):
    """Global plugin showing a small pan/overview image per channel.

    Each channel gets its own miniature ImageViewCanvas; a compass and a
    pan rectangle are drawn on it, and mouse interaction on the small
    image pans/zooms the main (focused) image.
    """
    def __init__(self, fv):
        # superclass defines some variables for us, like logger
        super(Pan, self).__init__(fv)
        # Per-channel state keyed by channel name; values are the Bunch
        # objects built in add_channel().
        self.channel = {}
        self.active = None
        self.info = None
        fv.add_callback('add-channel', self.add_channel)
        fv.add_callback('delete-channel', self.delete_channel)
        fv.set_callback('active-image', self.focus_cb)

    def build_gui(self, container):
        """Create the stack widget that holds one pan image per channel."""
        nb = Widgets.StackWidget()
        self.nb = nb
        container.add_widget(self.nb, stretch=1)

    def _create_pan_image(self):
        """Build and configure a single pan image viewer canvas."""
        width, height = 300, 300
        sfi = CanvasTypes.ImageViewCanvas(logger=self.logger)
        sfi.enable_autozoom('on')
        sfi.enable_autocuts('off')
        # Drawing a dashed green rectangle selects a region to zoom to
        # (see draw_cb).
        sfi.enable_draw(True)
        sfi.set_drawtype('rectangle', linestyle='dash')
        sfi.set_drawcolor('green')
        sfi.set_callback('draw-event', self.draw_cb)
        hand = sfi.get_cursor('pan')
        sfi.define_cursor('pick', hand)
        ## sfi.enable_cuts(False)
        sfi.set_bg(0.4, 0.4, 0.4)
        sfi.set_desired_size(width, height)
        # Cursor events pan the *main* image, not the pan image itself.
        sfi.set_callback('cursor-down', self.btndown)
        sfi.set_callback('cursor-move', self.drag_cb)
        sfi.set_callback('none-move', self.motion_cb)
        sfi.set_callback('scroll', self.zoom)
        sfi.set_callback('configure', self.reconfigure)
        # for debugging
        sfi.set_name('panimage')
        # Built-in pan/zoom bindings are disabled; this viewer is driven
        # entirely by the callbacks above.
        bd = sfi.get_bindings()
        bd.enable_pan(False)
        bd.enable_zoom(False)
        #iw = sfi.get_widget()
        sfi.set_desired_size(width, height)
        return sfi

    def add_channel(self, viewer, chinfo):
        """Create a pan image for a new channel and mirror the channel
        viewer's settings (colormap, cuts, zoom, transforms) onto it."""
        panimage = self._create_pan_image()
        chname = chinfo.name
        iw = panimage.get_widget()
        # wrap widget
        iw = Widgets.wrap(iw)
        self.nb.add_widget(iw)
        index = self.nb.index_of(iw)
        paninfo = Bunch.Bunch(panimage=panimage, widget=iw,
                              pancompass=None, panrect=None)
        self.channel[chname] = paninfo
        # Extract RGBMap object from main image and attach it to this
        # pan image
        fitsimage = chinfo.fitsimage
        rgbmap = fitsimage.get_rgbmap()
        panimage.set_rgbmap(rgbmap, redraw=False)
        rgbmap.add_callback('changed', self.rgbmap_cb, panimage)
        fitsimage.copy_attributes(panimage, ['cutlevels'])
        fitsimage.add_callback('image-set', self.new_image_cb, chinfo, paninfo)
        fitsimage.add_callback('redraw', self.panset, chinfo, paninfo)
        fitssettings = fitsimage.get_settings()
        pansettings = panimage.get_settings()
        # Share zoom-related settings so scale changes on the main viewer
        # propagate here (zoom_cb refits the pan image).
        zoomsettings = ['zoom_algorithm', 'zoom_rate',
                        'scale_x_base', 'scale_y_base']
        fitssettings.shareSettings(pansettings, zoomsettings)
        for key in zoomsettings:
            pansettings.getSetting(key).add_callback('set', self.zoom_cb,
                                                     fitsimage, chinfo, paninfo)
        # Transforms only need a light redraw (whence=0); cuts need a
        # deeper one (whence=1).
        xfrmsettings = ['flip_x', 'flip_y', 'swap_xy', 'rot_deg']
        fitssettings.shareSettings(pansettings, xfrmsettings)
        for key in xfrmsettings:
            pansettings.getSetting(key).add_callback('set', self.redraw_cb,
                                                     fitsimage, chinfo, paninfo, 0)
        fitssettings.shareSettings(pansettings, ['cuts'])
        pansettings.getSetting('cuts').add_callback('set', self.redraw_cb,
                                                    fitsimage, chinfo, paninfo, 1)
        self.logger.debug("channel %s added." % (chinfo.name))

    def delete_channel(self, viewer, chinfo):
        """Tear down the pan image belonging to a deleted channel."""
        chname = chinfo.name
        self.logger.debug("deleting channel %s" % (chname))
        widget = self.channel[chname].widget
        self.nb.remove(widget, delete=True)
        self.active = None
        self.info = None
        del self.channel[chname]

    def start(self):
        """Create pan images for every channel that already exists."""
        names = self.fv.get_channelNames()
        for name in names:
            chinfo = self.fv.get_channelInfo(name)
            self.add_channel(self.fv, chinfo)

    # CALLBACKS

    def rgbmap_cb(self, rgbmap, panimage):
        # color mapping has changed in some way
        panimage.redraw(whence=1)

    def new_image_cb(self, fitsimage, image, chinfo, paninfo):
        """A new image was set in the channel; mirror its cut levels and
        display it in the pan viewer."""
        loval, hival = fitsimage.get_cut_levels()
        paninfo.panimage.cut_levels(loval, hival, redraw=False)
        # add cb to image so that if it is modified we can update info
        image.add_callback('modified', self.image_update_cb, fitsimage,
                           chinfo, paninfo)
        self.set_image(chinfo, paninfo, image)

    def image_update_cb(self, image, fitsimage, chinfo, paninfo):
        # image has changed (e.g. size, value range, etc)
        # Only refresh if the modified image is still the one displayed.
        cur_img = fitsimage.get_image()
        if cur_img == image:
            self.set_image(chinfo, paninfo, image)
        return True

    def focus_cb(self, viewer, fitsimage):
        """Raise the pan image matching the newly focused channel."""
        chname = self.fv.get_channelName(fitsimage)
        chinfo = self.fv.get_channelInfo(chname)
        chname = chinfo.name
        if self.active != chname:
            iw = self.channel[chname].widget
            index = self.nb.index_of(iw)
            self.nb.set_index(index)
            self.active = chname
            self.info = self.channel[self.active]

    def reconfigure(self, fitsimage, width, height):
        """Pan viewer widget was resized; refit the image to it."""
        self.logger.debug("new pan image dimensions are %dx%d" % (
            width, height))
        fitsimage.zoom_fit()

    def redraw_cb(self, setting, value, fitsimage, chinfo, paninfo, whence):
        # Shared setting changed on the main viewer; redraw the pan image
        # at the requested depth and refresh the pan rectangle.
        paninfo.panimage.redraw(whence=whence)
        self.panset(chinfo.fitsimage, chinfo, paninfo)
        return True

    def zoom_cb(self, setting, value, fitsimage, chinfo, paninfo):
        # refit the pan image, because scale factors may have changed
        paninfo.panimage.zoom_fit(redraw=True)
        # redraw pan info
        self.panset(fitsimage, chinfo, paninfo)
        return True

    # LOGIC

    def clear(self):
        self.info.panimage.clear()

    def set_image(self, chinfo, paninfo, image):
        """Display *image* in the pan viewer and (re)draw the compass."""
        if image is None:
            return
        paninfo.panimage.set_image(image)
        # remove old compass
        try:
            paninfo.panimage.deleteObjectByTag(paninfo.pancompass,
                                               redraw=False)
        except Exception:
            pass
        # create compass
        if image.has_valid_wcs():
            try:
                width, height = image.get_size()
                x, y = width / 2.0, height / 2.0
                # radius we want the arms to be (approx 1/4 the largest dimension)
                radius = float(max(width, height)) / 4.0
                # HACK: force a wcs error here if one is going to happen
                image.add_offset_xy(x, y, 1.0, 1.0)
                paninfo.pancompass = paninfo.panimage.add(CanvasTypes.Compass(
                    x, y, radius, color='skyblue',
                    fontsize=14), redraw=True)
            except Exception as e:
                self.logger.warn("Can't calculate compass: %s" % (
                    str(e)))
                try:
                    # log traceback, if possible
                    (type_, value_, tb) = sys.exc_info()
                    tb_str = "".join(traceback.format_tb(tb))
                    self.logger.error("Traceback:\n%s" % (tb_str))
                except Exception:
                    tb_str = "Traceback information unavailable."
                    self.logger.error(tb_str)
        self.panset(chinfo.fitsimage, chinfo, paninfo)

    def panset(self, fitsimage, chinfo, paninfo):
        """Update (or create) the pan rectangle and position marker to
        reflect the main viewer's current pan and visible area."""
        image = fitsimage.get_image()
        if image is None:
            return
        x, y = fitsimage.get_pan()
        points = fitsimage.get_pan_rect()
        # calculate pan position point radius
        image = paninfo.panimage.get_image()
        if image is None:
            return
        width, height = image.get_size()
        edgew = math.sqrt(width**2 + height**2)
        radius = int(0.015 * edgew)
        # Mark pan rectangle and pan position
        try:
            obj = paninfo.panimage.getObjectByTag(paninfo.panrect)
            if obj.kind != 'compound':
                return True
            point, bbox = obj.objects
            self.logger.debug("starting panset")
            point.x, point.y = x, y
            point.radius = radius
            bbox.points = points
            paninfo.panimage.redraw(whence=3)
        except KeyError:
            # First time through (or marker was deleted): create it.
            paninfo.panrect = paninfo.panimage.add(CanvasTypes.CompoundObject(
                CanvasTypes.Point(x, y, radius=radius, style='plus'),
                CanvasTypes.Polygon(points)))

    def motion_cb(self, fitsimage, event, data_x, data_y):
        # Echo cursor position readout for the focused main image.
        bigimage = self.fv.getfocus_fitsimage()
        self.fv.showxy(bigimage, data_x, data_y)
        return True

    def drag_cb(self, fitsimage, event, data_x, data_y):
        # this is a panning move in the small
        # window for the big window
        bigimage = self.fv.getfocus_fitsimage()
        bigimage.panset_xy(data_x, data_y)
        return True

    def btndown(self, fitsimage, event, data_x, data_y):
        # Click in the pan window recenters the focused main image.
        bigimage = self.fv.getfocus_fitsimage()
        bigimage.panset_xy(data_x, data_y)
        return True

    def zoom(self, fitsimage, event):
        """Scroll event in the small fits window.  Just zoom the large fits
        window.
        """
        # NOTE: the fitsimage parameter (the pan viewer) is deliberately
        # rebound to the focused main viewer below.
        fitsimage = self.fv.getfocus_fitsimage()
        prefs = self.fv.get_preferences()
        settings = prefs.getSettings('general')
        rev = settings.get('zoom_scroll_reverse', False)
        # direction is in degrees; up-ish scroll zooms in unless reversed.
        direction = event.direction
        if (direction < 90.0) or (direction > 270.0):
            if not rev:
                fitsimage.zoom_in()
            else:
                fitsimage.zoom_out()
        elif (90.0 < direction < 270.0):
            if not rev:
                fitsimage.zoom_out()
            else:
                fitsimage.zoom_in()
        fitsimage.onscreen_message(fitsimage.get_scale_text(),
                                   delay=1.0)

    def draw_cb(self, fitsimage, tag):
        """A rectangle was drawn on the pan image: pan and zoom the main
        image so that the rectangle fills its window."""
        # Get and delete the drawn object
        obj = fitsimage.getObjectByTag(tag)
        fitsimage.deleteObjectByTag(tag, redraw=True)
        # determine center of drawn rectangle and set pan position
        if obj.kind != 'rectangle':
            return True
        xc = (obj.x1 + obj.x2) / 2.0
        yc = (obj.y1 + obj.y2) / 2.0
        fitsimage = self.fv.getfocus_fitsimage()
        # note: fitsimage <-- referring to large non-pan image
        fitsimage.panset_xy(xc, yc, redraw=False)
        # Determine appropriate zoom level to fit this rect
        wd = obj.x2 - obj.x1
        ht = obj.y2 - obj.y1
        wwidth, wheight = fitsimage.get_window_size()
        wd_scale = float(wwidth) / float(wd)
        ht_scale = float(wheight) / float(ht)
        scale = min(wd_scale, ht_scale)
        self.logger.debug("wd_scale=%f ht_scale=%f scale=%f" % (
            wd_scale, ht_scale, scale))
        # Integer zoom levels: negative = zoom out (1/n), positive = n x.
        if scale < 1.0:
            zoomlevel = - max(2, int(math.ceil(1.0/scale)))
        else:
            zoomlevel = max(1, int(math.floor(scale)))
        self.logger.debug("zoomlevel=%d" % (zoomlevel))
        fitsimage.zoom_to(zoomlevel, redraw=True)
        return True

    def __str__(self):
        return 'pan'
#END
| |
#########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program; if not, write to the Free Software #
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA #
# 02111-1307 USA #
# #
# #
#########################################################################
import sys, os, rtf2xml.copy, tempfile
# note to self. This is the first module in which I use tempfile. A good idea?
"""
"""
class AddBrackets:
    """
    Add brackets for old RTF.
    Logic:

    Old-style RTF applies inline character formatting with bare control
    words instead of groups.  This pass scans the tokenized stream and,
    whenever runs of accepted inline control words are seen outside of
    explicit groups, wraps the current inline state in synthetic
    open/close brackets (marker 0003) so downstream passes see
    well-formed groups.
    """
    def __init__(self, in_file,
                 bug_handler,
                 copy=None,
                 run_level=1,
                 ):
        """
        Required:
            'file'--file to parse
        Optional:
            'copy'-- whether to make a copy of result for debugging
            'temp_dir' --where to output temporary results (default is
            directory from which the script is run.)
        Returns:
            nothing
        """
        self.__file = in_file
        self.__bug_handler = bug_handler
        self.__copy = copy
        # NOTE(review): tempfile.mktemp() is deprecated and race-prone;
        # a NamedTemporaryFile/mkstemp would be safer -- consider fixing.
        self.__write_to = tempfile.mktemp()
        self.__run_level = run_level

    def __initiate_values(self):
        """Set up the state machine tables and per-run bookkeeping."""
        # Maps state name -> handler invoked per input line.
        self.__state_dict = {
            'before_body': self.__before_body_func,
            'in_body': self.__in_body_func,
            'after_control_word': self.__after_control_word_func,
            'in_ignore': self.__ignore_func,
        }
        self.__state = 'before_body'
        # Accumulated inline formatting state (token-info -> attribute).
        self.__inline = {}
        self.__temp_group = []
        # 1 while a synthetic (0003) bracket we opened is still open.
        self.__open_bracket = 0
        self.__found_brackets = 0
        # Token infos treated as inline character formatting.
        self.__accept = [
            'cw<ci<bold______',
            'cw<ci<annotation',
            'cw<ci<blue______',
            'cw<ci<bold______',
            'cw<ci<caps______',
            'cw<ci<char-style',
            'cw<ci<dbl-strike',
            'cw<ci<emboss____',
            'cw<ci<engrave___',
            'cw<ci<font-color',
            'cw<ci<font-down_',
            'cw<ci<font-size_',
            'cw<ci<font-style',
            'cw<ci<font-up___',
            'cw<ci<footnot-mk',
            'cw<ci<green_____',
            'cw<ci<hidden____',
            'cw<ci<italics___',
            'cw<ci<outline___',
            'cw<ci<red_______',
            'cw<ci<shadow____',
            'cw<ci<small-caps',
            'cw<ci<strike-thr',
            'cw<ci<subscript_',
            'cw<ci<superscrip',
            'cw<ci<underlined',
            'cw<ul<underlined',
        ]

    def __before_body_func(self, line):
        """Pass lines through until the body-open marker, then switch state."""
        if self.__token_info == 'mi<mk<body-open_':
            self.__state = 'in_body'
        self.__write_obj.write(line)

    def __in_body_func(self, line):
        """Handle a line while inside the document body."""
        if line == 'cb<nu<clos-brack<0001\n' and self.__open_bracket:
            # Close our synthetic bracket before the real group closes.
            self.__write_obj.write(
                'cb<nu<clos-brack<0003\n'
            )
            self.__write_obj.write(line)
        elif self.__token_info == 'ob<nu<open-brack':
            # A real RTF group opens: ignore everything until it closes.
            self.__found_brackets = 1
            self.__state = 'in_ignore'
            self.__ignore_count = self.__ob_count
            self.__write_obj.write(line)
        elif self.__token_info in self.__accept:
            # Start collecting a run of inline control words.
            self.__temp_group.append(line)
            self.__state = 'after_control_word'
        else:
            self.__write_obj.write(line)

    def __after_control_word_func(self, line):
        """Collect further inline control words; on anything else, flush
        the collected group as a synthetic bracketed group."""
        if self.__token_info in self.__accept:
            self.__temp_group.append(line)
        else:
            self.__change_permanent_group()
            self.__write_group()
            self.__write_obj.write(line)
            if self.__token_info == 'ob<nu<open-brack':
                self.__state = 'in_ignore'
                self.__ignore_count = self.__ob_count
            else:
                self.__state = 'in_body'

    def __write_group(self):
        """Close any previous synthetic bracket and open a new one
        carrying the current inline formatting state."""
        if self.__open_bracket:
            self.__write_obj.write(
                'cb<nu<clos-brack<0003\n'
            )
            self.__open_bracket = 0
        inline_string = ''
        the_keys = self.__inline.keys()
        for the_key in the_keys:
            value = self.__inline[the_key]
            # 'false' means the attribute was explicitly turned off.
            if value != 'false':
                inline_string += '%s<nu<%s\n' % (the_key, value)
        if inline_string:
            self.__write_obj.write('ob<nu<open-brack<0003\n')
            self.__write_obj.write(inline_string)
            self.__open_bracket = 1
        self.__temp_group = []

    def __change_permanent_group(self):
        """
        use temp group to change permanent group
        """
        for line in self.__temp_group:
            token_info = line[:16]
            if token_info in self.__accept:
                att = line[20:-1]
                self.__inline[token_info] = att

    def __ignore_func(self, line):
        """
        Don't add any brackets while inside of brackets RTF has already
        added.
        """
        self.__write_obj.write(line)
        if self.__token_info == 'cb<nu<clos-brack' and \
                self.__cb_count == self.__ignore_count:
            self.__state = 'in_body'

    def __check_brackets(self, in_file):
        # Returns 1 when brackets are unbalanced, None when they are OK.
        # NOTE(review): rtf2xml.check_brackets is referenced here but only
        # rtf2xml.copy is imported at the top of this module -- presumably
        # the package import makes it available; verify.
        self.__check_brack_obj = rtf2xml.check_brackets.CheckBrackets\
            (file=in_file)
        good_br, msg = self.__check_brack_obj.check_brackets()
        if not good_br:
            return 1

    def add_brackets(self):
        """Run the pass: read the token file, write the bracketed version
        to a temp file, and replace the original if brackets balance."""
        self.__initiate_values()
        read_obj = open(self.__file, 'r')
        self.__write_obj = open(self.__write_to, 'w')
        line_to_read = 1
        while line_to_read:
            line_to_read = read_obj.readline()
            line = line_to_read
            # First 16 chars identify the token; last 4 digits of bracket
            # tokens carry the nesting count used to match open/close.
            self.__token_info = line[:16]
            if self.__token_info == 'ob<nu<open-brack':
                self.__ob_count = line[-5:-1]
            if self.__token_info == 'cb<nu<clos-brack':
                self.__cb_count = line[-5:-1]
            action = self.__state_dict.get(self.__state)
            if action == None:
                sys.stderr.write('No matching state in module add_brackets.py\n')
                sys.stderr.write(self.__state + '\n')
            action(line)
        read_obj.close()
        self.__write_obj.close()
        # Truthy (1) result means the rewritten file is *bad*; only
        # install it over the original when brackets balance.
        bad_brackets = self.__check_brackets(self.__write_to)
        if not bad_brackets:
            copy_obj = rtf2xml.copy.Copy(bug_handler=self.__bug_handler)
            if self.__copy:
                copy_obj.copy_file(self.__write_to, "add_brackets.data")
            copy_obj.rename(self.__write_to, self.__file)
        else:
            if self.__run_level > 0:
                sys.stderr.write(
                    'Sorry, but this files has a mix of old and new RTF.\n'
                    'Some characteristics cannot be converted.\n')
            os.remove(self.__write_to)
| |
"""
Service support for Debian systems (uses update-rc.d and /sbin/service)
.. important::
If you feel that Salt should be using this module to manage services on a
minion, and it is using a different module (or gives an error similar to
*'service.start' is not available*), see :ref:`here
<module-provider-override>`.
"""
import fnmatch
import glob
import logging
import os
import re
import shlex
import salt.utils.systemd
# Expose reload_ as "service.reload" (reload shadows a Python builtin).
__func_alias__ = {"reload_": "reload"}
# Define the module's virtual name
__virtualname__ = "service"
log = logging.getLogger(__name__)
def __virtual__():
    """
    Only work on Debian and when systemd isn't running
    """
    # Loaded only for sysvinit-style Debian derivatives; systemd hosts
    # are handled by the systemd service module instead.
    if (
        __grains__["os"]
        in (
            "Debian",
            "Raspbian",
            "Devuan",
            "NILinuxRT",
        )
        and not salt.utils.systemd.booted(__context__)
    ):
        return __virtualname__
    else:
        return (
            False,
            "The debian_service module could not be loaded: "
            "unsupported OS family and/or systemd running.",
        )
def _service_cmd(*args):
return "service {} {}".format(args[0], " ".join(args[1:]))
def _get_runlevel():
    """
    returns the current runlevel
    """
    out = __salt__["cmd.run"]("runlevel")
    # unknown can be returned while inside a container environment, since
    # this is due to a lack of init, it should be safe to assume runlevel
    # 2, which is Debian's default. If not, all service related states
    # will throw an out of range exception here which will cause
    # other functions to fail.
    if "unknown" in out:
        return "2"
    else:
        # `runlevel` prints "<previous> <current>"; take the current one.
        return out.split()[1]
def get_enabled():
    """
    Return a list of service that are enabled on boot
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_enabled
    """
    # Start ("S") links for the current runlevel (and the S level), e.g.
    # /etc/rc2.d/S01ssh -> service name "ssh".
    prefix = "/etc/rc[S{}].d/S".format(_get_runlevel())
    names = {
        re.split(r"\d+", path.rsplit(os.sep, 1)[-1])[-1]
        for path in glob.glob("{}*".format(prefix))
    }
    return sorted(names)
def get_disabled():
    """
    Return a set of services that are installed but disabled
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_disabled
    """
    installed = set(get_all())
    return sorted(installed.difference(get_enabled()))
def available(name):
    """
    Returns ``True`` if the specified service is available, otherwise returns
    ``False``.
    CLI Example:
    .. code-block:: bash
        salt '*' service.available sshd
    """
    return name in get_all()
def missing(name):
    """
    The inverse of service.available.
    Returns ``True`` if the specified service is not available, otherwise returns
    ``False``.
    CLI Example:
    .. code-block:: bash
        salt '*' service.missing sshd
    """
    return name not in get_all()
def get_all():
    """
    Return all available boot services
    CLI Example:
    .. code-block:: bash
        salt '*' service.get_all
    """
    services = {
        path.split("/etc/init.d/")[1]
        for path in glob.glob("/etc/init.d/*")
    }
    # Remove README. If it's an enabled service, it will be added back in.
    services.discard("README")
    return sorted(services | set(get_enabled()))
def start(name):
    """
    Start the specified service
    CLI Example:
    .. code-block:: bash
        salt '*' service.start <service name>
    """
    # retcode 0 means success, hence the negation.
    cmd = _service_cmd(name, "start")
    return not __salt__["cmd.retcode"](cmd)
def stop(name):
    """
    Stop the specified service
    CLI Example:
    .. code-block:: bash
        salt '*' service.stop <service name>
    """
    # retcode 0 means success, hence the negation.
    cmd = _service_cmd(name, "stop")
    return not __salt__["cmd.retcode"](cmd)
def restart(name):
    """
    Restart the named service
    CLI Example:
    .. code-block:: bash
        salt '*' service.restart <service name>
    """
    # retcode 0 means success, hence the negation.
    cmd = _service_cmd(name, "restart")
    return not __salt__["cmd.retcode"](cmd)
def reload_(name):
    """
    Reload the named service
    CLI Example:
    .. code-block:: bash
        salt '*' service.reload <service name>
    """
    # Exposed as service.reload via __func_alias__ at the top of the file.
    cmd = _service_cmd(name, "reload")
    return not __salt__["cmd.retcode"](cmd)
def force_reload(name):
    """
    Force-reload the named service
    CLI Example:
    .. code-block:: bash
        salt '*' service.force_reload <service name>
    """
    # retcode 0 means success, hence the negation.
    cmd = _service_cmd(name, "force-reload")
    return not __salt__["cmd.retcode"](cmd)
def status(name, sig=None):
    """
    Return the status for a service.
    If the name contains globbing, a dict mapping service name to True/False
    values is returned.
    .. versionchanged:: 2018.3.0
        The service name can now be a glob (e.g. ``salt*``)
    Args:
        name (str): The name of the service to check
        sig (str): Signature to use to find the service via ps
    Returns:
        bool: True if running, False otherwise
        dict: Maps service name to True if running, False otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.status <service name> [service signature]
    """
    # A signature short-circuits everything: just look for the process.
    if sig:
        return bool(__salt__["status.pid"](sig))
    contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name))
    if contains_globbing:
        services = fnmatch.filter(get_all(), name)
    else:
        services = [name]
    results = {}
    for service in services:
        cmd = _service_cmd(service, "status")
        # ignore_retcode: a non-zero exit simply means "not running".
        results[service] = not __salt__["cmd.retcode"](cmd, ignore_retcode=True)
    if contains_globbing:
        return results
    return results[name]
def enable(name, **kwargs):
    """
    Enable the named service to start at boot
    CLI Example:
    .. code-block:: bash
        salt '*' service.enable <service name>
    """
    # insserv registers the init script, then update-rc.d enables it;
    # python_shell=True is required for the '&&' chaining, so the name
    # is shell-quoted.
    cmd = "insserv {0} && update-rc.d {0} enable".format(shlex.quote(name))
    return not __salt__["cmd.retcode"](cmd, python_shell=True)
def disable(name, **kwargs):
    """
    Disable the named service to start at boot
    CLI Example:
    .. code-block:: bash
        salt '*' service.disable <service name>
    """
    # Quote the service name (as enable() already does) so a name
    # containing whitespace or shell metacharacters cannot alter the
    # generated command line.
    cmd = "update-rc.d {} disable".format(shlex.quote(name))
    return not __salt__["cmd.retcode"](cmd)
def enabled(name, **kwargs):
    """
    Return True if the named service is enabled, false otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.enabled <service name>
    """
    return name in get_enabled()
def disabled(name):
    """
    Return True if the named service is disabled, false otherwise
    CLI Example:
    .. code-block:: bash
        salt '*' service.disabled <service name>
    """
    return name in get_disabled()
| |
'''
Several testcases around mutators
'''
import sys, getopt
sys.path.append("..")
import unittest
import utils, helpers
from Peach.Mutators import *
from Peach.Engine.engine import *
# Mutator set exercised by these testcases; names are dotted paths under
# Peach.Mutators.  The commented-out entries are currently excluded from
# the runs.
utils.Utils.mutators = [
    "default.NullMutator",
    "string.StringTokenMutator",
    "string.XmlW3CMutator",
    "string.PathMutator",
    "string.HostnameMutator",
    "string.FilenameMutator",
    "number.NumericalEdgeCaseMutator",
    "number.NumericalVarianceMutator",
    "number.FiniteRandomNumbersMutator",
    "blob.BitFlipperMutator",
    "datatree.DataTreeRemoveMutator",
    "datatree.DataTreeDuplicateMutator",
    "datatree.DataTreeSwapNearNodesMutator"
    #"array.ArrayVaranceMutator",
    #"array.ArrayNumericalEdgeCasesMutator",
    #"array.ArrayReverseOrderMutator",
    #"array.ArrayRandomizeOrderMutator",
    #"size.SizedVaranceMutator",
    #"size.SizedNumericalEdgeCasesMutator",
    ]
def suite():
	# Assemble the mutator test suite in a fixed, explicit order.
	case_classes = (MutatorsCountTest, MutatorsGetState, MutatorsRunSingle,
		MutatorsRunCombo, MutatorSize)
	mutator_suite = unittest.TestSuite()
	for case_class in case_classes:
		mutator_suite.addTest(case_class())
	return mutator_suite
class MutatorsCountTest(utils.PeachTestCaseNoAgent):
	# Runs the combo pit file once per mutator, then polls until every
	# mutator reports a test-case count (getCount() != -1), summing them.
	def runTest(self):
		parse = ParseTemplate()
		peach = parse.parse("file:mutatorsCombo.xml")
		test = peach["DefaultRun"].tests[0]
		stateMachine = test.stateMachine
		stateEngine = StateEngine(peach["DefaultRun"], stateMachine, test.publisher)
		mutators = []
		for m in test.getMutators():
			mutators.append(m.mutator)
		mutator = MutatorCollection(mutators)
		print "Running %d iterations" % (len(mutators)+1)
		for i in range(len(mutators)+1):
			print ".",
			# Run state machine
			mutator.getCount()
			stateEngine.run(mutator)
			mutator.next()
		# now get values!
		print "\nWaiting for count"
		# Poll for up to `timeout` seconds: counts are computed asynchronously,
		# so keep retrying until no mutator still reports -1.
		timeout = 200
		goNext = False
		count = 0
		for i in range(timeout):
			mutator.getCount()
			print ".",
			time.sleep(1)
			# Recompute from scratch each pass; partial sums are discarded.
			count = 0
			goNext = False
			for m in mutators:
				m.onStateMachineComplete(None)
				if m.getCount() == -1:
					print "%s did not have count yet" % m.name
					goNext = True
					#break
				count += m.getCount()
			if goNext:
				continue
			break
		print ""
		for m in mutators:
			print "%s:" % m.name, m.getCount()
		print "\nTotal Count:", count
		# goNext still True here means we hit the timeout with counts missing.
		assert not goNext, "Not everything counted okay"
class MutatorsGetState(utils.PeachTestCaseNoAgent):
	# Verifies getState()/setState() round-trips: state is saved and restored
	# both before and after a full run, then the run is repeated.
	def runTest(self):
		parse = ParseTemplate()
		peach = parse.parse("file:mutatorsCombo.xml")
		test = peach["DefaultRun"].tests[0]
		stateMachine = test.stateMachine
		stateEngine = StateEngine(peach["DefaultRun"], stateMachine, test.publisher)
		mutators = []
		for m in test.getMutators():
			mutators.append(m.mutator)
			# Round-trip state immediately after creation.
			state = m.mutator.getState()
			m.mutator.setState(state)
		mutator = MutatorCollection(mutators)
		print "Running %d iterations" % (len(mutators)+1)
		for i in range(len(mutators)+1):
			print ".",
			# Run state machine
			mutator.getCount()
			stateEngine.run(mutator)
			mutator.next()
		# Round-trip state again after the first full run.
		for m in mutators:
			state = m.getState()
			m.setState(state)
		mutator = MutatorCollection(mutators)
		print "\nRunning %d iterations" % (len(mutators)+1)
		for i in range(len(mutators)+1):
			print ".",
			# Run state machine
			stateEngine.run(mutator)
			mutator.next()
##class MutatorsRunMultiple(utils.PeachTestCaseNoAgent):
## def runTest(self):
## for m in utils.Utils.mutators:
## m = m[m.index('.')+1:]
## xmlFile = "file:mutatorsMultiple%s.xml" % m
##
## print "\nRunning: %s" % xmlFile
## watcher = helpers.UnittestEngineWatcher()
## engine = Engine()
## engine.Run(xmlFile, None, False, watcher)
##
## def countTestCases(self):
## return len(utils.Utils.mutators)
##
###class MutatorsRunSingle(utils.PeachTestCaseNoAgent):
### def runTest(self):
### xmlFile = "file:mutatorsSingleFiniteRandomNumbersMutator.xml"
###
### print "\nRunning: %s" % xmlFile
### watcher = helpers.UnittestEngineWatcher()
### engine = Engine()
### engine.Run(xmlFile, None, False, watcher)
###
### def countTestCases(self):
### return len(utils.Utils.mutators)
class MutatorSize(utils.PeachTestCaseNoAgent):
def runTest(self):
xmlFile = "file:mutatorSize.xml"
print "\nRunning: %s" % xmlFile
watcher = helpers.UnittestEngineWatcher()
engine = Engine()
engine.Run(xmlFile, None, False, watcher)
def countTestCases(self):
return len(utils.Utils.mutators)
class MutatorsRunSingle(utils.PeachTestCaseNoAgent):
def runTest(self):
for m in utils.Utils.mutators:
m = m[m.index('.')+1:]
xmlFile = "file:mutatorsSingle%s.xml" % m
print "\nRunning: %s" % xmlFile
watcher = helpers.UnittestEngineWatcher()
engine = Engine()
engine.Run(xmlFile, None, False, watcher)
def countTestCases(self):
return len(utils.Utils.mutators)
class MutatorsRunCombo(utils.PeachTestCaseNoAgent):
def runTest(self):
xmlFile = "file:mutatorsCombo.xml"
print "\nRunning: %s" % xmlFile
watcher = helpers.UnittestEngineWatcher()
engine = Engine()
engine.Run(xmlFile, None, False, watcher)
if __name__ == "__main__":
	# Allow running this module directly as a test script.
	unittest.main()
# end
#
#- Run all mutators all the way through
# - Single type element
# - Multiple type elements
# - Combo file
#- Count all mutators
#- Memory leak detection on long run
#- Run each through, snapshot results for comparison
# - Run compare tests
#
| |
# Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""Create Station-model plots."""
from enum import Enum
import numpy as np
from .wx_symbols import (current_weather, high_clouds, low_clouds, mid_clouds,
pressure_tendency, sky_cover, wx_symbol_font)
from ..package_tools import Exporter
exporter = Exporter(globals())
@exporter.export
class StationPlot(object):
    """Make a standard meteorological station plot.
    Plots values, symbols, or text spaced around a central location. Can also plot wind
    barbs as the center of the location.
    """
    # Compass-style location names mapped to (x, y) offset increments relative to the
    # station center; increments are multiplied by ``spacing`` when plotting.
    location_names = {'C': (0, 0), 'N': (0, 1), 'NE': (1, 1), 'E': (1, 0), 'SE': (1, -1),
                      'S': (0, -1), 'SW': (-1, -1), 'W': (-1, 0), 'NW': (-1, 1),
                      'N2': (0, 2), 'NNE': (1, 2), 'ENE': (2, 1), 'E2': (2, 0),
                      'ESE': (2, -1), 'SSE': (1, -2), 'S2': (0, -2), 'SSW': (-1, -2),
                      'WSW': (-2, -1), 'W2': (-2, 0), 'WNW': (-2, 1), 'NNW': (-1, 2)}
    def __init__(self, ax, x, y, fontsize=10, spacing=None, transform=None, **kwargs):
        """Initialize the StationPlot with items that do not change.
        This sets up the axes and station locations. The `fontsize` and `spacing`
        are also specified here to ensure that they are consistent between individual
        station elements.
        Parameters
        ----------
        ax : matplotlib.axes.Axes
            The :class:`~matplotlib.axes.Axes` for plotting
        x : array_like
            The x location of the stations in the plot
        y : array_like
            The y location of the stations in the plot
        fontsize : int
            The fontsize to use for drawing text
        spacing : int
            The spacing, in points, that corresponds to a single increment between
            station plot elements.
        transform : matplotlib.transforms.Transform (or compatible)
            The default transform to apply to the x and y positions when plotting.
        kwargs
            Additional keyword arguments to use for matplotlib's plotting functions.
            These will be passed to all the plotting methods, and thus need to be valid
            for all plot types, such as `clip_on`.
        """
        self.ax = ax
        # Coerce scalar station locations to 1-D arrays so downstream plotting
        # calls can treat x/y uniformly.
        self.x = np.atleast_1d(x)
        self.y = np.atleast_1d(y)
        self.fontsize = fontsize
        # By default one spacing increment equals the font size (in points).
        self.spacing = fontsize if spacing is None else spacing
        self.transform = transform
        # items maps an (xoff, yoff) location to the artist plotted there.
        self.items = {}
        self.barbs = None
        self.arrows = None
        self.default_kwargs = kwargs
    def plot_symbol(self, location, codes, symbol_mapper, **kwargs):
        """At the specified location in the station model plot a set of symbols.
        This specifies that at the offset `location`, the data in `codes` should be
        converted to unicode characters (for our :data:`wx_symbol_font`) using `symbol_mapper`,
        and plotted.
        Additional keyword arguments given will be passed onto the actual plotting
        code; this is useful for specifying things like color or font properties.
        If something has already been plotted at this location, it will be replaced.
        Parameters
        ----------
        location : str or tuple[float, float]
            The offset (relative to center) to plot this parameter. If str, should be one of
            'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
            specifying the number of increments in the x and y directions; increments
            are multiplied by `spacing` to give offsets in x and y relative to the center.
        codes : array_like
            The numeric values that should be converted to unicode characters for plotting.
        symbol_mapper : callable
            Controls converting data values to unicode code points for the
            :data:`wx_symbol_font` font. This should take a value and return a single unicode
            character. See :mod:`metpy.plots.wx_symbols` for included mappers.
        kwargs
            Additional keyword arguments to use for matplotlib's plotting functions.
        .. plot::
            import matplotlib.pyplot as plt
            import numpy as np
            from math import ceil
            from metpy.plots import StationPlot
            from metpy.plots.wx_symbols import current_weather, current_weather_auto
            from metpy.plots.wx_symbols import low_clouds, mid_clouds, high_clouds
            from metpy.plots.wx_symbols import sky_cover, pressure_tendency
            def plot_symbols(mapper, name, nwrap=12, figsize=(10, 1.4)):
                # Determine how many symbols there are and layout in rows of nwrap
                # if there are more than nwrap symbols
                num_symbols = len(mapper)
                codes = np.arange(len(mapper))
                ncols = nwrap
                if num_symbols <= nwrap:
                    nrows = 1
                    x = np.linspace(0, 1, len(mapper))
                    y = np.ones_like(x)
                    ax_height = 0.8
                else:
                    nrows = int(ceil(num_symbols / ncols))
                    x = np.tile(np.linspace(0, 1, ncols), nrows)[:num_symbols]
                    y = np.repeat(np.arange(nrows, 0, -1), ncols)[:num_symbols]
                    figsize = (10, 1 * nrows + 0.4)
                    ax_height = 0.8 + 0.018 * nrows
                fig = plt.figure(figsize=figsize, dpi=300)
                ax = fig.add_axes([0, 0, 1, ax_height])
                ax.set_title(name, size=20)
                ax.xaxis.set_ticks([])
                ax.yaxis.set_ticks([])
                ax.set_frame_on(False)
                # Plot
                sp = StationPlot(ax, x, y, fontsize=36)
                sp.plot_symbol('C', codes, mapper)
                sp.plot_parameter((0, -1), codes, fontsize=18)
                ax.set_ylim(-0.05, nrows + 0.5)
                plt.show()
            plot_symbols(current_weather, "Current Weather Symbols")
            plot_symbols(current_weather_auto, "Current Weather Auto Reported Symbols")
            plot_symbols(low_clouds, "Low Cloud Symbols")
            plot_symbols(mid_clouds, "Mid Cloud Symbols")
            plot_symbols(high_clouds, "High Cloud Symbols")
            plot_symbols(sky_cover, "Sky Cover Symbols")
            plot_symbols(pressure_tendency, "Pressure Tendency Symbols")
        See Also
        --------
        plot_barb, plot_parameter, plot_text
        """
        # Make sure we use our font for symbols
        kwargs['fontproperties'] = wx_symbol_font.copy()
        # A symbol plot is a parameter plot whose "formatter" maps codes to glyphs.
        return self.plot_parameter(location, codes, symbol_mapper, **kwargs)
    def plot_parameter(self, location, parameter, formatter='.0f', **kwargs):
        """At the specified location in the station model plot a set of values.
        This specifies that at the offset `location`, the data in `parameter` should be
        plotted. The conversion of the data values to a string is controlled by `formatter`.
        Additional keyword arguments given will be passed onto the actual plotting
        code; this is useful for specifying things like color or font properties.
        If something has already been plotted at this location, it will be replaced.
        Parameters
        ----------
        location : str or tuple[float, float]
            The offset (relative to center) to plot this parameter. If str, should be one of
            'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
            specifying the number of increments in the x and y directions; increments
            are multiplied by `spacing` to give offsets in x and y relative to the center.
        parameter : array_like
            The numeric values that should be plotted
        formatter : str or callable, optional
            How to format the data as a string for plotting. If a string, it should be
            compatible with the :func:`format` builtin. If a callable, this should take a
            value and return a string. Defaults to '.0f'.
        plot_units: `pint.unit`
            Units to plot in (performing conversion if necessary). Defaults to given units.
        kwargs
            Additional keyword arguments to use for matplotlib's plotting functions.
        See Also
        --------
        plot_barb, plot_symbol, plot_text
        """
        # If plot_units specified, convert the data to those units
        plotting_units = kwargs.pop('plot_units', None)
        parameter = self._scalar_plotting_units(parameter, plotting_units)
        # Strip pint units (if any) before string formatting.
        if hasattr(parameter, 'units'):
            parameter = parameter.magnitude
        text = self._to_string_list(parameter, formatter)
        return self.plot_text(location, text, **kwargs)
    def plot_text(self, location, text, **kwargs):
        """At the specified location in the station model plot a collection of text.
        This specifies that at the offset `location`, the strings in `text` should be
        plotted.
        Additional keyword arguments given will be passed onto the actual plotting
        code; this is useful for specifying things like color or font properties.
        If something has already been plotted at this location, it will be replaced.
        Parameters
        ----------
        location : str or tuple[float, float]
            The offset (relative to center) to plot this parameter. If str, should be one of
            'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
            specifying the number of increments in the x and y directions; increments
            are multiplied by `spacing` to give offsets in x and y relative to the center.
        text : list (or array) of strings
            The strings that should be plotted
        kwargs
            Additional keyword arguments to use for matplotlib's plotting functions.
        See Also
        --------
        plot_barb, plot_parameter, plot_symbol
        """
        location = self._handle_location(location)
        kwargs = self._make_kwargs(kwargs)
        # NOTE: scattertext is a MetPy extension to the Axes, not stock matplotlib.
        text_collection = self.ax.scattertext(self.x, self.y, text, loc=location,
                                              size=kwargs.pop('fontsize', self.fontsize),
                                              **kwargs)
        # Replace anything previously plotted at this offset.
        if location in self.items:
            self.items[location].remove()
        self.items[location] = text_collection
        return text_collection
    def plot_barb(self, u, v, **kwargs):
        r"""At the center of the station model plot wind barbs.
        Additional keyword arguments given will be passed onto matplotlib's
        :meth:`~matplotlib.axes.Axes.barbs` function; this is useful for specifying things
        like color or line width.
        Parameters
        ----------
        u : array-like
            The data to use for the u-component of the barbs.
        v : array-like
            The data to use for the v-component of the barbs.
        plot_units: `pint.unit`
            Units to plot in (performing conversion if necessary). Defaults to given units.
        kwargs
            Additional keyword arguments to pass to matplotlib's
            :meth:`~matplotlib.axes.Axes.barbs` function.
        See Also
        --------
        plot_arrow, plot_parameter, plot_symbol, plot_text
        """
        kwargs = self._make_kwargs(kwargs)
        # If plot_units specified, convert the data to those units
        plotting_units = kwargs.pop('plot_units', None)
        u, v = self._vector_plotting_units(u, v, plotting_units)
        # Empirically determined scaling of barb size with font size.
        pivot = 0.51 * np.sqrt(self.fontsize)
        length = 1.95 * np.sqrt(self.fontsize)
        defaults = {'sizes': {'spacing': .15, 'height': 0.5, 'emptybarb': 0.35},
                    'length': length, 'pivot': pivot}
        defaults.update(kwargs)
        # Remove old barbs
        if self.barbs:
            self.barbs.remove()
        self.barbs = self.ax.barbs(self.x, self.y, u, v, **defaults)
    def plot_arrow(self, u, v, **kwargs):
        r"""At the center of the station model plot wind arrows.
        Additional keyword arguments given will be passed onto matplotlib's
        :meth:`~matplotlib.axes.Axes.quiver` function; this is useful for specifying things
        like color or line width.
        Parameters
        ----------
        u : array-like
            The data to use for the u-component of the arrows.
        v : array-like
            The data to use for the v-component of the arrows.
        plot_units: `pint.unit`
            Units to plot in (performing conversion if necessary). Defaults to given units.
        kwargs
            Additional keyword arguments to pass to matplotlib's
            :meth:`~matplotlib.axes.Axes.quiver` function.
        See Also
        --------
        plot_barb, plot_parameter, plot_symbol, plot_text
        """
        kwargs = self._make_kwargs(kwargs)
        # If plot_units specified, convert the data to those units
        plotting_units = kwargs.pop('plot_units', None)
        u, v = self._vector_plotting_units(u, v, plotting_units)
        defaults = {'pivot': 'tail', 'scale': 20, 'scale_units': 'inches', 'width': 0.002}
        defaults.update(kwargs)
        # Remove old arrows
        if self.arrows:
            self.arrows.remove()
        self.arrows = self.ax.quiver(self.x, self.y, u, v, **defaults)
    @staticmethod
    def _vector_plotting_units(u, v, plotting_units):
        """Handle conversion to plotting units for barbs and arrows."""
        if plotting_units:
            if hasattr(u, 'units') and hasattr(v, 'units'):
                u = u.to(plotting_units)
                v = v.to(plotting_units)
            else:
                raise ValueError('To convert to plotting units, units must be attached to '
                                 'u and v wind components.')
        # Strip units, CartoPy transform doesn't like
        u = np.array(u)
        v = np.array(v)
        return u, v
    @staticmethod
    def _scalar_plotting_units(scalar_value, plotting_units):
        """Handle conversion to plotting units for a scalar parameter.
        Requires `scalar_value` to carry units when `plotting_units` is given.
        """
        if plotting_units:
            if hasattr(scalar_value, 'units'):
                scalar_value = scalar_value.to(plotting_units)
            else:
                raise ValueError('To convert to plotting units, units must be attached to '
                                 'scalar value being converted.')
        return scalar_value
    def _make_kwargs(self, kwargs):
        """Assemble kwargs as necessary.
        Inserts our defaults as well as ensures transform is present when appropriate.
        """
        # Use default kwargs and update with additional ones
        all_kw = self.default_kwargs.copy()
        all_kw.update(kwargs)
        # Pass transform if necessary
        if 'transform' not in all_kw and self.transform:
            all_kw['transform'] = self.transform
        return all_kw
    @staticmethod
    def _to_string_list(vals, fmt):
        """Convert a sequence of values to a list of strings.
        Non-finite values (NaN/inf) become empty strings so they plot as blanks.
        """
        if not callable(fmt):
            def formatter(s):
                """Turn a format string into a callable."""
                return format(s, fmt)
        else:
            formatter = fmt
        return [formatter(v) if np.isfinite(v) else '' for v in vals]
    def _handle_location(self, location):
        """Process locations to get a consistent set of tuples for location.
        Returns the offset in points: (x_increments * spacing, y_increments * spacing).
        """
        if isinstance(location, str):
            location = self.location_names[location]
        xoff, yoff = location
        return xoff * self.spacing, yoff * self.spacing
@exporter.export
class StationPlotLayout(dict):
    r"""Make a layout to encapsulate plotting using :class:`StationPlot`.
    This class keeps a collection of offsets, plot formats, etc. for a parameter based
    on its name. This then allows a dictionary of data (or any object that allows looking
    up of arrays based on a name) to be passed to :meth:`plot()` to plot the data all at once.
    See Also
    --------
    StationPlot
    """
    class PlotTypes(Enum):
        r"""Different plotting types for the layout.
        Controls how items are displayed (e.g. converting values to symbols).
        """
        value = 1
        symbol = 2
        text = 3
        barb = 4
    def add_value(self, location, name, fmt='.0f', units=None, **kwargs):
        r"""Add a numeric value to the station layout.
        This specifies that at the offset `location`, data should be pulled from the data
        container using the key `name` and plotted. The conversion of the data values to
        a string is controlled by `fmt`. The units required for plotting can also
        be passed in using `units`, which will cause the data to be converted before
        plotting.
        Additional keyword arguments given will be passed onto the actual plotting
        code; this is useful for specifying things like color or font properties.
        Parameters
        ----------
        location : str or tuple[float, float]
            The offset (relative to center) to plot this value. If str, should be one of
            'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
            specifying the number of increments in the x and y directions.
        name : str
            The name of the parameter, which is used as a key to pull data out of the
            data container passed to :meth:`plot`.
        fmt : str or callable, optional
            How to format the data as a string for plotting. If a string, it should be
            compatible with the :func:`format` builtin. If a callable, this should take a
            value and return a string. Defaults to '.0f'.
        units : pint-compatible unit, optional
            The units to use for plotting. Data will be converted to this unit before
            conversion to a string. If not specified, no conversion is done.
        kwargs
            Additional keyword arguments to use for matplotlib's plotting functions.
        See Also
        --------
        add_barb, add_symbol, add_text
        """
        # Stored as (type tag, data key, plot-specific args) keyed by location.
        self[location] = (self.PlotTypes.value, name, (fmt, units, kwargs))
    def add_symbol(self, location, name, symbol_mapper, **kwargs):
        r"""Add a symbol to the station layout.
        This specifies that at the offset `location`, data should be pulled from the data
        container using the key `name` and plotted. Data values will converted to glyphs
        appropriate for MetPy's symbol font using the callable `symbol_mapper`.
        Additional keyword arguments given will be passed onto the actual plotting
        code; this is useful for specifying things like color or font properties.
        Parameters
        ----------
        location : str or tuple[float, float]
            The offset (relative to center) to plot this value. If str, should be one of
            'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
            specifying the number of increments in the x and y directions.
        name : str
            The name of the parameter, which is used as a key to pull data out of the
            data container passed to :meth:`plot`.
        symbol_mapper : callable
            Controls converting data values to unicode code points for the
            :data:`wx_symbol_font` font. This should take a value and return a single unicode
            character. See :mod:`metpy.plots.wx_symbols` for included mappers.
        kwargs
            Additional keyword arguments to use for matplotlib's plotting functions.
        See Also
        --------
        add_barb, add_text, add_value
        """
        self[location] = (self.PlotTypes.symbol, name, (symbol_mapper, kwargs))
    def add_text(self, location, name, **kwargs):
        r"""Add a text field to the station layout.
        This specifies that at the offset `location`, data should be pulled from the data
        container using the key `name` and plotted directly as text with no conversion
        applied.
        Additional keyword arguments given will be passed onto the actual plotting
        code; this is useful for specifying things like color or font properties.
        Parameters
        ----------
        location : str or tuple(float, float)
            The offset (relative to center) to plot this value. If str, should be one of
            'C', 'N', 'NE', 'E', 'SE', 'S', 'SW', 'W', or 'NW'. Otherwise, should be a tuple
            specifying the number of increments in the x and y directions.
        name : str
            The name of the parameter, which is used as a key to pull data out of the
            data container passed to :meth:`plot`.
        kwargs
            Additional keyword arguments to use for matplotlib's plotting functions.
        See Also
        --------
        add_barb, add_symbol, add_value
        """
        self[location] = (self.PlotTypes.text, name, kwargs)
    def add_barb(self, u_name, v_name, units=None, **kwargs):
        r"""Add a wind barb to the center of the station layout.
        This specifies that u- and v-component data should be pulled from the data
        container using the keys `u_name` and `v_name`, respectively, and plotted as
        a wind barb at the center of the station plot. If `units` are given, both
        components will be converted to these units.
        Additional keyword arguments given will be passed onto the actual plotting
        code; this is useful for specifying things like color or line width.
        Parameters
        ----------
        u_name : str
            The name of the parameter for the u-component for `barbs`, which is used as
            a key to pull data out of the data container passed to :meth:`plot`.
        v_name : str
            The name of the parameter for the v-component for `barbs`, which is used as
            a key to pull data out of the data container passed to :meth:`plot`.
        units : pint-compatible unit, optional
            The units to use for plotting. Data will be converted to this unit before
            conversion to a string. If not specified, no conversion is done.
        kwargs
            Additional keyword arguments to use for matplotlib's
            :meth:`~matplotlib.axes.Axes.barbs` function.
        See Also
        --------
        add_symbol, add_text, add_value
        """
        # Not sure if putting the v_name as a plot-specific option is appropriate,
        # but it seems simpler than making name code in plot handle tuples
        self['barb'] = (self.PlotTypes.barb, (u_name, v_name), (units, kwargs))
    def names(self):
        """Get the list of names used by the layout.
        Returns
        -------
        list[str]
            the list of names of variables used by the layout
        """
        ret = []
        for item in self.values():
            # Barb entries store a (u_name, v_name) tuple; flatten it.
            if item[0] == self.PlotTypes.barb:
                ret.extend(item[1])
            else:
                ret.append(item[1])
        return ret
    def plot(self, plotter, data_dict):
        """Plot a collection of data using this layout for a station plot.
        This function iterates through the entire specified layout, pulling the fields named
        in the layout from `data_dict` and plotting them using `plotter` as specified
        in the layout. Fields present in the layout, but not in `data_dict`, are ignored.
        Parameters
        ----------
        plotter : StationPlot
            :class:`StationPlot` to use to plot the data. This controls the axes,
            spacing, station locations, etc.
        data_dict : dict[str, array-like]
            Data container that maps a name to an array of data. Data from this object
            will be used to fill out the station plot.
        """
        def coerce_data(dat, u):
            # Convert to the requested units and strip them; pass plain arrays through.
            try:
                return dat.to(u).magnitude
            except AttributeError:
                return dat
        for loc, info in self.items():
            typ, name, args = info
            if typ == self.PlotTypes.barb:
                # Try getting the data
                u_name, v_name = name
                u_data = data_dict.get(u_name)
                v_data = data_dict.get(v_name)
                # Plot if we have the data
                if not (v_data is None or u_data is None):
                    units, kwargs = args
                    plotter.plot_barb(coerce_data(u_data, units), coerce_data(v_data, units),
                                      **kwargs)
            else:
                # Check that we have the data for this location
                data = data_dict.get(name)
                if data is not None:
                    # If we have it, hand it to the appropriate method
                    if typ == self.PlotTypes.value:
                        fmt, units, kwargs = args
                        plotter.plot_parameter(loc, coerce_data(data, units), fmt, **kwargs)
                    elif typ == self.PlotTypes.symbol:
                        mapper, kwargs = args
                        plotter.plot_symbol(loc, data, mapper, **kwargs)
                    elif typ == self.PlotTypes.text:
                        plotter.plot_text(loc, data, **args)
    def __repr__(self):
        """Return string representation of layout."""
        return ('{'
                + ', '.join('{0}: ({1[0].name}, {1[1]}, ...)'.format(loc, info)
                            for loc, info in sorted(self.items()))
                + '}')
# Pre-built layouts registered as public package attributes via the exporter.
with exporter:
    #: :desc: Simple station plot layout
    simple_layout = StationPlotLayout()
    simple_layout.add_barb('eastward_wind', 'northward_wind', 'knots')
    simple_layout.add_value('NW', 'air_temperature', units='degC')
    simple_layout.add_value('SW', 'dew_point_temperature', units='degC')
    # Pressure plotted as the traditional 3-digit coded form (tens, units, tenths).
    simple_layout.add_value('NE', 'air_pressure_at_sea_level', units='mbar',
                            fmt=lambda v: format(10 * v, '03.0f')[-3:])
    simple_layout.add_symbol('C', 'cloud_coverage', sky_cover)
    simple_layout.add_symbol('W', 'present_weather', current_weather)
    #: Full NWS station plot `layout`__
    #:
    #: __ http://oceanservice.noaa.gov/education/yos/resource/JetStream/synoptic/wxmaps.htm
    nws_layout = StationPlotLayout()
    nws_layout.add_value((-1, 1), 'air_temperature', units='degF')
    nws_layout.add_symbol((0, 2), 'high_cloud_type', high_clouds)
    nws_layout.add_symbol((0, 1), 'medium_cloud_type', mid_clouds)
    nws_layout.add_symbol((0, -1), 'low_cloud_type', low_clouds)
    nws_layout.add_value((1, 1), 'air_pressure_at_sea_level', units='mbar',
                         fmt=lambda v: format(10 * v, '03.0f')[-3:])
    nws_layout.add_value((-2, 0), 'visibility_in_air', fmt='.0f', units='miles')
    nws_layout.add_symbol((-1, 0), 'present_weather', current_weather)
    nws_layout.add_symbol((0, 0), 'cloud_coverage', sky_cover)
    # Tendency shown as signed tenths of a millibar, two digits.
    nws_layout.add_value((1, 0), 'tendency_of_air_pressure', units='mbar',
                         fmt=lambda v: ('-' if v < 0 else '') + format(10 * abs(v), '02.0f'))
    nws_layout.add_symbol((2, 0), 'tendency_of_air_pressure_symbol', pressure_tendency)
    nws_layout.add_barb('eastward_wind', 'northward_wind', units='knots')
    nws_layout.add_value((-1, -1), 'dew_point_temperature', units='degF')
    # TODO: Fix once we have the past weather symbols converted
    nws_layout.add_symbol((1, -1), 'past_weather', current_weather)
| |
from setuptools import setup, Extension, find_packages
import subprocess
import errno
import re
import os
import shutil
import sys
import zipfile
from urllib.request import urlretrieve
import numpy
from Cython.Build import cythonize
# Platform detection used to decide how lensfun is built/located.
isWindows = os.name == 'nt'
isMac = sys.platform == 'darwin'
is64Bit = sys.maxsize > 2**32
# adapted from cffi's setup.py
# the following may be overridden if pkg-config exists
libraries = ['lensfun']
include_dirs = []
library_dirs = []
extra_compile_args = []
extra_link_args = []
def _ask_pkg_config(resultlist, option, result_prefix='', sysroot=False):
pkg_config = os.environ.get('PKG_CONFIG','pkg-config')
try:
p = subprocess.Popen([pkg_config, option, 'lensfun'],
stdout=subprocess.PIPE)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
t = p.stdout.read().decode().strip()
if p.wait() == 0:
res = t.split()
# '-I/usr/...' -> '/usr/...'
for x in res:
assert x.startswith(result_prefix)
res = [x[len(result_prefix):] for x in res]
sysroot = sysroot and os.environ.get('PKG_CONFIG_SYSROOT_DIR', '')
if sysroot:
# old versions of pkg-config don't support this env var,
# so here we emulate its effect if needed
res = [path if path.startswith(sysroot)
else sysroot + path
for path in res]
resultlist[:] = res
def use_pkg_config():
    """Populate the module-level build-flag lists from pkg-config."""
    queries = (
        (include_dirs, '--cflags-only-I', '-I', True),
        (extra_compile_args, '--cflags-only-other', '', False),
        (library_dirs, '--libs-only-L', '-L', True),
        (extra_link_args, '--libs-only-other', '', False),
        (libraries, '--libs-only-l', '-l', False),
    )
    for target, option, prefix, want_sysroot in queries:
        _ask_pkg_config(target, option, prefix, sysroot=want_sysroot)
if isWindows or isMac:
    # On Windows/macOS lensfun is built from the bundled git submodule via
    # CMake and installed locally; point the compiler at that install tree.
    cmake_build = os.path.abspath('external/lensfun/build')
    install_dir = os.path.join(cmake_build, 'install')
    include_dirs += [os.path.join(install_dir, 'include', 'lensfun')]
    library_dirs += [os.path.join(install_dir, 'lib')]
else:
    # Elsewhere, locate a system-installed lensfun via pkg-config.
    use_pkg_config()
# this must be after use_pkg_config()!
include_dirs += [numpy.get_include()]
# for version_helper.h
include_dirs += [os.path.abspath('lensfunpy')]
def clone_submodules():
    """Ensure the lensfun git submodule has been checked out."""
    if os.path.exists('external/lensfun/README.md'):
        return
    print('lensfun git submodule not cloned yet, will invoke "git submodule update --init" now')
    if os.system('git submodule update --init') != 0:
        raise Exception('git failed')
def windows_lensfun_compile():
    """Build lensfun (and its glib dependency) on Windows.

    Downloads CMake and vcpkg, builds glib with vcpkg, patches and builds the
    bundled lensfun via NMake, and copies the runtime DLLs into lensfunpy/.
    Exits the process on any build-step failure.
    """
    clone_submodules()
    cwd = os.getcwd()
    # Download cmake to build lensfun
    cmake_version = '3.13.4'
    cmake_url = 'https://github.com/Kitware/CMake/releases/download/v{v}/cmake-{v}-win32-x86.zip'.format(v=cmake_version)
    cmake = os.path.abspath('external/cmake-{}-win32-x86/bin/cmake.exe'.format(cmake_version))
    # Download vcpkg to build dependencies of lensfun
    vcpkg_commit = '2021.05.12'
    vcpkg_url = 'https://github.com/Microsoft/vcpkg/archive/{}.zip'.format(vcpkg_commit)
    vcpkg_dir = os.path.abspath('external/vcpkg-{}'.format(vcpkg_commit))
    vcpkg_bootstrap = os.path.join(vcpkg_dir, 'bootstrap-vcpkg.bat')
    vcpkg = os.path.join(vcpkg_dir, 'vcpkg.exe')
    files = [(cmake_url, 'external', cmake),
             (vcpkg_url, 'external', vcpkg_bootstrap)]
    for url, extractdir, extractcheck in files:
        if not os.path.exists(extractcheck):
            path = 'external/' + os.path.basename(url)
            if not os.path.exists(path):
                print('Downloading', url)
                try:
                    urlretrieve(url, path)
                except Exception:
                    # repeat once in case of network issues; narrow except so
                    # KeyboardInterrupt/SystemExit are not swallowed
                    urlretrieve(url, path)
            with zipfile.ZipFile(path) as z:
                print('Extracting', path, 'into', extractdir)
                z.extractall(extractdir)
            if not os.path.exists(path):
                raise RuntimeError(path + ' not found!')
    # Bootstrap vcpkg
    os.chdir(vcpkg_dir)
    if not os.path.exists(vcpkg):
        code = os.system(vcpkg_bootstrap)
        if code != 0:
            sys.exit(code)
    # lensfun depends on glib2, so let's build it with vcpkg
    vcpkg_arch = 'x64' if is64Bit else 'x86'
    vcpkg_triplet = '{}-windows'.format(vcpkg_arch)
    code = os.system(vcpkg + ' install glib:' + vcpkg_triplet)
    if code != 0:
        sys.exit(code)
    vcpkg_install_dir = os.path.join(vcpkg_dir, 'installed', vcpkg_triplet)
    # bundle runtime dlls
    vcpkg_bin_dir = os.path.join(vcpkg_install_dir, 'bin')
    glib2_dll = os.path.join(vcpkg_bin_dir, 'glib-2.0-0.dll')
    # configure and compile lensfun
    if not os.path.exists(cmake_build):
        os.mkdir(cmake_build)
    os.chdir(cmake_build)
    # temporary hack to avoid https://stackoverflow.com/a/53547931
    # (python module not needed here anyway)
    patch_path = '../apps/CMakeLists.txt'
    with open(patch_path) as f:
        content = f.read()
    content = content.replace('IF(PYTHON)', 'IF(FALSE)')
    with open(patch_path, 'w') as f:
        f.write(content)
    cmds = [cmake + ' .. -G "NMake Makefiles" -DCMAKE_BUILD_TYPE=Release ' +\
            '-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off ' +\
            '-DCMAKE_TOOLCHAIN_FILE={}/scripts/buildsystems/vcpkg.cmake '.format(vcpkg_dir) +\
            '-DGLIB2_BASE_DIR={} -DGLIB2_DLL={} -DCMAKE_INSTALL_PREFIX=install'.format(vcpkg_install_dir, glib2_dll),
            cmake + ' --build .',
            cmake + ' --build . --target install',
            ]
    for cmd in cmds:
        print(cmd)
        code = os.system(cmd)
        if code != 0:
            sys.exit(code)
    os.chdir(cwd)
    dll_runtime_libs = [('lensfun.dll', os.path.join(install_dir, 'bin')),
                        ('glib-2.0-0.dll', vcpkg_bin_dir),
                        # dependencies of glib
                        ('pcre.dll', vcpkg_bin_dir),
                        ('iconv-2.dll', vcpkg_bin_dir),
                        ('charset-1.dll', vcpkg_bin_dir),
                        ('intl-8.dll', vcpkg_bin_dir),
                        ]
    for filename, folder in dll_runtime_libs:
        src = os.path.join(folder, filename)
        dest = 'lensfunpy/' + filename
        print('copying', src, '->', dest)
        shutil.copyfile(src, dest)
def mac_lensfun_compile():
    """Configure, build and install lensfun on macOS using CMake.

    Relies on the module-level ``cmake_build`` / ``install_dir`` paths and
    exits the process with the failing command's status on any error.
    """
    clone_submodules()
    # Configure and compile lensfun in an out-of-source build directory.
    previous_dir = os.getcwd()
    if not os.path.exists(cmake_build):
        os.mkdir(cmake_build)
    os.chdir(cmake_build)
    # Bake the final library path into the dylib install name.
    name_dir = os.path.join(install_dir, 'lib')
    configure_cmd = ('cmake .. -DCMAKE_BUILD_TYPE=Release '
                     '-DBUILD_TESTS=off -DINSTALL_HELPER_SCRIPTS=off '
                     '-DCMAKE_INSTALL_PREFIX=install '
                     '-DCMAKE_INSTALL_NAME_DIR=' + name_dir)
    build_steps = (
        configure_cmd,
        'cmake --build .',
        'cmake --build . --target install',
    )
    for build_step in build_steps:
        print(build_step)
        status = os.system(build_step)
        if status != 0:
            sys.exit(status)
    os.chdir(previous_dir)
def bundle_db_files():
    """Copy the lensfun XML database files into lensfunpy/db_files.

    The files are taken from the external/lensfun submodule checkout so
    they can be shipped inside the Python package.
    """
    import glob
    target_dir = 'lensfunpy/db_files'
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    for xml_path in glob.glob('external/lensfun/data/db/*.xml'):
        target = os.path.join(target_dir, os.path.basename(xml_path))
        print('copying', xml_path, '->', target)
        shutil.copyfile(xml_path, target)
# Package data is filled in below depending on the commands being run.
package_data = {'lensfunpy': []}

# evil hack, check cmd line for relevant commands
# custom cmdclasses didn't work out in this case
cmdline = ''.join(sys.argv[1:])
needsCompile = any(s in cmdline for s in ['install', 'bdist', 'build_ext', 'wheel', 'nosetests'])
if isWindows and needsCompile:
    # Build lensfun (and dependencies via vcpkg) and bundle the runtime DLLs.
    windows_lensfun_compile()
    package_data['lensfunpy'].append('*.dll')

elif isMac and needsCompile:
    mac_lensfun_compile()

if any(s in cmdline for s in ['clean', 'sdist']):
    # When running sdist after a previous run of bdist or build_ext
    # then even with the 'clean' command the .egg-info folder stays.
    # This folder contains SOURCES.txt which in turn is used by sdist
    # to include package data files, but we don't want .dll's and .xml
    # files in our source distribution. Therefore, to prevent accidents,
    # we help a little...
    egg_info = 'lensfunpy.egg-info'
    print('removing', egg_info)
    shutil.rmtree(egg_info, ignore_errors=True)

if 'sdist' not in cmdline:
    # This assumes that the lensfun version from external/lensfun was used.
    # If that's not the case, the bundled files may fail to load, for example,
    # if lensfunpy was linked against an older lensfun version already on
    # the system (Linux mostly) and the database format changed in an incompatible way.
    # In that case, loading of bundled files can still be disabled
    # with Database(load_bundled=False).
    package_data['lensfunpy'].append('db_files/*.xml')
    bundle_db_files()

# Support for optional Cython line tracing
# run the following to generate a test coverage report:
# $ export LINETRACE=1
# $ python setup.py build_ext --inplace
# $ nosetests --with-coverage --cover-html --cover-package=lensfunpy
compdirectives = {}
macros = []
if (os.environ.get('LINETRACE', False)):
    compdirectives['linetrace'] = True
    macros.append(('CYTHON_TRACE', '1'))

# Cythonize the single extension module wrapping the lensfun C library.
extensions = cythonize([Extension("lensfunpy._lensfun",
              include_dirs=include_dirs,
              sources=[os.path.join('lensfunpy', '_lensfun.pyx')],
              libraries=libraries,
              library_dirs=library_dirs,
              extra_compile_args=extra_compile_args,
              extra_link_args=extra_link_args,
              define_macros=macros
             )],
             compiler_directives=compdirectives)

# make __version__ available (https://stackoverflow.com/a/16084844)
exec(open('lensfunpy/_version.py').read())

setup(
      name = 'lensfunpy',
      version = __version__,
      description = 'Lens distortion correction for Python, a wrapper for lensfun',
      long_description = open('README.rst').read(),
      author = 'Maik Riechert',
      author_email = 'maik.riechert@arcor.de',
      url = 'https://github.com/letmaik/lensfunpy',
      classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Cython',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Operating System :: MacOS',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Operating System :: Unix',
        'Topic :: Multimedia :: Graphics',
        'Topic :: Software Development :: Libraries',
      ],
      packages = find_packages(),
      ext_modules = extensions,
      package_data = package_data,
      install_requires=['numpy']
)
| |
# Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Nodes for unary and binary operations.
No short-circuit involved, boolean 'not' is an unary operation like '-' is,
no real difference.
"""
import copy
import math
from abc import abstractmethod
from nuitka import PythonOperators
from nuitka.Errors import NuitkaAssumptionError
from nuitka.PythonVersions import python_version
from .ExpressionBases import ExpressionChildrenHavingBase
from .NodeMakingHelpers import (
makeRaiseExceptionReplacementExpressionFromInstance,
wrapExpressionWithSideEffects,
)
from .shapes.BuiltinTypeShapes import tshape_bool, tshape_int_or_long
from .shapes.StandardShapes import (
ShapeLargeConstantValue,
ShapeLargeConstantValuePredictable,
vshape_unknown,
)
class ExpressionPropertiesFromTypeShapeMixin(object):
    """Derive default expression properties from ``self.type_shape``.

    Classes using this mixin must provide a ``type_shape`` attribute.
    """

    # Mixins are required to declare (empty) slots.
    __slots__ = ()

    def isKnownToBeHashable(self):
        # Hashability follows directly from the type shape.
        type_shape = self.type_shape
        return type_shape.hasShapeSlotHash()
class ExpressionOperationBinaryBase(
    ExpressionPropertiesFromTypeShapeMixin, ExpressionChildrenHavingBase
):
    """Base class for all binary operation expressions."""

    __slots__ = ("type_shape", "escape_desc", "inplace_suspect", "shape")

    named_children = ("left", "right")
    nice_children = tuple(child_name + " operand" for child_name in named_children)

    def __init__(self, left, right, source_ref):
        ExpressionChildrenHavingBase.__init__(
            self, values={"left": left, "right": right}, source_ref=source_ref
        )

        # Lazily computed by getTypeShape()/computeExpression().
        self.type_shape = None
        self.escape_desc = None

        # Set when an assignment suggests this result may be reused in-place.
        self.inplace_suspect = False

        # Value shape; replaced by subclasses when a large constant result is
        # predicted, to prevent compile-time evaluation of huge values.
        self.shape = vshape_unknown

    @staticmethod
    def isExpressionOperationBinary():
        return True

    def getOperator(self):
        return self.operator

    def markAsInplaceSuspect(self):
        self.inplace_suspect = True

    def unmarkAsInplaceSuspect(self):
        self.inplace_suspect = False

    def isInplaceSuspect(self):
        return self.inplace_suspect

    def getOperands(self):
        return (self.subnode_left, self.subnode_right)

    def mayRaiseExceptionOperation(self):
        # NOTE(review): assumes self.escape_desc was already computed (e.g. by
        # getTypeShape or computeExpression) — confirm with callers.
        return self.escape_desc.getExceptionExit() is not None

    def mayRaiseException(self, exception_type):
        # TODO: Match getExceptionExit() more precisely against exception type given
        # Unknown escape description (None) is treated conservatively as "may raise".
        return (
            self.escape_desc is None
            or self.escape_desc.getExceptionExit() is not None
            or self.subnode_left.mayRaiseException(exception_type)
            or self.subnode_right.mayRaiseException(exception_type)
        )

    def getTypeShape(self):
        # Question might be asked early on, later this is cached from last computation.
        if self.type_shape is None:
            self.type_shape, self.escape_desc = self._getOperationShape(
                self.subnode_left.getTypeShape(), self.subnode_right.getTypeShape()
            )
        return self.type_shape

    @abstractmethod
    def _getOperationShape(self, left_shape, right_shape):
        """Return (type_shape, escape_desc) for the given operand shapes."""
        pass

    @staticmethod
    def canCreateUnsupportedException(left_shape, right_shape):
        # Only shapes that provide a typical value can be used to simulate the
        # TypeError the operation would raise at run time.
        return hasattr(left_shape, "typical_value") and hasattr(
            right_shape, "typical_value"
        )

    def createUnsupportedException(self, left_shape, right_shape):
        """Simulate the operation on typical values to capture the TypeError raised.

        Raises NuitkaAssumptionError if the simulation does not behave as the
        shape analysis predicted (wrong exception type, or no exception).
        """
        left = left_shape.typical_value
        right = right_shape.typical_value

        try:
            self.simulator(left, right)
        except TypeError as e:
            return e
        except Exception as e:
            raise NuitkaAssumptionError(
                "Unexpected exception type doing operation simulation",
                self.operator,
                self.simulator,
                left_shape,
                right_shape,
                repr(left),
                repr(right),
                e,
                "!=",
            )
        else:
            raise NuitkaAssumptionError(
                "Unexpected no-exception doing operation simulation",
                self.operator,
                self.simulator,
                left_shape,
                right_shape,
                repr(left),
                repr(right),
            )

    @staticmethod
    def _isTooLarge():
        # Default: results are never considered too large to pre-compute;
        # mixins for '+' and '*' override this.
        return False

    def _simulateOperation(self, trace_collection):
        """Execute the operation at compile time on constant operands."""
        left_value = self.subnode_left.getCompileTimeConstant()
        right_value = self.subnode_right.getCompileTimeConstant()

        # Avoid mutating owned by nodes values and potentially shared.
        # NOTE(review): only the left value is copied — presumably because the
        # simulators only ever mutate the left operand; confirm.
        if self.subnode_left.isMutable():
            left_value = copy.copy(left_value)

        return trace_collection.getCompileTimeComputationResult(
            node=self,
            computation=lambda: self.simulator(left_value, right_value),
            description="Operator '%s' with constant arguments." % self.operator,
        )

    def computeExpression(self, trace_collection):
        """Optimize the node: constant-fold, detect unsupported operand shapes,
        and annotate escape/exception behavior on the trace collection."""
        # Nothing to do anymore for large constants.
        if self.shape is not None and self.shape.isConstant():
            return self, None, None

        left = self.subnode_left
        left_shape = left.getTypeShape()
        right = self.subnode_right
        right_shape = right.getTypeShape()

        self.type_shape, self.escape_desc = self._getOperationShape(
            left_shape, right_shape
        )

        if left.isCompileTimeConstant() and right.isCompileTimeConstant():
            if not self._isTooLarge():
                return self._simulateOperation(trace_collection)

        exception_raise_exit = self.escape_desc.getExceptionExit()
        if exception_raise_exit is not None:
            trace_collection.onExceptionRaiseExit(exception_raise_exit)

            # Operand shapes known to be incompatible: replace the whole node
            # with a raise of the simulated TypeError, keeping side effects.
            if self.escape_desc.isUnsupported() and self.canCreateUnsupportedException(
                left_shape, right_shape
            ):
                result = wrapExpressionWithSideEffects(
                    new_node=makeRaiseExceptionReplacementExpressionFromInstance(
                        expression=self,
                        exception=self.createUnsupportedException(
                            left_shape,
                            right_shape,
                        ),
                    ),
                    old_node=self,
                    side_effects=(left, right),
                )

                return (
                    result,
                    "new_raise",
                    "Replaced operator '%s' with %s %s arguments that cannot work."
                    % (self.operator, left_shape, right_shape),
                )

        if self.escape_desc.isValueEscaping():
            # The value of these nodes escaped and could change its contents.
            trace_collection.removeKnowledge(left)
            trace_collection.removeKnowledge(right)

        if self.escape_desc.isControlFlowEscape():
            # Any code could be run, note that.
            trace_collection.onControlFlowEscape(self)

        return self, None, None

    def canPredictIterationValues(self):
        # TODO: Actually we could very well, esp. for sequence repeats.
        # pylint: disable=no-self-use
        return False
class ExpressionOperationAddMixin(object):
    """Mixin for '+' nodes: predicts overly large concatenation results so
    they are not computed at compile time."""

    # Mixins are not allowed to specify slots, pylint: disable=assigning-non-slot
    __slots__ = ()

    def getValueShape(self):
        return self.shape

    def _isTooLarge(self):
        # Guard clause: without two known-iterable operands, nothing to predict.
        if not self.subnode_left.isKnownToBeIterable(
            None
        ) or not self.subnode_right.isKnownToBeIterable(None):
            return False

        combined_size = (
            self.subnode_left.getIterationLength()
            + self.subnode_right.getIterationLength()
        )

        # TODO: Actually could make a predictor, but we don't use it yet.
        self.shape = ShapeLargeConstantValuePredictable(
            size=combined_size,
            predictor=None,  # predictValuesFromRightAndLeftValue,
            shape=self.subnode_left.getTypeShape(),
        )

        return combined_size > 256
class ExpressionOperationBinaryAdd(
    ExpressionOperationAddMixin, ExpressionOperationBinaryBase
):
    """Binary '+' operation node, with large-result prediction via the mixin."""

    kind = "EXPRESSION_OPERATION_BINARY_ADD"

    # Removed redundant __init__ that only forwarded identically to the base
    # class constructor, matching sibling classes like ExpressionOperationBinarySub.

    operator = "Add"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        # Shape derivation is delegated to the left operand's type shape.
        return left_shape.getOperationBinaryAddShape(right_shape)
class ExpressionOperationBinarySub(ExpressionOperationBinaryBase):
    """Binary '-' operation node."""

    kind = "EXPRESSION_OPERATION_BINARY_SUB"
    operator = "Sub"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        # Shape derivation is delegated to the left operand's type shape.
        return left_shape.getOperationBinarySubShape(right_shape)
class ExpressionOperationMultMixin(object):
    """Mixin for '*' nodes: predicts overly large results (sequence repeats and
    huge integer products) so they are not computed at compile time."""

    # Mixins are not allowed to specify slots, pylint: disable=assigning-non-slot
    __slots__ = ()

    def getValueShape(self):
        return self.shape

    def _isTooLarge(self):
        """Return True when the product result is too large to pre-compute.

        Bug fix: the original had ``elif self.subnode_left.isNumberConstant():``
        attached to an ``if`` with the *identical* condition, making the
        number * sequence branch unreachable dead code. The number * number
        check belongs nested inside the right-is-number branch, with the
        ``elif`` handling the number * sequence case.
        """
        if self.subnode_right.isNumberConstant():
            # sequence * number: predicted size is len(left) * right.
            iter_length = self.subnode_left.getIterationLength()

            if iter_length is not None:
                size = iter_length * self.subnode_right.getCompileTimeConstant()
                if size > 256:
                    self.shape = ShapeLargeConstantValuePredictable(
                        size=size,
                        predictor=None,  # predictValuesFromRightAndLeftValue,
                        shape=self.subnode_left.getTypeShape(),
                    )

                    return True

            if self.subnode_left.isNumberConstant():
                if (
                    self.subnode_left.isIndexConstant()
                    and self.subnode_right.isIndexConstant()
                ):
                    # Estimate with logarithm, if the result of number
                    # calculations is computable with acceptable effort,
                    # otherwise, we will have to do it at runtime.
                    left_value = self.subnode_left.getCompileTimeConstant()

                    if left_value != 0:
                        right_value = self.subnode_right.getCompileTimeConstant()

                        # TODO: Is this really useful, can this be really slow.
                        if right_value != 0:
                            if (
                                math.log10(abs(left_value))
                                + math.log10(abs(right_value))
                                > 20
                            ):
                                self.shape = ShapeLargeConstantValue(
                                    size=None, shape=tshape_int_or_long
                                )

                                return True

        elif self.subnode_left.isNumberConstant():
            # number * sequence: predicted size is left * len(right).
            iter_length = self.subnode_right.getIterationLength()

            if iter_length is not None:
                left_value = self.subnode_left.getCompileTimeConstant()

                size = iter_length * left_value
                if size > 256:
                    self.shape = ShapeLargeConstantValuePredictable(
                        size=size,
                        predictor=None,  # predictValuesFromRightAndLeftValue,
                        shape=self.subnode_right.getTypeShape(),
                    )

                    return True

        return False
class ExpressionOperationBinaryMult(
    ExpressionOperationMultMixin, ExpressionOperationBinaryBase
):
    """Binary '*' operation node, with large-result prediction via the mixin."""

    kind = "EXPRESSION_OPERATION_BINARY_MULT"

    operator = "Mult"
    simulator = PythonOperators.binary_operator_functions[operator]

    # Removed redundant __init__ that only forwarded identically to the base
    # class constructor, matching sibling classes like ExpressionOperationBinarySub.

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        # Shape derivation is delegated to the left operand's type shape.
        return left_shape.getOperationBinaryMultShape(right_shape)

    def getIterationLength(self):
        """Predict the length of a sequence repeat when one operand's length
        and the other operand's integer value are both known."""
        left_length = self.subnode_left.getIterationLength()

        if left_length is not None:
            right_value = self.subnode_right.getIntegerValue()

            if right_value is not None:
                return left_length * right_value

        right_length = self.subnode_right.getIterationLength()

        if right_length is not None:
            left_value = self.subnode_left.getIntegerValue()

            if left_value is not None:
                return right_length * left_value

        return ExpressionOperationBinaryBase.getIterationLength(self)

    def extractSideEffects(self):
        """For a predictable sequence repeat, the node itself has no effect
        beyond its operands' side effects."""
        left_length = self.subnode_left.getIterationLength()

        if left_length is not None:
            right_value = self.subnode_right.getIntegerValue()

            if right_value is not None:
                return (
                    self.subnode_left.extractSideEffects()
                    + self.subnode_right.extractSideEffects()
                )

        right_length = self.subnode_right.getIterationLength()

        if right_length is not None:
            left_value = self.subnode_left.getIntegerValue()

            if left_value is not None:
                return (
                    self.subnode_left.extractSideEffects()
                    + self.subnode_right.extractSideEffects()
                )

        return ExpressionOperationBinaryBase.extractSideEffects(self)
class ExpressionOperationBinaryFloorDiv(ExpressionOperationBinaryBase):
    """Binary '//' (floor division) operation node."""

    kind = "EXPRESSION_OPERATION_BINARY_FLOOR_DIV"
    operator = "FloorDiv"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryFloorDivShape(right_shape)
if python_version < 0x300:

    class ExpressionOperationBinaryOldDiv(ExpressionOperationBinaryBase):
        """Python2-only classic '/' division operation node."""

        kind = "EXPRESSION_OPERATION_BINARY_OLD_DIV"
        operator = "OldDiv"
        simulator = PythonOperators.binary_operator_functions[operator]

        @staticmethod
        def _getOperationShape(left_shape, right_shape):
            return left_shape.getOperationBinaryOldDivShape(right_shape)
class ExpressionOperationBinaryTrueDiv(ExpressionOperationBinaryBase):
    """Binary '/' (true division) operation node."""

    kind = "EXPRESSION_OPERATION_BINARY_TRUE_DIV"
    operator = "TrueDiv"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryTrueDivShape(right_shape)
class ExpressionOperationBinaryMod(ExpressionOperationBinaryBase):
    """Binary '%' (modulo / string formatting) operation node."""

    kind = "EXPRESSION_OPERATION_BINARY_MOD"
    operator = "Mod"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryModShape(right_shape)
class ExpressionOperationBinaryDivmod(ExpressionOperationBinaryBase):
    """Node for the 'divmod' built-in; created only from that call, never
    from operator syntax (see the operator table below)."""

    kind = "EXPRESSION_OPERATION_BINARY_DIVMOD"

    operator = "Divmod"
    simulator = PythonOperators.binary_operator_functions[operator]

    # Removed redundant __init__ that only forwarded identically to the base
    # class constructor, matching sibling classes like ExpressionOperationBinarySub.

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryDivmodShape(right_shape)
class ExpressionOperationBinaryPow(ExpressionOperationBinaryBase):
    """Binary '**' (power) operation node."""

    kind = "EXPRESSION_OPERATION_BINARY_POW"
    operator = "Pow"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryPowShape(right_shape)
class ExpressionOperationBinaryLshift(ExpressionOperationBinaryBase):
    """Binary '<<' (left shift) operation node."""

    kind = "EXPRESSION_OPERATION_BINARY_LSHIFT"
    operator = "LShift"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryLShiftShape(right_shape)
class ExpressionOperationBinaryRshift(ExpressionOperationBinaryBase):
    """Binary '>>' (right shift) operation node."""

    kind = "EXPRESSION_OPERATION_BINARY_RSHIFT"
    operator = "RShift"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryRShiftShape(right_shape)
class ExpressionOperationBinaryBitOr(ExpressionOperationBinaryBase):
    """Binary '|' (bitwise or) operation node."""

    kind = "EXPRESSION_OPERATION_BINARY_BIT_OR"
    operator = "BitOr"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryBitOrShape(right_shape)
class ExpressionOperationBinaryBitAnd(ExpressionOperationBinaryBase):
    """Binary '&' (bitwise and) operation node."""

    kind = "EXPRESSION_OPERATION_BINARY_BIT_AND"
    operator = "BitAnd"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryBitAndShape(right_shape)
class ExpressionOperationBinaryBitXor(ExpressionOperationBinaryBase):
    """Binary '^' (bitwise xor) operation node."""

    kind = "EXPRESSION_OPERATION_BINARY_BIT_XOR"
    operator = "BitXor"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryBitXorShape(right_shape)
if python_version >= 0x350:

    class ExpressionOperationBinaryMatMult(ExpressionOperationBinaryBase):
        """Binary '@' (matrix multiplication) operation node, Python3.5+."""

        kind = "EXPRESSION_OPERATION_BINARY_MAT_MULT"
        operator = "MatMult"
        simulator = PythonOperators.binary_operator_functions[operator]

        @staticmethod
        def _getOperationShape(left_shape, right_shape):
            return left_shape.getOperationBinaryMatMultShape(right_shape)
# Mapping of operator names (as used by makeBinaryOperationNode) to node classes.
_operator2binary_operation_nodeclass = {
    "Add": ExpressionOperationBinaryAdd,
    "Sub": ExpressionOperationBinarySub,
    "Mult": ExpressionOperationBinaryMult,
    "FloorDiv": ExpressionOperationBinaryFloorDiv,
    "TrueDiv": ExpressionOperationBinaryTrueDiv,
    "Mod": ExpressionOperationBinaryMod,
    # Divmod only from built-in call.
    "Pow": ExpressionOperationBinaryPow,
    "LShift": ExpressionOperationBinaryLshift,
    "RShift": ExpressionOperationBinaryRshift,
    "BitOr": ExpressionOperationBinaryBitOr,
    "BitAnd": ExpressionOperationBinaryBitAnd,
    "BitXor": ExpressionOperationBinaryBitXor,
}

# Version-specific operators only exist on matching Python versions.
if python_version < 0x300:
    _operator2binary_operation_nodeclass["OldDiv"] = ExpressionOperationBinaryOldDiv

if python_version >= 0x350:
    _operator2binary_operation_nodeclass["MatMult"] = ExpressionOperationBinaryMatMult
def makeBinaryOperationNode(operator, left, right, source_ref):
    """Create the binary operation node matching the given operator name."""
    return _operator2binary_operation_nodeclass[operator](
        left=left, right=right, source_ref=source_ref
    )
class ExpressionOperationBinaryInplaceBase(ExpressionOperationBinaryBase):
    # Base classes can be abstract, pylint: disable=abstract-method

    """Base class for all inplace operations."""

    def __init__(self, left, right, source_ref):
        ExpressionOperationBinaryBase.__init__(
            self, left=left, right=right, source_ref=source_ref
        )

        # In-place nodes are suspect of being used in-place from the start.
        self.inplace_suspect = True

    @staticmethod
    def isExpressionOperationInplace():
        return True

    def computeExpression(self, trace_collection):
        """Optimize the in-place node; largely mirrors the base class
        computeExpression, plus lowering to a plain binary operation when the
        left operand is known to be a bool (immutable, cannot be in-place)."""
        # Nothing to do anymore for large constants.
        if self.shape is not None and self.shape.isConstant():
            return self, None, None

        left = self.subnode_left
        left_shape = left.getTypeShape()
        right = self.subnode_right
        right_shape = right.getTypeShape()

        self.type_shape, self.escape_desc = self._getOperationShape(
            left_shape, right_shape
        )

        if left.isCompileTimeConstant() and right.isCompileTimeConstant():
            if not self._isTooLarge():
                return self._simulateOperation(trace_collection)

        exception_raise_exit = self.escape_desc.getExceptionExit()
        if exception_raise_exit is not None:
            trace_collection.onExceptionRaiseExit(exception_raise_exit)

            # Operand shapes known to be incompatible: replace the whole node
            # with a raise of the simulated TypeError, keeping side effects.
            if self.escape_desc.isUnsupported() and self.canCreateUnsupportedException(
                left_shape, right_shape
            ):
                result = wrapExpressionWithSideEffects(
                    new_node=makeRaiseExceptionReplacementExpressionFromInstance(
                        expression=self,
                        exception=self.createUnsupportedException(
                            left_shape,
                            right_shape,
                        ),
                    ),
                    old_node=self,
                    side_effects=(left, right),
                )

                return (
                    result,
                    "new_raise",
                    "Replaced inplace-operator '%s' with %s %s arguments that cannot work."
                    % (self.operator, left_shape, right_shape),
                )

        if self.escape_desc.isValueEscaping():
            # The value of these nodes escaped and could change its contents.
            trace_collection.removeKnowledge(left)
            trace_collection.removeKnowledge(right)

        if self.escape_desc.isControlFlowEscape():
            # Any code could be run, note that.
            trace_collection.onControlFlowEscape(self)

        if left_shape is tshape_bool:
            # Strip the leading "I" from e.g. "IAdd" to get the plain binary
            # operator name and lower to that node.
            result = makeBinaryOperationNode(
                self.operator[1:], left, right, self.source_ref
            )

            return trace_collection.computedExpressionResult(
                result,
                "new_expression",
                "Lowered inplace-operator '%s' to binary operation." % self.operator,
            )

        return self, None, None
class ExpressionOperationInplaceAdd(
    ExpressionOperationAddMixin, ExpressionOperationBinaryInplaceBase
):
    """In-place '+=' operation node, with large-result prediction via the mixin."""

    kind = "EXPRESSION_OPERATION_INPLACE_ADD"

    operator = "IAdd"
    simulator = PythonOperators.binary_operator_functions[operator]

    # Removed redundant __init__ that only forwarded identically to the base
    # class constructor, matching sibling classes like ExpressionOperationInplaceSub.

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationInplaceAddShape(right_shape)
class ExpressionOperationInplaceSub(ExpressionOperationBinaryInplaceBase):
    """In-place '-=' operation node; shape handling same as binary '-'."""

    kind = "EXPRESSION_OPERATION_INPLACE_SUB"
    operator = "ISub"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinarySubShape(right_shape)
class ExpressionOperationInplaceMult(ExpressionOperationBinaryInplaceBase):
    """In-place '*=' operation node; shape handling same as binary '*'."""

    kind = "EXPRESSION_OPERATION_INPLACE_MULT"
    operator = "IMult"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryMultShape(right_shape)
class ExpressionOperationInplaceFloorDiv(ExpressionOperationBinaryInplaceBase):
    """In-place '//=' operation node; shape handling same as binary '//'."""

    kind = "EXPRESSION_OPERATION_INPLACE_FLOOR_DIV"
    operator = "IFloorDiv"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryFloorDivShape(right_shape)
if python_version < 0x300:

    class ExpressionOperationInplaceOldDiv(ExpressionOperationBinaryInplaceBase):
        """Python2-only in-place classic '/=' operation node."""

        kind = "EXPRESSION_OPERATION_INPLACE_OLD_DIV"
        operator = "IOldDiv"
        simulator = PythonOperators.binary_operator_functions[operator]

        @staticmethod
        def _getOperationShape(left_shape, right_shape):
            return left_shape.getOperationBinaryOldDivShape(right_shape)
class ExpressionOperationInplaceTrueDiv(ExpressionOperationBinaryInplaceBase):
    """In-place '/=' operation node; shape handling same as binary '/'."""

    kind = "EXPRESSION_OPERATION_INPLACE_TRUE_DIV"
    operator = "ITrueDiv"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryTrueDivShape(right_shape)
class ExpressionOperationInplaceMod(ExpressionOperationBinaryInplaceBase):
    """In-place '%=' operation node; shape handling same as binary '%'."""

    kind = "EXPRESSION_OPERATION_INPLACE_MOD"
    operator = "IMod"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryModShape(right_shape)
class ExpressionOperationInplacePow(ExpressionOperationBinaryInplaceBase):
    """In-place '**=' operation node; shape handling same as binary '**'."""

    kind = "EXPRESSION_OPERATION_INPLACE_POW"
    operator = "IPow"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryPowShape(right_shape)
class ExpressionOperationInplaceLshift(ExpressionOperationBinaryInplaceBase):
    """In-place '<<=' operation node; shape handling same as binary '<<'."""

    kind = "EXPRESSION_OPERATION_INPLACE_LSHIFT"
    operator = "ILShift"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryLShiftShape(right_shape)
class ExpressionOperationInplaceRshift(ExpressionOperationBinaryInplaceBase):
    """In-place '>>=' operation node; shape handling same as binary '>>'."""

    kind = "EXPRESSION_OPERATION_INPLACE_RSHIFT"
    operator = "IRShift"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryRShiftShape(right_shape)
class ExpressionOperationInplaceBitOr(ExpressionOperationBinaryInplaceBase):
    """In-place '|=' operation node."""

    kind = "EXPRESSION_OPERATION_INPLACE_BIT_OR"
    operator = "IBitOr"
    simulator = PythonOperators.binary_operator_functions[operator]

    # No inplace bitor special handling before 3.9
    if python_version < 0x390:

        @staticmethod
        def _getOperationShape(left_shape, right_shape):
            return left_shape.getOperationBinaryBitOrShape(right_shape)

    else:

        @staticmethod
        def _getOperationShape(left_shape, right_shape):
            # Dedicated in-place shape for 3.9+ — presumably due to PEP 584
            # dict '|=' union semantics; confirm against the shape classes.
            return left_shape.getOperationInplaceBitOrShape(right_shape)
class ExpressionOperationInplaceBitAnd(ExpressionOperationBinaryInplaceBase):
    """In-place '&=' operation node; shape handling same as binary '&'."""

    kind = "EXPRESSION_OPERATION_INPLACE_BIT_AND"
    operator = "IBitAnd"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryBitAndShape(right_shape)
class ExpressionOperationInplaceBitXor(ExpressionOperationBinaryInplaceBase):
    """In-place '^=' operation node; shape handling same as binary '^'."""

    kind = "EXPRESSION_OPERATION_INPLACE_BIT_XOR"
    operator = "IBitXor"
    simulator = PythonOperators.binary_operator_functions[operator]

    @staticmethod
    def _getOperationShape(left_shape, right_shape):
        return left_shape.getOperationBinaryBitXorShape(right_shape)
if python_version >= 0x350:

    class ExpressionOperationInplaceMatMult(ExpressionOperationBinaryInplaceBase):
        """In-place '@=' (matrix multiplication) operation node, Python3.5+."""

        kind = "EXPRESSION_OPERATION_INPLACE_MAT_MULT"
        operator = "IMatMult"
        simulator = PythonOperators.binary_operator_functions[operator]

        @staticmethod
        def _getOperationShape(left_shape, right_shape):
            return left_shape.getOperationBinaryMatMultShape(right_shape)
# Mapping of in-place operator names (as used by
# makeExpressionOperationBinaryInplace) to node classes.
_operator2binary_inplace_nodeclass = {
    "IAdd": ExpressionOperationInplaceAdd,
    "ISub": ExpressionOperationInplaceSub,
    "IMult": ExpressionOperationInplaceMult,
    "IFloorDiv": ExpressionOperationInplaceFloorDiv,
    "ITrueDiv": ExpressionOperationInplaceTrueDiv,
    "IMod": ExpressionOperationInplaceMod,
    "IPow": ExpressionOperationInplacePow,
    "ILShift": ExpressionOperationInplaceLshift,
    "IRShift": ExpressionOperationInplaceRshift,
    "IBitOr": ExpressionOperationInplaceBitOr,
    "IBitAnd": ExpressionOperationInplaceBitAnd,
    "IBitXor": ExpressionOperationInplaceBitXor,
}

# Version-specific operators only exist on matching Python versions.
if python_version < 0x300:
    _operator2binary_inplace_nodeclass["IOldDiv"] = ExpressionOperationInplaceOldDiv

if python_version >= 0x350:
    _operator2binary_inplace_nodeclass["IMatMult"] = ExpressionOperationInplaceMatMult
def makeExpressionOperationBinaryInplace(operator, left, right, source_ref):
    """Create the in-place operation node matching the given operator name."""
    return _operator2binary_inplace_nodeclass[operator](
        left=left, right=right, source_ref=source_ref
    )
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ResNet50 model definition compatible with TensorFlow's eager execution.
Reference [Deep Residual Learning for Image
Recognition](https://arxiv.org/abs/1512.03385)
Adapted from tf.keras.applications.ResNet50. A notable difference is that the
model here outputs logits while the Keras model outputs probability.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tensorflow as tf
import tensorflow.contrib.eager as tfe
class _IdentityBlock(tfe.Network):
  """_IdentityBlock is the block that has no conv layer at shortcut.

  Args:
    kernel_size: the kernel size of middle conv layer at main path
    filters: list of integers, the filters of 3 conv layer at main path
    stage: integer, current stage label, used for generating layer names
    block: 'a','b'..., current block label, used for generating layer names
    data_format: data_format for the input ('channels_first' or
      'channels_last').
  """

  def __init__(self, kernel_size, filters, stage, block, data_format):
    super(_IdentityBlock, self).__init__(name='')
    filters1, filters2, filters3 = filters

    # Layer names follow the Keras ResNet50 convention, e.g. 'res2a_branch2a'.
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # BatchNorm normalizes over the channel axis.
    bn_axis = 1 if data_format == 'channels_first' else 3

    # 1x1 bottleneck reduce -> kernel_size conv -> 1x1 expand, each followed
    # by batch normalization.
    self.conv2a = self.track_layer(
        tf.layers.Conv2D(
            filters1, (1, 1),
            name=conv_name_base + '2a',
            data_format=data_format))
    self.bn2a = self.track_layer(
        tf.layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a'))

    self.conv2b = self.track_layer(
        tf.layers.Conv2D(
            filters2,
            kernel_size,
            padding='same',
            data_format=data_format,
            name=conv_name_base + '2b'))
    self.bn2b = self.track_layer(
        tf.layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b'))

    self.conv2c = self.track_layer(
        tf.layers.Conv2D(
            filters3, (1, 1),
            name=conv_name_base + '2c',
            data_format=data_format))
    self.bn2c = self.track_layer(
        tf.layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c'))

  def call(self, input_tensor, training=False):
    """Apply conv->bn->relu three times, then the identity residual add."""
    x = self.conv2a(input_tensor)
    x = self.bn2a(x, training=training)
    x = tf.nn.relu(x)

    x = self.conv2b(x)
    x = self.bn2b(x, training=training)
    x = tf.nn.relu(x)

    x = self.conv2c(x)
    x = self.bn2c(x, training=training)

    # Residual connection: add the unmodified block input before final relu.
    x += input_tensor
    return tf.nn.relu(x)
class _ConvBlock(tfe.Network):
  """_ConvBlock is the block that has a conv layer at shortcut.

  Args:
    kernel_size: the kernel size of middle conv layer at main path
    filters: list of integers, the filterss of 3 conv layer at main path
    stage: integer, current stage label, used for generating layer names
    block: 'a','b'..., current block label, used for generating layer names
    data_format: data_format for the input ('channels_first' or
      'channels_last').
    strides: strides for the convolution. Note that from stage 3, the first
      conv layer at main path is with strides=(2,2), and the shortcut should
      have strides=(2,2) as well.
  """

  def __init__(self,
               kernel_size,
               filters,
               stage,
               block,
               data_format,
               strides=(2, 2)):
    super(_ConvBlock, self).__init__(name='')
    filters1, filters2, filters3 = filters

    # Layer names follow the Keras ResNet50 convention, e.g. 'res2a_branch2a'.
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # BatchNorm normalizes over the channel axis.
    bn_axis = 1 if data_format == 'channels_first' else 3

    # Main path: strided 1x1 reduce -> kernel_size conv -> 1x1 expand, each
    # followed by batch normalization.
    self.conv2a = self.track_layer(
        tf.layers.Conv2D(
            filters1, (1, 1),
            strides=strides,
            name=conv_name_base + '2a',
            data_format=data_format))
    self.bn2a = self.track_layer(
        tf.layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a'))

    self.conv2b = self.track_layer(
        tf.layers.Conv2D(
            filters2,
            kernel_size,
            padding='same',
            name=conv_name_base + '2b',
            data_format=data_format))
    self.bn2b = self.track_layer(
        tf.layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b'))

    self.conv2c = self.track_layer(
        tf.layers.Conv2D(
            filters3, (1, 1),
            name=conv_name_base + '2c',
            data_format=data_format))
    self.bn2c = self.track_layer(
        tf.layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2c'))

    # Shortcut path: strided 1x1 projection so shapes match for the add.
    self.conv_shortcut = self.track_layer(
        tf.layers.Conv2D(
            filters3, (1, 1),
            strides=strides,
            name=conv_name_base + '1',
            data_format=data_format))
    self.bn_shortcut = self.track_layer(
        tf.layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '1'))

  def call(self, input_tensor, training=False):
    """Apply the main path, then add the projected shortcut and relu."""
    x = self.conv2a(input_tensor)
    x = self.bn2a(x, training=training)
    x = tf.nn.relu(x)

    x = self.conv2b(x)
    x = self.bn2b(x, training=training)
    x = tf.nn.relu(x)

    x = self.conv2c(x)
    x = self.bn2c(x, training=training)

    # Projected residual connection (conv + bn on the shortcut).
    shortcut = self.conv_shortcut(input_tensor)
    shortcut = self.bn_shortcut(shortcut, training=training)

    x += shortcut
    return tf.nn.relu(x)
class ResNet50(tfe.Network):
  """Instantiates the ResNet50 architecture.

  Args:
    data_format: format for the image. Either 'channels_first' or
      'channels_last'. 'channels_first' is typically faster on GPUs while
      'channels_last' is typically faster on CPUs. See
      https://www.tensorflow.org/performance/performance_guide#data_formats
    name: Prefix applied to names of variables created in the model.
    trainable: Is the model trainable? If true, performs backward
      and optimization after call() method.
    include_top: whether to include the fully-connected layer at the top of the
      network.
    pooling: Optional pooling mode for feature extraction when `include_top`
      is `False`.
      - `None` means that the output of the model will be the 4D tensor
        output of the last convolutional layer.
      - `avg` means that global average pooling will be applied to the output of
        the last convolutional layer, and thus the output of the model will be
        a 2D tensor.
      - `max` means that global max pooling will be applied.
    classes: optional number of classes to classify images into, only to be
      specified if `include_top` is True.

  Raises:
    ValueError: in case of invalid argument for data_format.
  """

  def __init__(self,
               data_format,
               name=None,
               trainable=True,
               include_top=True,
               pooling=None,
               classes=1000):
    # NOTE(review): `name` and `trainable` are accepted but not used in this
    # body (super() is called with name=''); presumably consumed elsewhere —
    # confirm against tfe.Network.
    super(ResNet50, self).__init__(name='')
    valid_channel_values = ('channels_first', 'channels_last')
    if data_format not in valid_channel_values:
      raise ValueError('Unknown data_format: %s. Valid values: %s' %
                       (data_format, valid_channel_values))
    self.include_top = include_top

    # Local helpers that build a block and register it with this network.
    def conv_block(filters, stage, block, strides=(2, 2)):
      l = _ConvBlock(
          3,
          filters,
          stage=stage,
          block=block,
          data_format=data_format,
          strides=strides)
      return self.track_layer(l)

    def id_block(filters, stage, block):
      l = _IdentityBlock(
          3, filters, stage=stage, block=block, data_format=data_format)
      return self.track_layer(l)

    # Stem: 7x7/2 conv + BN + 3x3/2 max-pool (ReLU is applied in call()).
    self.conv1 = self.track_layer(
        tf.layers.Conv2D(
            64, (7, 7),
            strides=(2, 2),
            data_format=data_format,
            padding='same',
            name='conv1'))
    bn_axis = 1 if data_format == 'channels_first' else 3
    self.bn_conv1 = self.track_layer(
        tf.layers.BatchNormalization(axis=bn_axis, name='bn_conv1'))
    self.max_pool = self.track_layer(
        tf.layers.MaxPooling2D((3, 3), strides=(2, 2), data_format=data_format))

    # Stages 2-5: each starts with a projection conv block followed by
    # identity blocks (3, 4, 6, 3 blocks — the ResNet50 layout).
    self.l2a = conv_block([64, 64, 256], stage=2, block='a', strides=(1, 1))
    self.l2b = id_block([64, 64, 256], stage=2, block='b')
    self.l2c = id_block([64, 64, 256], stage=2, block='c')

    self.l3a = conv_block([128, 128, 512], stage=3, block='a')
    self.l3b = id_block([128, 128, 512], stage=3, block='b')
    self.l3c = id_block([128, 128, 512], stage=3, block='c')
    self.l3d = id_block([128, 128, 512], stage=3, block='d')

    self.l4a = conv_block([256, 256, 1024], stage=4, block='a')
    self.l4b = id_block([256, 256, 1024], stage=4, block='b')
    self.l4c = id_block([256, 256, 1024], stage=4, block='c')
    self.l4d = id_block([256, 256, 1024], stage=4, block='d')
    self.l4e = id_block([256, 256, 1024], stage=4, block='e')
    self.l4f = id_block([256, 256, 1024], stage=4, block='f')

    self.l5a = conv_block([512, 512, 2048], stage=5, block='a')
    self.l5b = id_block([512, 512, 2048], stage=5, block='b')
    self.l5c = id_block([512, 512, 2048], stage=5, block='c')

    self.avg_pool = self.track_layer(
        tf.layers.AveragePooling2D(
            (7, 7), strides=(7, 7), data_format=data_format))

    if self.include_top:
      # Final classifier head.
      self.fc1000 = self.track_layer(
          tf.layers.Dense(classes, name='fc1000'))
    else:
      # Spatial axes to reduce over for global pooling.
      reduction_indices = [1, 2] if data_format == 'channels_last' else [2, 3]
      reduction_indices = tf.constant(reduction_indices)
      # NOTE(review): `reduction_indices`/`keep_dims` are the legacy TF 1.x
      # spellings (now `axis`/`keepdims`); kept for compatibility with the
      # TF version this file targets.
      if pooling == 'avg':
        self.global_pooling = functools.partial(
            tf.reduce_mean,
            reduction_indices=reduction_indices,
            keep_dims=False)
      elif pooling == 'max':
        self.global_pooling = functools.partial(
            tf.reduce_max, reduction_indices=reduction_indices, keep_dims=False)
      else:
        self.global_pooling = None

  def call(self, input_tensor, training=False):
    """Run the full network; output depends on include_top/pooling."""
    x = self.conv1(input_tensor)
    x = self.bn_conv1(x, training=training)
    x = tf.nn.relu(x)
    x = self.max_pool(x)

    x = self.l2a(x, training=training)
    x = self.l2b(x, training=training)
    x = self.l2c(x, training=training)

    x = self.l3a(x, training=training)
    x = self.l3b(x, training=training)
    x = self.l3c(x, training=training)
    x = self.l3d(x, training=training)

    x = self.l4a(x, training=training)
    x = self.l4b(x, training=training)
    x = self.l4c(x, training=training)
    x = self.l4d(x, training=training)
    x = self.l4e(x, training=training)
    x = self.l4f(x, training=training)

    x = self.l5a(x, training=training)
    x = self.l5b(x, training=training)
    x = self.l5c(x, training=training)

    x = self.avg_pool(x)
    if self.include_top:
      # Logits over `classes` categories.
      return self.fc1000(tf.layers.flatten(x))
    elif self.global_pooling:
      # 2D tensor after global avg/max pooling.
      return self.global_pooling(x)
    else:
      # Raw 4D feature map.
      return x
| |
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
from PyQt5.QtCore import QThread
from PyQt5.QtCore import QUrl
from PyQt5.QtCore import pyqtSignal
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtGui import QFont
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QFrame
from PyQt5.QtWidgets import QGridLayout
from PyQt5.QtWidgets import QGroupBox
from PyQt5.QtWidgets import QHBoxLayout
from PyQt5.QtWidgets import QInputDialog
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QVBoxLayout
from rsapp.gui.style import Style
from rsapp.gui.widgets import ParaLine, ParaWidget, WorkWidget, Answer
from rspub.core.importer import Importer
from rspub.core.rs_paras import RsParameters
from rspub.util.observe import EventObserver
LOG = logging.getLogger(__name__)
class ImportFrame(QFrame):
    """Tab frame that lets the user import resource files over scp.

    Reacts to controller-level signals (language, configuration, tab switch)
    and delegates the actual transfer to an ImportWidget worker window.
    """

    def __init__(self, parent, index=-1):
        super().__init__(parent)
        self.index = index  # tab index of this frame in the main window
        self.ctrl = QApplication.instance().ctrl
        self.ctrl.switch_language.connect(self.on_switch_language)
        self.ctrl.switch_configuration.connect(self.on_switch_configuration)
        self.ctrl.switch_tab.connect(self.on_switch_tab)
        self.import_widget = None  # created lazily on first Start click
        self.init_ui()
        # Apply the current language/configuration once at construction time.
        self.on_switch_language(self.ctrl.current_language())
        self.on_switch_configuration()

    def init_ui(self):
        """Build the static widget tree: title bar, help button, scp group."""
        vbl_0 = QVBoxLayout(self)
        self.label_title = QLabel(self)
        font = QFont()
        font.setPointSize(18)
        font.setBold(True)
        self.label_title.setFont(font)
        self.label_title.setContentsMargins(2, 5, 5, 7)
        self.label_title.setStyleSheet(Style.h2())
        # Thin colored bar to the left of the title, styled per tab.
        lbl_color = QLabel(" ", self)
        lbl_color.setStyleSheet(Style.import_title())
        hbox1 = QHBoxLayout()
        hbox1.addWidget(lbl_color)
        hbox1.addWidget(self.label_title, 1)
        # NOTE: _() is the gettext translator, presumably installed globally
        # by the application at startup.
        self.btn_help = QPushButton(_("Help..."), self)
        self.btn_help.clicked.connect(self.on_button_help_clicked)
        hbox1.addWidget(self.btn_help)
        hbox1.setContentsMargins(0, 0, 0, 5)
        vbl_0.addLayout(hbox1)
        vbl_0.insertSpacing(2, 25)
        # # scp group
        grid1 = QGridLayout()
        grid1.setContentsMargins(0, 0, 0, 0)  # left, top, right, bottom
        grid1.setVerticalSpacing(5)
        grid1.setHorizontalSpacing(10)
        self.grp_scp = QGroupBox(_("Import files with Secure Copy Protocol (scp)"))
        vbox1 = QVBoxLayout()
        # One ParaLine per scp parameter; each line manages its own persistence.
        self.para_scp_widgets = {
            "imp_scp_server": ParaLine(self, "imp_scp_server", ParaWidget.str_conv(), grid1, 3, False),
            "imp_scp_port": ParaLine(self, "imp_scp_port", ParaWidget.int_conv(), grid1, 5, False, width=100),
            "imp_scp_user": ParaLine(self, "imp_scp_user", ParaWidget.str_conv(), grid1, 7, False),
            "imp_scp_remote_path": ParaLine(self, "imp_scp_remote_path", ParaWidget.str_conv(), grid1, 9, False),
            "imp_scp_local_path": ParaLine(self, "imp_scp_local_path", ParaWidget.str_conv(), grid1, 11, True),
        }
        self.grp_scp.setLayout(vbox1)
        vbox1.addLayout(grid1)
        hbox_scp = QHBoxLayout()
        hbox_scp.addStretch(1)
        self.scp_button_start = QPushButton(_("Start"))
        self.scp_button_start.clicked.connect(self.on_scp_button_start_clicked)
        hbox_scp.addWidget(self.scp_button_start)
        vbox1.addLayout(hbox_scp)
        vbl_0.addWidget(self.grp_scp)
        vbl_0.addStretch(1)
        self.setLayout(vbl_0)

    def on_button_help_clicked(self):
        """Open the online documentation for this tab in the system browser."""
        link = "http://rspub-gui.readthedocs.io/en/latest/rst/rsgui.import.html"
        QDesktopServices.openUrl(QUrl(link))

    def on_switch_language(self, code=None):
        """Re-apply all translatable strings after a language switch."""
        LOG.debug("Switch language: %s" % code)
        self.label_title.setText(_("Import resources"))
        self.btn_help.setText(_("Help..."))
        self.grp_scp.setTitle(_("Import files with Secure Copy Protocol (scp)"))
        self.scp_button_start.setText(_("Start"))

    def on_switch_configuration(self, name=None):
        # Nothing to refresh here; parameters are handled by the ParaLines.
        LOG.debug("Switch configuration: %s" % name)

    def on_switch_tab(self, from_index, to_index):
        pass

    def on_scp_button_start_clicked(self):
        self.activate_worker()

    def activate_worker(self):
        """Dispose of any previous worker window and show a fresh one."""
        if self.import_widget:
            self.import_widget.close()
            self.import_widget.destroy()
        self.import_widget = ImportWidget()
        # Disable/enable the Start button while a transfer is running.
        self.import_widget.work_started.connect(self.on_work_started)
        self.import_widget.work_ended.connect(self.on_work_ended)

    def on_work_started(self):
        self.scp_button_start.setEnabled(False)

    def on_work_ended(self):
        self.scp_button_start.setEnabled(True)

    def close(self):
        """Persist worker-window geometry when the frame is closed."""
        LOG.debug("ImportFrame closing")
        if self.import_widget:
            self.import_widget.save_dimensions()

    def translatables(self):
        # Never called at runtime: these _() calls only exist so the gettext
        # extractor picks up the parameter label keys.
        _("imp_scp_server_label")
        _("imp_scp_port_label")
        _("imp_scp_user_label")
        _("imp_scp_remote_path_label")
        _("imp_scp_local_path_label")
class ImportWidget(WorkWidget):
    """Worker window for the scp import.

    Prompts for the remote password, then spawns an ImportThread and wires
    its signals to the WorkWidget event handlers.
    """

    def __init__(self):
        WorkWidget.__init__(self, work="Import", title_style=Style.import_title())
        # A trial run makes no sense for an import; hide the checkbox.
        self.chk_trial_run.setVisible(False)
        # Marks the string for gettext extraction; no runtime effect.
        _("Import")

    def on_btn_run_clicked(self):
        """Ask for the scp password and start the import thread."""
        password = "secret"  # placeholder; replaced by the dialog result below
        dlg = QInputDialog(self)
        dlg.setInputMode(QInputDialog.TextInput)
        dlg.setWindowTitle(_("Connecting to %s") % self.paras.imp_scp_server)
        dlg.setLabelText(_("Password for %s@%s:") % (self.paras.imp_scp_user, self.paras.imp_scp_server))
        dlg.setTextEchoMode(QLineEdit.Password)
        if dlg.exec_():
            password = dlg.textValue()
        else:
            # User cancelled the password dialog: abort without starting.
            return
        super(ImportWidget, self).on_btn_run_clicked()
        self.executor_thread = ImportThread(self.paras, password, self)
        self.executor_thread.signal_exception.connect(self.on_signal_exception)
        self.executor_thread.ask_confirmation.connect(self.on_ask_confirmation)
        self.executor_thread.signal_main_event.connect(self.on_signal_main_event)
        self.executor_thread.signal_minor_event.connect(self.on_signal_minor_event)
        self.executor_thread.signal_next_file.connect(self.on_signal_next_file)
        self.executor_thread.signal_end_processing.connect(self.on_signal_end_processing)
        self.executor_thread.finished.connect(self.on_executor_thread_finished)
        self.executor_thread.start()
        self.update()
class ImportThread(QThread, EventObserver):
    """Background thread that runs the scp import and reports progress.

    The inform_* / confirm_* methods are observer callbacks invoked by the
    Importer; they translate importer events into Qt signals so the GUI
    thread can update safely.
    """

    signal_exception = pyqtSignal(str)
    signal_main_event = pyqtSignal(str)
    signal_minor_event = pyqtSignal(str)
    signal_next_file = pyqtSignal(str)
    ask_confirmation = pyqtSignal(str, str, Answer)
    signal_end_processing = pyqtSignal(RsParameters)

    def __init__(self, paras, password="secret", parent=None):
        QThread.__init__(self, parent)
        EventObserver.__init__(self)
        self.paras = paras          # RsParameters with the scp settings
        self.password = password    # remote password collected by the dialog
        self.scp_count = 0

    def run(self):
        """Thread entry point: create the Importer and run the transfer."""
        LOG.debug("Importer thread started %s" % self)
        self.scp_count = 0
        importer = None
        try:
            importer = Importer(self.paras, self.password)
            importer.register(self)
            LOG.debug("Starting import")
            importer.scp_get()
            self.signal_end_processing.emit(self.paras)
        except Exception as err:
            LOG.exception("Exception in import thread:")
            self.signal_exception.emit(_("Exception in importer thread: {0}").format(err))
        finally:
            # Always detach this observer, even after a failure.
            if importer:
                importer.unregister(self)

    def pass_inform(self, *args, **kwargs):
        # Catch-all for importer events without a dedicated handler.
        print(">>>>> inform >>>>>>", args, kwargs)

    def pass_confirm(self, *args, **kwargs):
        # Catch-all confirmation: accept by default.
        print(">>>>> confirm >>>>>", args, kwargs)
        return True

    def inform_import_start(self, *args, **kwargs):
        txt = _("Importing files...")
        self.signal_main_event.emit(txt)

    def inform_scp_exception(self, *args, **kwargs):
        exception = kwargs["exception"]
        txt = "SCP exception: "
        # str() guards against the importer passing an Exception instance
        # instead of a message string (str + Exception raises TypeError).
        txt += str(exception)
        self.signal_exception.emit(txt)

    def inform_ssh_client_creation(self, *args, **kwargs):
        # server, port, user
        txt = _("Creating ssh client. ")
        txt += " server: " + kwargs["server"]
        txt += " port: " + str(kwargs["port"])
        txt += " user: " + kwargs["user"]
        self.signal_main_event.emit(txt)

    def inform_scp_progress(self, *args, **kwargs):
        filename = kwargs["filename"]
        size = kwargs["size"]
        sent = kwargs["sent"]
        # Zero-byte files would otherwise raise ZeroDivisionError; treat an
        # empty file as fully transferred.
        perc = sent / size if size else 1.0
        percstr = "{:.0%}".format(perc).rjust(5)
        txt = " | " + percstr + " | " + filename
        self.signal_next_file.emit(txt)

    def inform_scp_transfer_complete(self, *args, **kwargs):
        filename = kwargs["filename"]
        count_imports = kwargs["count_imports"]
        txt = "<code>imported: "
        txt += str(count_imports) + " "
        txt += filename
        txt += "</code>"
        self.signal_minor_event.emit(txt)

    def inform_import_end(self, *args, **kwargs):
        """Emit a small HTML summary table with import/error counts."""
        count_imports = kwargs["count_imports"]
        count_errors = kwargs["count_errors"]
        txt = "<hr>"
        txt += _("End import.")
        txt += "<table>"
        txt += "<tr><td>"
        txt += _("imports") + " "
        txt += "</td><td>"
        txt += str(count_imports)
        txt += "</td></tr><tr><td>"
        txt += _("errors") + " "
        txt += "</td><td>"
        txt += str(count_errors)
        txt += "</td></tr></table><br/><br/>"
        self.signal_main_event.emit(txt)

    def confirm_transfer_file(self, *args, **kwargs):
        # Honor a user-requested interruption between file transfers.
        if self.isInterruptionRequested():
            self.signal_exception.emit(_("Process interrupted by user"))
        return not self.isInterruptionRequested()
| |
#SpeakPython allows developers to add speech recognition support to their Python applications
#Copyright (C) 2015 Eric Matthews
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import pickle
import sqlite3
import sys
import os.path
from Result import Result
class SpeakPython(object):
    """Speech-command matcher backed by a SQLite rules database.

    Matches an input string against the regexes stored in the ``matches``
    table, expands every captured group (numbered ``g_#`` groups, ``k_#``
    kleene repetitions, ``_name_#_...`` sub-function groups and plain named
    variables) and returns the stored Result whose labels are best covered
    by the captured variables.
    """

    # Class-level defaults; instances overwrite them in __init__/init().
    db = ''
    functionNameStack = []
    DEBUG_VAL = 3

    def __init__(self, database, DEBUG=3):
        """Remember the database path and the debug verbosity threshold."""
        self.db = database
        self.DEBUG_VAL = DEBUG
        if not os.path.isfile(self.db):
            self.debugMsg(self.db + " does not exist.", 10)

    def debugMsg(self, msg, severity):
        # Print only messages whose severity reaches the configured threshold.
        if self.DEBUG_VAL <= severity:
            print("DEBUG(" + str(severity) + "): SpeakPython.py: " + str(msg) + "\n")

    # returns the best-matched result out of the labels of matched regexes
    def getBestResult(self, results, varDict):
        # Results are sorted from the longest label set to the smallest, so
        # the first result covered by the variable dictionary is the best.
        for result in results:
            if result.isCoveredBy(varDict):
                result.setVariables(varDict)
                return result
        return None

    # gets the value of a g_# variable if it exists
    # input:
    #   g - current variable
    #   matchGroupDict - dictionary of variable matches from regex
    # output: (g, match value) for a g_# variable, (g, None) otherwise
    def getExpandedNums(self, g, matchGroupDict):
        if re.match(r'g_([0-9]+)', g) is not None:
            return (g, matchGroupDict[g])
        return (g, None)

    def getResultForFunction(self, cursor, g, funcName, funcNum, groupDict):
        """Evaluate a sub-function match and return its result string.

        groupDict holds the function-local variables (prefix stripped);
        funcNum identifies which occurrence of the function matched.
        """
        funcVarDict = {}
        # Expand each function-local variable (recursing through nested
        # functions/kleenes); skip variables that recorded nothing.
        for key in groupDict:
            (subKey, subResult) = self.getExpandedForm(cursor, key, groupDict)
            if subResult is None:
                continue
            funcVarDict[subKey] = subResult
        print("funcVarDict: " + str(funcVarDict))
        # Load this function's pickled results from the db by function name.
        cursor.execute("SELECT results FROM functions WHERE name=?", [funcName])
        funcResults = pickle.loads(cursor.fetchone()[0])
        # Default to the raw regex match when no stored result covers the
        # labels (.get: g is the global name and may be absent from the
        # localized dictionary).
        resultStr = groupDict.get(g)
        result = self.getBestResult(funcResults, funcVarDict)
        if result is not None:
            resultStr = result.getResult()
        return resultStr

    # gets the value of a function variable if it exists
    # input:
    #   cursor - allows query of function results
    #   g - current variable
    #   matchGroupDict - dictionary of variable matches from regex
    # output: (funcName, evaluated value) for a function variable,
    #         (g, None) otherwise
    def getExpandedFunctions(self, cursor, g, matchGroupDict):
        # Function groups have the form "_{name}_{num}_...".
        m = re.match(r'_([a-zA-Z]+)_([0-9]+)_(.*)', g)
        if m is not None:
            funcName = m.group(1)
            funcNum = m.group(2)
            localizedDict = {}
            # Copy this function's variables, stripping the function prefix.
            for key in matchGroupDict:
                km = re.match('_' + funcName + '_' + funcNum + '_(.*)', key)
                if km is not None:
                    globalVarName = km.group(0)
                    localVarName = km.group(1)
                    localizedDict[localVarName] = matchGroupDict[globalVarName]
            # Keep a stack record of function names while evaluating.
            self.functionNameStack.append(funcName)
            functionResult = self.getResultForFunction(cursor, g, funcName, funcNum, localizedDict)
            self.functionNameStack.pop()
            return (funcName, functionResult)
        return (g, None)

    # gets the value of a plain variable if it exists
    # output: (g, match value) for a plain variable, (g, None) otherwise
    def getExpandedVars(self, g, matchGroupDict):
        if re.match('[a-zA-Z0-9]+', g) is not None:
            return (g, matchGroupDict[g])
        return (g, None)

    # fetch the kleene regex dictionary stored for a function or match id
    def getKleeneRegex(self, c, index):
        c.execute("SELECT regexes FROM kleene WHERE id=?", [index])
        kMatch = c.fetchone()
        if kMatch is not None:
            return pickle.loads(kMatch[0])
        return {}

    # gets the values of a kleene group if it exists
    # output: (g, dict of accumulated inner-variable lists) for a kleene
    #         variable, (g, None) otherwise
    def getExpandedKleene(self, cursor, g, matchGroupDict):
        m = re.match(r'k_([0-9]+)', g)
        if m is not None:
            kNum = m.group(1)
            # Everything the kleene matched in the top-level regex.
            rawMatchStr = matchGroupDict[g]
            # Use the enclosing function's kleene entry when inside a
            # function, else the top-level entry.
            if len(self.functionNameStack) > 0:
                funcName = self.functionNameStack[-1]
                funcKleeneRegexes = self.getKleeneRegex(cursor, funcName)
                innerRegex = funcKleeneRegexes.get(kNum)
            else:
                # TODO(review): the top-level kleene table may be keyed by
                # match id rather than kleene number — confirm schema.
                innerRegex = self.getKleeneRegex(cursor, kNum).get(kNum)
            retDict = {}
            if innerRegex is not None:
                # Extend the regex to also consume trailing delimiters so the
                # next iteration starts at the next repetition.
                delim = " /,"
                innerRegex = innerRegex + "[" + delim + "]*"
                # Repeatedly match the inner regex, consuming the string.
                kMatch = re.match(innerRegex, rawMatchStr)
                while kMatch is not None:
                    innerVars = kMatch.groupdict()
                    innerMatchStr = kMatch.group(0)
                    if not innerMatchStr:
                        break  # empty match would loop forever
                    print("innerVars: " + str(innerVars))
                    kleeneDict = {}
                    # Expand each inner variable to its proper form.
                    for var in innerVars:
                        (key, value) = self.getExpandedForm(cursor, var, innerVars)
                        kleeneDict[key] = value
                    print("kleeneDict: " + str(kleeneDict))
                    # Accumulate each inner variable's value into a list.
                    for var in kleeneDict:
                        if var in retDict:
                            retDict[var].append(kleeneDict[var])
                        else:
                            retDict[var] = [kleeneDict[var]]
                    # Cut off the previous match and re-search.
                    rawMatchStr = rawMatchStr[len(innerMatchStr):]
                    kMatch = re.match(innerRegex, rawMatchStr)
            return (g, retDict)
        return (g, None)

    def getExpandedForm(self, cursor, g, matchGroupDict):
        """Expand one captured group into (name, value); (g, None) if empty."""
        val = matchGroupDict[g]
        if val == '' or val is None:
            return (g, None)
        # Each helper returns (name, None) when g is not of its kind, so test
        # the value slot — the tuple itself is always truthy.
        ret = self.getExpandedNums(g, matchGroupDict)
        if ret[1] is not None:
            return ret
        ret = self.getExpandedKleene(cursor, g, matchGroupDict)
        if ret[1] is not None:
            return ret
        ret = self.getExpandedFunctions(cursor, g, matchGroupDict)
        if ret[1] is not None:
            return ret
        ret = self.getExpandedVars(g, matchGroupDict)
        if ret[1] is not None:
            return ret
        return (g, None)

    def init(self):
        # Reset per-query state; the name stack is a list used as a stack.
        self.functionNameStack = []

    # returns a result that matches the regex
    def matchResult(self, inStr):
        """Match inStr against all stored regexes; return the best Result."""
        self.init()
        conn = sqlite3.connect(self.db)
        c = conn.cursor()
        # First word of the input selects candidate rules ('*' matches all).
        sep = inStr.find(' ')
        keyword = (inStr if sep == -1 else inStr[:sep]).lower()
        matches = c.execute(
            "SELECT order_id, regex, results FROM matches WHERE keywords LIKE '%'||?||'%' OR keywords LIKE '%*%' ORDER BY order_id",
            [keyword])
        longestResult = None
        longestResultLen = 0
        # Find the match-result pair that covers the most labels.
        for match in matches:
            matchID = match[0]  # TODO(review): unused; likely meant for the kleene lookup
            regex = match[1]
            m = re.match(regex, inStr)
            if m is None:
                continue
            self.debugMsg(regex, 1)
            matchGroupDict = m.groupdict()
            groupDict = {}
            # Expand each captured variable/function into a result label.
            for g in matchGroupDict:
                (key, value) = self.getExpandedForm(c, g, matchGroupDict)
                if value is not None:
                    groupDict[key] = value
            results = pickle.loads(match[2])  # unpickle stored results
            bestResult = self.getBestResult(results, groupDict)
            # Keep the result with the largest number of labels.
            if bestResult is not None:
                if longestResultLen < len(bestResult.labels):
                    longestResult = bestResult
                    longestResultLen = len(bestResult.labels)
        return longestResult
| |
# -*- test-case-name: twisted.test.test_reflector -*-
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.enterprise import reflector
from twisted.enterprise.util import DBError, getKeyColumn, quote, safe
from twisted.enterprise.util import _TableInfo
from twisted.enterprise.row import RowObject
from twisted.python import reflect
class SQLReflector(reflector.Reflector):
"""
DEPRECATED.
I reflect on a database and load RowObjects from it.
In order to do this, I interrogate a relational database to
extract schema information and interface with RowObject class
objects that can interact with specific tables.
"""
populated = 0
conditionalLabels = {
reflector.EQUAL : "=",
reflector.LESSTHAN : "<",
reflector.GREATERTHAN : ">",
reflector.LIKE : "like"
}
def __init__(self, dbpool, rowClasses):
"""Initialize me against a database.
"""
reflector.Reflector.__init__(self, rowClasses)
self.dbpool = dbpool
def _populate(self):
self._transPopulateSchema()
def _transPopulateSchema(self):
"""Used to construct the row classes in a single interaction.
"""
for rc in self.rowClasses:
if not issubclass(rc, RowObject):
raise DBError("Stub class (%s) is not derived from RowObject" % reflect.qual(rc.rowClass))
self._populateSchemaFor(rc)
self.populated = 1
def _populateSchemaFor(self, rc):
"""Construct all the SQL templates for database operations on
<tableName> and populate the class <rowClass> with that info.
"""
attributes = ("rowColumns", "rowKeyColumns", "rowTableName" )
for att in attributes:
if not hasattr(rc, att):
raise DBError("RowClass %s must have class variable: %s" % (rc, att))
tableInfo = _TableInfo(rc)
tableInfo.updateSQL = self.buildUpdateSQL(tableInfo)
tableInfo.insertSQL = self.buildInsertSQL(tableInfo)
tableInfo.deleteSQL = self.buildDeleteSQL(tableInfo)
self.populateSchemaFor(tableInfo)
def escape_string(self, text):
"""Escape a string for use in an SQL statement. The default
implementation escapes ' with '' and \ with \\. Redefine this
function in a subclass if your database server uses different
escaping rules.
"""
return safe(text)
def quote_value(self, value, type):
"""Format a value for use in an SQL statement.
@param value: a value to format as data in SQL.
@param type: a key in util.dbTypeMap.
"""
return quote(value, type, string_escaper=self.escape_string)
def loadObjectsFrom(self, tableName, parentRow=None, data=None,
whereClause=None, forceChildren=0):
"""Load a set of RowObjects from a database.
Create a set of python objects of <rowClass> from the contents
of a table populated with appropriate data members.
Example::
| class EmployeeRow(row.RowObject):
| pass
|
| def gotEmployees(employees):
| for emp in employees:
| emp.manager = "fred smith"
| manager.updateRow(emp)
|
| reflector.loadObjectsFrom("employee",
| data = userData,
| whereClause = [("manager" , EQUAL, "fred smith")]
| ).addCallback(gotEmployees)
NOTE: the objects and all children should be loaded in a single transaction.
NOTE: can specify a parentRow _OR_ a whereClause.
"""
if parentRow and whereClause:
raise DBError("Must specify one of parentRow _OR_ whereClause")
if parentRow:
info = self.getTableInfo(parentRow)
relationship = info.getRelationshipFor(tableName)
whereClause = self.buildWhereClause(relationship, parentRow)
elif whereClause:
pass
else:
whereClause = []
return self.dbpool.runInteraction(self._rowLoader, tableName,
parentRow, data, whereClause,
forceChildren)
def _rowLoader(self, transaction, tableName, parentRow, data,
whereClause, forceChildren):
"""immediate loading of rowobjects from the table with the whereClause.
"""
tableInfo = self.schema[tableName]
# Build the SQL for the query
sql = "SELECT "
first = 1
for column, type in tableInfo.rowColumns:
if first:
first = 0
else:
sql = sql + ","
sql = sql + " %s" % column
sql = sql + " FROM %s " % (tableName)
if whereClause:
sql += " WHERE "
first = 1
for wItem in whereClause:
if first:
first = 0
else:
sql += " AND "
(columnName, cond, value) = wItem
t = self.findTypeFor(tableName, columnName)
quotedValue = self.quote_value(value, t)
sql += "%s %s %s" % (columnName, self.conditionalLabels[cond],
quotedValue)
# execute the query
transaction.execute(sql)
rows = transaction.fetchall()
# construct the row objects
results = []
newRows = []
for args in rows:
kw = {}
for i in range(0,len(args)):
ColumnName = tableInfo.rowColumns[i][0].lower()
for attr, type in tableInfo.rowClass.rowColumns:
if attr.lower() == ColumnName:
kw[attr] = args[i]
break
# find the row in the cache or add it
resultObject = self.findInCache(tableInfo.rowClass, kw)
if not resultObject:
meth = tableInfo.rowFactoryMethod[0]
resultObject = meth(tableInfo.rowClass, data, kw)
self.addToCache(resultObject)
newRows.append(resultObject)
results.append(resultObject)
# add these rows to the parentRow if required
if parentRow:
self.addToParent(parentRow, newRows, tableName)
# load children or each of these rows if required
for relationship in tableInfo.relationships:
if not forceChildren and not relationship.autoLoad:
continue
for row in results:
# build where clause
childWhereClause = self.buildWhereClause(relationship, row)
# load the children immediately, but do nothing with them
self._rowLoader(transaction,
relationship.childRowClass.rowTableName,
row, data, childWhereClause, forceChildren)
return results
def findTypeFor(self, tableName, columnName):
tableInfo = self.schema[tableName]
columnName = columnName.lower()
for column, type in tableInfo.rowColumns:
if column.lower() == columnName:
return type
def buildUpdateSQL(self, tableInfo):
"""(Internal) Build SQL template to update a RowObject.
Returns: SQL that is used to contruct a rowObject class.
"""
sql = "UPDATE %s SET" % tableInfo.rowTableName
# build update attributes
first = 1
for column, type in tableInfo.rowColumns:
if getKeyColumn(tableInfo.rowClass, column):
continue
if not first:
sql = sql + ", "
sql = sql + " %s = %s" % (column, "%s")
first = 0
# build where clause
first = 1
sql = sql + " WHERE "
for keyColumn, type in tableInfo.rowKeyColumns:
if not first:
sql = sql + " AND "
sql = sql + " %s = %s " % (keyColumn, "%s")
first = 0
return sql
def buildInsertSQL(self, tableInfo):
"""(Internal) Build SQL template to insert a new row.
Returns: SQL that is used to insert a new row for a rowObject
instance not created from the database.
"""
sql = "INSERT INTO %s (" % tableInfo.rowTableName
# build column list
first = 1
for column, type in tableInfo.rowColumns:
if not first:
sql = sql + ", "
sql = sql + column
first = 0
sql = sql + " ) VALUES ("
# build values list
first = 1
for column, type in tableInfo.rowColumns:
if not first:
sql = sql + ", "
sql = sql + "%s"
first = 0
sql = sql + ")"
return sql
def buildDeleteSQL(self, tableInfo):
"""Build the SQL template to delete a row from the table.
"""
sql = "DELETE FROM %s " % tableInfo.rowTableName
# build where clause
first = 1
sql = sql + " WHERE "
for keyColumn, type in tableInfo.rowKeyColumns:
if not first:
sql = sql + " AND "
sql = sql + " %s = %s " % (keyColumn, "%s")
first = 0
return sql
def updateRowSQL(self, rowObject):
"""Build SQL to update the contents of rowObject.
"""
args = []
tableInfo = self.schema[rowObject.rowTableName]
# build update attributes
for column, type in tableInfo.rowColumns:
if not getKeyColumn(rowObject.__class__, column):
args.append(self.quote_value(rowObject.findAttribute(column),
type))
# build where clause
for keyColumn, type in tableInfo.rowKeyColumns:
args.append(self.quote_value(rowObject.findAttribute(keyColumn),
type))
return self.getTableInfo(rowObject).updateSQL % tuple(args)
def updateRow(self, rowObject):
"""Update the contents of rowObject to the database.
"""
sql = self.updateRowSQL(rowObject)
rowObject.setDirty(0)
return self.dbpool.runOperation(sql)
def insertRowSQL(self, rowObject):
"""Build SQL to insert the contents of rowObject.
"""
args = []
tableInfo = self.schema[rowObject.rowTableName]
# build values
for column, type in tableInfo.rowColumns:
args.append(self.quote_value(rowObject.findAttribute(column),type))
return self.getTableInfo(rowObject).insertSQL % tuple(args)
def insertRow(self, rowObject):
"""Insert a new row for rowObject.
"""
rowObject.setDirty(0)
sql = self.insertRowSQL(rowObject)
return self.dbpool.runOperation(sql)
def deleteRowSQL(self, rowObject):
"""Build SQL to delete rowObject from the database.
"""
args = []
tableInfo = self.schema[rowObject.rowTableName]
# build where clause
for keyColumn, type in tableInfo.rowKeyColumns:
args.append(self.quote_value(rowObject.findAttribute(keyColumn),
type))
return self.getTableInfo(rowObject).deleteSQL % tuple(args)
def deleteRow(self, rowObject):
"""Delete the row for rowObject from the database.
"""
sql = self.deleteRowSQL(rowObject)
self.removeFromCache(rowObject)
return self.dbpool.runOperation(sql)
# Public API of this module: only the reflector class itself is exported.
__all__ = ['SQLReflector']
| |
#!/usr/bin/env python
# vim: sw=2 ts=2
import click
import os
import sys
@click.command()
### Cluster options
@click.option('--console-port', default='443', type=click.IntRange(1,65535), help='OpenShift web console port',
              show_default=True)
@click.option('--deployment-type', default='openshift-enterprise', help='OpenShift deployment type',
              show_default=True)
@click.option('--openshift-sdn', default='redhat/openshift-ovs-multitenant', type=click.Choice(['redhat/openshift-ovs-subnet', 'redhat/openshift-ovs-multitenant']), help='OpenShift SDN',
              show_default=True)
### AWS/EC2 options
@click.option('--glusterfs-stack-name', help='Specify a gluster stack name. Making the name unique will allow for multiple deployments',
              show_default=True)
@click.option('--region', default='us-east-1', help='ec2 region',
              show_default=True)
@click.option('--ami', default='ami-fbc89880', help='ec2 ami',
              show_default=True)
@click.option('--node-instance-type', default='m4.2xlarge', help='ec2 instance type',
              show_default=True)
@click.option('--use-cloudformation-facts', is_flag=True, help='Use cloudformation to populate facts. Requires Deployment >= OCP 3.5',
              show_default=True)
@click.option('--keypair', help='ec2 keypair name',
              show_default=True)
@click.option('--private-subnet-id1', help='Specify a Private subnet within the existing VPC',
              show_default=True)
@click.option('--private-subnet-id2', help='Specify a Private subnet within the existing VPC',
              show_default=True)
@click.option('--private-subnet-id3', help='Specify a Private subnet within the existing VPC',
              show_default=True)
@click.option('--glusterfs-volume-size', default='500', help='Gluster volume size in GB',
              show_default=True)
@click.option('--glusterfs-volume-type', default='st1', help='Gluster volume type',
              show_default=True)
@click.option('--iops', help='Specfify the IOPS for a volume (used only with IO1)',
              show_default=True)
### DNS options
@click.option('--public-hosted-zone', help='hosted zone for accessing the environment')
### Subscription and Software options
@click.option('--rhsm-user', help='Red Hat Subscription Management User')
@click.option('--rhsm-password', help='Red Hat Subscription Management Password',
              hide_input=True,)
@click.option('--rhsm-pool', help='Red Hat Subscription Management Pool Name')
### Miscellaneous options
@click.option('--containerized', default='False', help='Containerized installation of OpenShift',
              show_default=True)
@click.option('--iam-role', help='Specify the name of the existing IAM Instance profile',
              show_default=True)
@click.option('--node-sg', help='Specify the already existing node security group id',
              show_default=True)
@click.option('--existing-stack', help='Specify the name of the existing CloudFormation stack')
@click.option('--no-confirm', is_flag=True,
              help='Skip confirmation prompt')
@click.help_option('--help', '-h')
@click.option('-v', '--verbose', count=True)
def launch_refarch_env(region=None,
                       ami=None,
                       no_confirm=False,
                       node_instance_type=None,
                       glusterfs_stack_name=None,
                       keypair=None,
                       public_hosted_zone=None,
                       deployment_type=None,
                       console_port=443,
                       rhsm_user=None,
                       rhsm_password=None,
                       rhsm_pool=None,
                       containerized=None,
                       node_type=None,
                       private_subnet_id1=None,
                       private_subnet_id2=None,
                       private_subnet_id3=None,
                       glusterfs_volume_type=None,
                       glusterfs_volume_size=None,
                       openshift_sdn=None,
                       iops=None,
                       node_sg=None,
                       iam_role=None,
                       existing_stack=None,
                       use_cloudformation_facts=False,
                       verbose=0):
  """Add CNS (glusterfs) nodes to an existing OpenShift deployment on AWS."""
  # --- Prompt for any required values that were not passed as options ---
  # Need to prompt for the R53 zone:
  if public_hosted_zone is None:
    public_hosted_zone = click.prompt('Hosted DNS zone for accessing the environment')

  if existing_stack is None:
    existing_stack = click.prompt('Specify the name of the existing CloudFormation stack')

  if glusterfs_stack_name is None:
    glusterfs_stack_name = click.prompt('Specify a unique name for the CNS CloudFormation stack')

  # If no keypair is specified fail:
  if keypair is None:
    keypair = click.prompt('A SSH keypair must be specified or created')

  # RHSM credentials are only required for openshift-enterprise deployments;
  # if the user already provided values, don't bother asking again.
  if deployment_type in ['openshift-enterprise'] and rhsm_user is None:
    rhsm_user = click.prompt("RHSM username?")
  if deployment_type in ['openshift-enterprise'] and rhsm_password is None:
    rhsm_password = click.prompt("RHSM password?", hide_input=True)
  if deployment_type in ['openshift-enterprise'] and rhsm_pool is None:
    rhsm_pool = click.prompt("RHSM Pool ID or Subscription Name for OpenShift?")

  # When --use-cloudformation-facts is given, subnet/SG/IAM values are read
  # from the existing stack instead of being supplied here.
  if use_cloudformation_facts and iam_role is None:
    iam_role = "Computed by Cloudformations"
  elif iam_role is None:
    iam_role = click.prompt("Specify the IAM Role of the node?")

  if use_cloudformation_facts and node_sg is None:
    node_sg = "Computed by Cloudformations"
  elif node_sg is None:
    node_sg = click.prompt("Specify the Security Group for the nodes?")

  if use_cloudformation_facts and private_subnet_id1 is None:
    private_subnet_id1 = "Computed by Cloudformations"
  elif private_subnet_id1 is None:
    private_subnet_id1 = click.prompt("Specify the first private subnet for the nodes?")

  if use_cloudformation_facts and private_subnet_id2 is None:
    private_subnet_id2 = "Computed by Cloudformations"
  elif private_subnet_id2 is None:
    private_subnet_id2 = click.prompt("Specify the second private subnet for the nodes?")

  if use_cloudformation_facts and private_subnet_id3 is None:
    private_subnet_id3 = "Computed by Cloudformations"
  elif private_subnet_id3 is None:
    private_subnet_id3 = click.prompt("Specify the third private subnet for the nodes?")

  # io1 volumes require an explicit IOPS value; all other types ignore it.
  if glusterfs_volume_type in ['io1']:
    iops = click.prompt('Specify a numeric value for iops')
  if iops is None:
    iops = "NA"

  # Hidden facts for infrastructure.yaml
  create_key = "no"
  create_vpc = "no"
  add_node = "yes"
  deploy_glusterfs = "true"
  node_type = "glusterfs"

  # --- Display information to the user about their choices ---
  settings = [
      ('ami', ami),
      ('region', region),
      ('glusterfs_stack_name', glusterfs_stack_name),
      ('node_instance_type', node_instance_type),
  ]
  if not use_cloudformation_facts:
    settings += [
        ('private_subnet_id1', private_subnet_id1),
        ('private_subnet_id2', private_subnet_id2),
        ('private_subnet_id3', private_subnet_id3),
    ]
  settings += [
      ('glusterfs_volume_type', glusterfs_volume_type),
      ('glusterfs_volume_size', glusterfs_volume_size),
      ('iops', iops),
      # BUGFIX: this label used to be printed as '\openshift_sdn' (missing tab).
      ('openshift_sdn', openshift_sdn),
      # BUGFIX: keypair used to be echoed twice in the non-CloudFormation branch.
      ('keypair', keypair),
  ]
  if not use_cloudformation_facts:
    settings.append(('node_sg', node_sg))
  settings += [
      ('deployment_type', deployment_type),
      ('public_hosted_zone', public_hosted_zone),
      ('console port', console_port),
      ('rhsm_user', rhsm_user),
      ('rhsm_password', '*******'),  # never echo the real password
      ('rhsm_pool', rhsm_pool),
      ('containerized', containerized),
  ]
  if not use_cloudformation_facts:
    settings.append(('iam_role', iam_role))
  settings.append(('existing_stack', existing_stack))

  click.echo('Configured values:')
  for label, value in settings:
    click.echo('\t%s: %s' % (label, value))
  if use_cloudformation_facts:
    click.echo('\tSubnets, Security Groups, and IAM Roles will be gathered from the CloudFormation')
  click.echo("")

  if not no_confirm:
    click.confirm('Continue using these values?', abort=True)

  playbooks = ['playbooks/infrastructure.yaml', 'playbooks/add-node.yaml']

  for playbook in playbooks:
    # hide cache output unless in verbose mode
    devnull = '' if verbose > 0 else '> /dev/null'

    # refresh the inventory cache to prevent stale hosts from
    # interfering with re-running
    os.system('inventory/aws/hosts/ec2.py --refresh-cache %s' % devnull)

    # remove any cached facts to prevent stale data during a re-run
    os.system('rm -rf .ansible/cached_facts')

    # Build the ansible extra-vars once; subnet/SG/IAM values are only passed
    # explicitly when CloudFormation facts are not in use.
    extra_vars = [
        ('region', region),
        ('ami', ami),
        ('keypair', keypair),
        ('glusterfs_stack_name', glusterfs_stack_name),
        ('add_node', add_node),
    ]
    if not use_cloudformation_facts:
      extra_vars.append(('node_sg', node_sg))
    extra_vars.append(('node_instance_type', node_instance_type))
    if not use_cloudformation_facts:
      extra_vars += [
          ('private_subnet_id1', private_subnet_id1),
          ('private_subnet_id2', private_subnet_id2),
          ('private_subnet_id3', private_subnet_id3),
      ]
    extra_vars += [
        ('public_hosted_zone', public_hosted_zone),
        ('deployment_type', deployment_type),
        ('console_port', console_port),
        ('rhsm_user', rhsm_user),
        ('rhsm_password', rhsm_password),
        ('rhsm_pool', '"%s"' % rhsm_pool),  # quoted: pool names may contain spaces
        ('containerized', containerized),
        ('node_type', node_type),
    ]
    if not use_cloudformation_facts:
      extra_vars.append(('iam_role', iam_role))
    extra_vars += [
        ('key_path', '/dev/null'),
        ('create_key', create_key),
        ('create_vpc', create_vpc),
        ('deploy_glusterfs', deploy_glusterfs),
        ('glusterfs_volume_type', glusterfs_volume_type),
        ('glusterfs_volume_size', glusterfs_volume_size),
        ('iops', iops),
        ('openshift_sdn', openshift_sdn),
        ('stack_name', existing_stack),
    ]
    command = "ansible-playbook -i inventory/aws/hosts -e '%s' %s" % (
        ' '.join('%s=%s' % pair for pair in extra_vars), playbook)

    if verbose > 0:
      command += " -" + "".join(['v'] * verbose)

    click.echo('We are running: %s' % command)
    status = os.system(command)
    # Propagate a non-zero playbook exit status to the caller.
    if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
      return os.WEXITSTATUS(status)
if __name__ == '__main__':
  # check for AWS access info: the ec2 dynamic inventory and CloudFormation
  # playbooks need credentials, so fail fast with a clear message.
  if os.getenv('AWS_ACCESS_KEY_ID') is None or os.getenv('AWS_SECRET_ACCESS_KEY') is None:
    # Parenthesized single-argument print works under both Python 2 and 3
    # (the old 'print ...' statement form is a SyntaxError on Python 3).
    print('AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY **MUST** be exported as environment variables.')
    sys.exit(1)

  # auto_envvar_prefix lets every option also be supplied via OSE_REFArch_* env vars.
  launch_refarch_env(auto_envvar_prefix='OSE_REFArch')
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import ast
from time import sleep
from subprocess import PIPE, STDOUT
from system_test import TestCase, Qdrouterd, main_module, TIMEOUT, Process
from system_test import AsyncTestReceiver
from system_test import unittest
from proton import Message, Timeout
from proton.reactor import AtMostOnce, AtLeastOnce
from proton.utils import BlockingConnection, SendException
#TIMEOUT=5
# Fully-qualified management entity type names used with qdmanage to
# create/query/delete exchange and binding configuration objects.
_EXCHANGE_TYPE = "org.apache.qpid.dispatch.router.config.exchange"
_BINDING_TYPE = "org.apache.qpid.dispatch.router.config.binding"
class ExchangeBindingsTest(TestCase):
"""
Tests the exchange/bindings of the dispatch router.
"""
def _create_router(self, name, config):
config = [
('router', {'mode': 'standalone', 'id': 'QDR.%s'%name}),
('listener', {'role': 'normal', 'host': '0.0.0.0',
'port': self.tester.get_port(),
'saslMechanisms':'ANONYMOUS'})
] + config
return self.tester.qdrouterd(name, Qdrouterd.Config(config))
def run_qdmanage(self, router, cmd, input=None, expect=Process.EXIT_OK):
p = self.popen(
['qdmanage'] + cmd.split(' ')
+ ['--bus', router.addresses[0], '--indent=-1', '--timeout', str(TIMEOUT)],
stdin=PIPE, stdout=PIPE, stderr=STDOUT, expect=expect,
universal_newlines=True)
out = p.communicate(input)[0]
try:
p.teardown()
except Exception as e:
raise Exception("%s\n%s" % (e, out))
return out
def _validate_entity(self, name, kind, entities, expected):
for entity in entities:
if "name" in entity and entity["name"] == name:
for k,v in expected.items():
self.assertTrue(k in entity)
self.assertEqual(v, entity[k])
return
raise Exception("Could not find %s named %s" % (kind, name))
def _validate_exchange(self, router, name, **kwargs):
_ = self.run_qdmanage(router, "query --type %s" % _EXCHANGE_TYPE)
self._validate_entity(name, "exchange", ast.literal_eval(_), kwargs)
def _validate_binding(self, router, name, **kwargs):
_ = self.run_qdmanage(router, "query --type %s" % _BINDING_TYPE)
self._validate_entity(name, "binding", ast.literal_eval(_), kwargs)
def test_qdmanage(self):
"""
Tests the management API via qdmanage
"""
router = self._create_router("A", [])
# create exchanges
ex_config = [
["Exchange1", {"address": "Address1"}],
["Exchange2", {"address": "Address2",
"phase": 2,
"alternateAddress": "Alternate2",
"alternatePhase": 1,
"matchMethod": "mqtt"}]
]
for cfg in ex_config:
args = ""
for k, v in cfg[1].items():
args += "%s=%s " % (k, v)
self.run_qdmanage(router,
"create --type %s --name %s %s" %
(_EXCHANGE_TYPE, cfg[0], args))
# validate
_ = self.run_qdmanage(router, "query --type %s" % _EXCHANGE_TYPE)
query = ast.literal_eval(_)
self.assertEqual(len(ex_config), len(query))
for cfg in ex_config:
self._validate_entity(name=cfg[0],
kind="exchange",
entities=query,
expected=cfg[1])
for ex in query:
self.assertEqual(0, ex['bindingCount'])
# create bindings
binding_config = [
["b11", {"exchangeName": "Exchange1",
"bindingKey": "a.b.*.#",
"nextHopAddress": "nextHop1",
"nextHopPhase": 3}],
["b12", {"exchangeName": "Exchange1",
"bindingKey": "a.*.c.#",
"nextHopAddress": "nextHop1",
"nextHopPhase": 3}],
["b13", {"exchangeName": "Exchange1",
"bindingKey": "a.b.*.#",
"nextHopAddress": "nextHop2",
"nextHopPhase": 0}],
["b14", {"exchangeName": "Exchange1",
"bindingKey": "a.*.c.#",
"nextHopAddress": "nextHop2",
"nextHopPhase": 0}],
["b21", {"exchangeName": "Exchange2",
"bindingKey": "a/b/?/#",
"nextHopAddress": "nextHop3"}],
["b22", {"exchangeName": "Exchange2",
"bindingKey": "a",
"nextHopAddress": "nextHop4"}],
["b23", {"exchangeName": "Exchange2",
"bindingKey": "a/b",
"nextHopAddress": "nextHop4"}],
["b24", {"exchangeName": "Exchange2",
"bindingKey": "b",
"nextHopAddress": "nextHop3"}]
]
for cfg in binding_config:
args = ""
for k, v in cfg[1].items():
args += "%s=%s " % (k, v)
self.run_qdmanage(router,
"create --type %s --name %s %s" %
(_BINDING_TYPE, cfg[0], args))
# validate
_ = self.run_qdmanage(router, "query --type %s" % _BINDING_TYPE)
bindings = ast.literal_eval(_)
self.assertEqual(len(binding_config), len(bindings))
for cfg in binding_config:
self._validate_entity(name=cfg[0],
kind="binding",
entities=bindings,
expected=cfg[1])
_ = self.run_qdmanage(router, "query --type %s" % _EXCHANGE_TYPE)
exchanges = ast.literal_eval(_)
self.assertEqual(len(ex_config), len(exchanges))
for ex in exchanges:
self.assertEqual(4, ex["bindingCount"])
# verify reads
_ = self.run_qdmanage(router, "read --type %s --name Exchange2" % _EXCHANGE_TYPE)
self.assertEqual("Exchange2", ast.literal_eval(_)["name"])
_ = self.run_qdmanage(router, "read --type %s --name b24" % _BINDING_TYPE)
self.assertEqual("b24", ast.literal_eval(_)["name"])
# binding deletion by id:
bid = bindings[0]["identity"]
self.run_qdmanage(router, "delete --type " + _BINDING_TYPE +
" --identity %s" % bid)
_ = self.run_qdmanage(router, "query --type %s" % _BINDING_TYPE)
bindings = ast.literal_eval(_)
self.assertEqual(len(binding_config) - 1, len(bindings))
for binding in bindings:
self.assertFalse(binding["identity"] == bid)
# binding deletion by name:
self.run_qdmanage(router, "delete --type " + _BINDING_TYPE +
" --name b14")
_ = self.run_qdmanage(router, "query --type %s" % _BINDING_TYPE)
bindings = ast.literal_eval(_)
self.assertEqual(len(binding_config) - 2, len(bindings))
for binding in bindings:
self.assertFalse(binding["name"] == "b14")
# exchange deletion by name:
self.run_qdmanage(router, "delete --type " + _EXCHANGE_TYPE +
" --name Exchange1")
_ = self.run_qdmanage(router, "query --type %s" % _EXCHANGE_TYPE)
exchanges = ast.literal_eval(_)
self.assertEqual(len(ex_config) - 1, len(exchanges))
self.assertEqual("Exchange2", exchanges[0]["name"])
# negative testing
# exchange name is required
self.assertRaises(Exception, self.run_qdmanage, router,
"create --type " + _EXCHANGE_TYPE +
" address=Nope")
# exchange address is required
self.assertRaises(Exception, self.run_qdmanage, router,
"create --type " + _EXCHANGE_TYPE +
" --name Nope")
# duplicate exchange names
self.assertRaises(Exception, self.run_qdmanage, router,
"create --type " + _EXCHANGE_TYPE +
" --name Exchange2 address=foo")
# invalid match method
self.assertRaises(Exception, self.run_qdmanage, router,
"create --type " + _EXCHANGE_TYPE +
" --name Exchange3 address=foo"
" matchMethod=blinky")
# duplicate exchange addresses
self.assertRaises(Exception, self.run_qdmanage, router,
"create --type " + _EXCHANGE_TYPE +
" --name Nope address=Address2")
# binding with no exchange name
self.assertRaises(Exception, self.run_qdmanage, router,
"create --type " + _BINDING_TYPE +
" --name Nope")
# binding with bad exchange name
self.assertRaises(Exception, self.run_qdmanage, router,
"create --type " + _BINDING_TYPE +
" exchangeName=Nope")
# binding with duplicate name
self.assertRaises(Exception, self.run_qdmanage, router,
"create --type " + _BINDING_TYPE +
" --name b22 exchangeName=Exchange2"
" bindingKey=b nextHopAddress=nextHop3")
# binding with duplicate pattern & next hop
self.assertRaises(Exception, self.run_qdmanage, router,
"create --type " + _BINDING_TYPE +
" --name Nuhuh exchangeName=Exchange2"
" key=b nextHop=nextHop3")
# binding with no next hop
self.assertRaises(Exception, self.run_qdmanage, router,
"create --type " + _BINDING_TYPE +
" --name Nuhuh exchangeName=Exchange2"
" bindingKey=x/y/z")
# invalid mqtt key
self.assertRaises(Exception, self.run_qdmanage, router,
"create --type " + _BINDING_TYPE +
" exchangeName=Exchange2"
" bindingKey=x/#/z"
" nextHopAddress=Nope")
# delete exchange by identity:
self.run_qdmanage(router, "delete --type " + _EXCHANGE_TYPE +
" --identity %s" % exchanges[0]["identity"])
def test_forwarding(self):
"""
Simple forwarding over a single 0-10 exchange
"""
config = [
('exchange', {'address': 'Address1',
'name': 'Exchange1',
'matchMethod': 'amqp'}),
# two different patterns, same next hop:
('binding', {'name': 'binding1',
'exchangeName': 'Exchange1',
'bindingKey': 'a.*',
'nextHopAddress': 'nextHop1'}),
('binding', {'name': 'binding2',
'exchangeName': 'Exchange1',
'bindingKey': 'a.b',
'nextHopAddress': 'nextHop1'}),
# duplicate patterns, different next hops:
('binding', {'name': 'binding3',
'exchangeName': 'Exchange1',
'bindingKey': 'a.c.#',
'nextHopAddress': 'nextHop1'}),
('binding', {'name': 'binding4',
'exchangeName': 'Exchange1',
'bindingKey': 'a.c.#',
'nextHopAddress': 'nextHop2'}),
# match for nextHop2 only
('binding', {'name': 'binding5',
'exchangeName': 'Exchange1',
'bindingKey': 'a.b.c',
'nextHopAddress': 'nextHop2'})
]
router = self._create_router('A', config)
# create clients for message transfer
conn = BlockingConnection(router.addresses[0])
sender = conn.create_sender(address="Address1", options=AtMostOnce())
nhop1 = conn.create_receiver(address="nextHop1", credit=100)
nhop2 = conn.create_receiver(address="nextHop2", credit=100)
# verify initial metrics
self._validate_exchange(router, name='Exchange1',
bindingCount=5,
receivedCount=0,
droppedCount=0,
forwardedCount=0,
divertedCount=0)
for b in range(5):
self._validate_binding(router,
name='binding%s' % (b + 1),
matchedCount=0)
# send message with subject "a.b"
# matches (binding1, binding2)
# forwarded to NextHop1 only
sender.send(Message(subject='a.b', body='A'))
self.assertEqual('A', nhop1.receive(timeout=TIMEOUT).body)
# send message with subject "a.c"
# matches (bindings 1,3,4)
# -> NextHop1, NextHop2
sender.send(Message(subject='a.c', body='B'))
self.assertEqual('B', nhop1.receive(timeout=TIMEOUT).body)
self.assertEqual('B', nhop2.receive(timeout=TIMEOUT).body)
# send message with subject "a.c.d"
# matches bindings 3,4
# -> NextHop1, NextHop2
sender.send(Message(subject='a.c.d', body='C'))
self.assertEqual('C', nhop1.receive(timeout=TIMEOUT).body)
self.assertEqual('C', nhop2.receive(timeout=TIMEOUT).body)
# send message with subject "x.y.z"
# no binding match - expected to drop
# not forwarded
sender.send(Message(subject='x.y.z', body=["I am Noone"]))
# send message with subject "a.b.c"
# matches binding5
# -> NextHop2
sender.send(Message(subject='a.b.c', body='D'))
self.assertEqual('D', nhop2.receive(timeout=TIMEOUT).body)
# ensure there are no more messages on either hop:
self.assertRaises(Timeout, nhop1.receive, timeout=0.25)
self.assertRaises(Timeout, nhop2.receive, timeout=0.25)
# validate counters
self._validate_binding(router, name='binding1',
matchedCount=2)
self._validate_binding(router, name='binding2',
matchedCount=1)
self._validate_binding(router, name='binding3',
matchedCount=2)
self._validate_binding(router, name='binding4',
matchedCount=2)
self._validate_binding(router, name='binding5',
matchedCount=1)
self._validate_exchange(router, name="Exchange1",
receivedCount=5,
forwardedCount=4,
divertedCount=0,
droppedCount=1)
conn.close()
def test_forwarding_mqtt(self):
"""
Simple forwarding over a single mqtt exchange
"""
config = [
('exchange', {'address': 'Address2',
'name': 'Exchange1',
'matchMethod': 'mqtt',
'alternateAddress': 'altNextHop'}),
('binding', {'name': 'binding1',
'exchangeName': 'Exchange1',
'bindingKey': 'a/b',
'nextHopAddress': 'nextHop1'}),
('binding', {'name': 'binding2',
'exchangeName': 'Exchange1',
'bindingKey': 'a/+',
'nextHopAddress': 'nextHop2'}),
('binding', {'name': 'binding3',
'exchangeName': 'Exchange1',
'bindingKey': 'c/#',
'nextHopAddress': 'nextHop1'}),
('binding', {'name': 'binding4',
'exchangeName': 'Exchange1',
'bindingKey': 'c/b',
'nextHopAddress': 'nextHop2'}),
]
router = self._create_router('B', config)
# create clients for message transfer
conn = BlockingConnection(router.addresses[0])
sender = conn.create_sender(address="Address2", options=AtMostOnce())
nhop1 = conn.create_receiver(address="nextHop1", credit=100)
nhop2 = conn.create_receiver(address="nextHop2", credit=100)
alt = conn.create_receiver(address="altNextHop", credit=100)
# send message with subject "a.b"
# matches (binding1, binding2)
# forwarded to NextHop1, NextHop2
sender.send(Message(subject='a/b', body='A'))
self.assertEqual('A', nhop1.receive(timeout=TIMEOUT).body)
self.assertEqual('A', nhop2.receive(timeout=TIMEOUT).body)
# send message with subject "a/c"
# matches binding2
# -> NextHop2
sender.send(Message(subject='a/c', body='B'))
self.assertEqual('B', nhop2.receive(timeout=TIMEOUT).body)
# send message with subject "c/b"
# matches bindings 3,4
# -> NextHop1, NextHop2
sender.send(Message(subject='c/b', body='C'))
self.assertEqual('C', nhop1.receive(timeout=TIMEOUT).body)
self.assertEqual('C', nhop2.receive(timeout=TIMEOUT).body)
# send message with subject "c/b/dee/eee"
# matches binding3
# -> NextHop1
sender.send(Message(subject='c/b/dee/eee', body='D'))
self.assertEqual('D', nhop1.receive(timeout=TIMEOUT).body)
# send message with subject "x.y.z"
# no binding match
# -> alternate
sender.send(Message(subject='x.y.z', body="?"))
self.assertEqual('?', alt.receive(timeout=TIMEOUT).body)
# ensure there are no more messages on either hop:
self.assertRaises(Timeout, nhop1.receive, timeout=0.25)
self.assertRaises(Timeout, nhop2.receive, timeout=0.25)
self.assertRaises(Timeout, alt.receive, timeout=0.25)
# validate counters
self._validate_binding(router, name='binding1',
matchedCount=1)
self._validate_binding(router, name='binding2',
matchedCount=2)
self._validate_binding(router, name='binding3',
matchedCount=2)
self._validate_binding(router, name='binding4',
matchedCount=1)
self._validate_exchange(router, name="Exchange1",
receivedCount=5,
forwardedCount=5,
divertedCount=1,
droppedCount=0)
conn.close()
def test_forwarding_sync(self):
"""
Forward unsettled messages to multiple subscribers
"""
config = [
('router', {'mode': 'standalone', 'id': 'QDR.mcast'}),
('listener', {'role': 'normal', 'host': '0.0.0.0',
'port': self.tester.get_port(),
'saslMechanisms':'ANONYMOUS'}),
('address', {'pattern': 'nextHop2/#', 'distribution': 'multicast'}),
('exchange', {'address': 'Address3',
'name': 'Exchange1',
'alternateAddress': 'altNextHop'}),
('binding', {'name': 'binding1',
'exchangeName': 'Exchange1',
'bindingKey': 'a.b',
'nextHopAddress': 'nextHop1'}),
('binding', {'name': 'binding2',
'exchangeName': 'Exchange1',
'bindingKey': '*.b',
'nextHopAddress': 'nextHop2'})
]
router = self.tester.qdrouterd('QDR.mcast', Qdrouterd.Config(config))
# create clients for message transfer
conn = BlockingConnection(router.addresses[0])
sender = conn.create_sender(address="Address3", options=AtLeastOnce())
nhop1 = AsyncTestReceiver(address=router.addresses[0], source="nextHop1")
nhop2A = AsyncTestReceiver(address=router.addresses[0], source="nextHop2")
nhop2B = AsyncTestReceiver(address=router.addresses[0], source="nextHop2")
alt = AsyncTestReceiver(address=router.addresses[0], source="altNextHop")
sender.send(Message(subject='a.b', body='A'))
sender.send(Message(subject='x.y', body='B'))
self.assertEqual('A', nhop1.queue.get(timeout=TIMEOUT).body)
self.assertEqual('A', nhop2A.queue.get(timeout=TIMEOUT).body)
self.assertEqual('A', nhop2B.queue.get(timeout=TIMEOUT).body)
self.assertEqual('B', alt.queue.get(timeout=TIMEOUT).body)
nhop1.stop()
nhop2A.stop()
nhop2B.stop()
alt.stop()
conn.close()
self.assertTrue(nhop1.queue.empty())
self.assertTrue(nhop2A.queue.empty())
self.assertTrue(nhop2B.queue.empty())
self.assertTrue(alt.queue.empty())
def test_remote_exchange(self):
"""
Verify that the exchange and bindings are visible to other routers in
the network
"""
def router(self, name, extra_config):
config = [
('router', {'mode': 'interior', 'id': 'QDR.%s'%name, 'allowUnsettledMulticast': 'yes'}),
('listener', {'port': self.tester.get_port(), 'stripAnnotations': 'no'})
] + extra_config
config = Qdrouterd.Config(config)
self.routers.append(self.tester.qdrouterd(name, config, wait=True))
self.inter_router_port = self.tester.get_port()
self.routers = []
router(self, 'A',
[('listener',
{'role': 'inter-router', 'port': self.inter_router_port}),
('address', {'pattern': 'nextHop1/#',
'distribution': 'multicast'}),
('address', {'pattern': 'nextHop2/#',
'distribution': 'balanced'}),
('address', {'pattern': 'nextHop3/#',
'distribution': 'closest'}),
('exchange', {'address': 'AddressA',
'name': 'ExchangeA',
'matchMethod': 'mqtt'}),
('binding', {'name': 'bindingA1',
'exchangeName': 'ExchangeA',
'bindingKey': 'a/b',
'nextHopAddress': 'nextHop1'}),
('binding', {'name': 'bindingA2',
'exchangeName': 'ExchangeA',
'bindingKey': 'a/+',
'nextHopAddress': 'nextHop2'}),
('binding', {'name': 'bindingA3',
'exchangeName': 'ExchangeA',
'bindingKey': '+/b',
'nextHopAddress': 'nextHop3'}),
('binding', {'name': 'bindingA4',
'exchangeName': 'ExchangeA',
'bindingKey': 'a/#',
'nextHopAddress': 'NotSubscribed'})
])
router(self, 'B',
[('connector', {'name': 'connectorToA',
'role': 'inter-router',
'port': self.inter_router_port,
'verifyHostname': 'no'}),
('address', {'pattern': 'nextHop1/#',
'distribution': 'multicast'}),
('address', {'pattern': 'nextHop2/#',
'distribution': 'balanced'}),
('address', {'pattern': 'nextHop3/#',
'distribution': 'closest'})
])
self.routers[0].wait_router_connected('QDR.B')
self.routers[1].wait_router_connected('QDR.A')
self.routers[1].wait_address('AddressA')
# connect clients to router B (no exchange)
nhop1A = AsyncTestReceiver(self.routers[1].addresses[0], 'nextHop1')
nhop1B = AsyncTestReceiver(self.routers[1].addresses[0], 'nextHop1')
nhop2 = AsyncTestReceiver(self.routers[1].addresses[0], 'nextHop2')
nhop3 = AsyncTestReceiver(self.routers[1].addresses[0], 'nextHop3')
self.routers[0].wait_address('nextHop1', remotes=1)
self.routers[0].wait_address('nextHop2', remotes=1)
self.routers[0].wait_address('nextHop3', remotes=1)
conn = BlockingConnection(self.routers[1].addresses[0])
sender = conn.create_sender(address="AddressA", options=AtLeastOnce())
sender.send(Message(subject='a/b', body='Hi!'))
# multicast
self.assertEqual('Hi!', nhop1A.queue.get(timeout=TIMEOUT).body)
self.assertEqual('Hi!', nhop1B.queue.get(timeout=TIMEOUT).body)
# balanced and closest
self.assertEqual('Hi!', nhop2.queue.get(timeout=TIMEOUT).body)
self.assertEqual('Hi!', nhop3.queue.get(timeout=TIMEOUT).body)
nhop1A.stop()
nhop1B.stop()
nhop2.stop()
nhop3.stop()
conn.close()
def test_large_messages(self):
"""
Verify that multi-frame messages are forwarded properly
"""
MAX_FRAME=1024
config = [
('router', {'mode': 'interior', 'id': 'QDR.X',
'allowUnsettledMulticast': 'yes'}),
('listener', {'port': self.tester.get_port(),
'stripAnnotations': 'no',
'maxFrameSize': MAX_FRAME}),
('address', {'pattern': 'nextHop1/#',
'distribution': 'multicast'}),
('exchange', {'address': 'AddressA',
'name': 'ExchangeA'}),
('binding', {'name': 'bindingA1',
'exchangeName': 'ExchangeA',
'bindingKey': 'a/b',
'nextHopAddress': 'nextHop1'})
]
router = self.tester.qdrouterd('QDR.X',
Qdrouterd.Config(config),
wait=True)
# connect clients to router B (no exchange)
nhop1A = AsyncTestReceiver(router.addresses[0], 'nextHop1',
conn_args={'max_frame_size': MAX_FRAME})
nhop1B = AsyncTestReceiver(router.addresses[0], 'nextHop1',
conn_args={'max_frame_size': MAX_FRAME})
conn = BlockingConnection(router.addresses[0],
max_frame_size=MAX_FRAME)
sender = conn.create_sender(address="AddressA")
jumbo = (10 * MAX_FRAME) * 'X'
sender.send(Message(subject='a/b', body=jumbo))
# multicast
self.assertEqual(jumbo, nhop1A.queue.get(timeout=TIMEOUT).body)
self.assertEqual(jumbo, nhop1B.queue.get(timeout=TIMEOUT).body)
nhop1A.stop()
nhop1B.stop()
conn.close()
def test_forwarding_fanout(self):
    """
    Verify bindings that do not have a key receive all messages
    """
    config = [
        ('exchange', {'address': 'AddressF',
                      'name': 'ExchangeF'}),
        ('binding', {'name': 'binding1',
                     'exchangeName': 'ExchangeF',
                     'bindingKey': 'pattern',
                     'nextHopAddress': 'nextHop1'}),
        # two bindings w/o key
        ('binding', {'name': 'binding2',
                     'exchangeName': 'ExchangeF',
                     'nextHopAddress': 'nextHop2'}),
        ('binding', {'name': 'binding3',
                     'exchangeName': 'ExchangeF',
                     'nextHopAddress': 'nextHop3'})
    ]
    # Repeat the scenario for each exchange match method; a fresh router
    # is created and torn down per iteration so the configs don't mix.
    for meth in ['amqp', 'mqtt']:
        config[0][1]['matchMethod'] = meth
        router = self._create_router('A', config)
        # create clients for message transfer
        conn = BlockingConnection(router.addresses[0])
        sender = conn.create_sender(address="AddressF", options=AtMostOnce())
        nhop1 = conn.create_receiver(address="nextHop1", credit=100)
        nhop2 = conn.create_receiver(address="nextHop2", credit=100)
        nhop3 = conn.create_receiver(address="nextHop3", credit=100)
        # send message with subject "nope"
        # should arrive at nextHop2 & 3 only (keyless bindings match all)
        sender.send(Message(subject='nope', body='A'))
        self.assertEqual('A', nhop2.receive(timeout=TIMEOUT).body)
        self.assertEqual('A', nhop3.receive(timeout=TIMEOUT).body)
        # send message with subject "pattern"
        # forwarded to all bindings:
        sender.send(Message(subject='pattern', body='B'))
        self.assertEqual('B', nhop1.receive(timeout=TIMEOUT).body)
        self.assertEqual('B', nhop2.receive(timeout=TIMEOUT).body)
        self.assertEqual('B', nhop3.receive(timeout=TIMEOUT).body)
        conn.close()
        router.teardown()
if __name__ == '__main__':
    # main_module() supplies the module object for unittest's CLI runner
    # (standard pattern in this test suite's harness).
    unittest.main(main_module())
| |
#!/usr/bin/python -u
#
# Setup script for libxml2 and libxslt if found
#
import sys, os
from distutils.core import setup, Extension
# Below ROOT, we expect to find include, include/libxml2, lib and bin.
# On *nix, it is not needed (but should not harm),
# on Windows, it is set by configure.js.
# NOTE(review): this absolute path is a machine-specific vendored build
# artifact; it is only consulted when headers are not found in the
# standard locations searched below.
ROOT = r'/home/hub/revenge/neighborly/vendor/bundle/ruby/2.1.0/gems/nokogiri-1.6.0/ports/i686-linux-gnu/libxml2/2.8.0'
# Thread-enabled libxml2
with_threads = 1
# If this flag is set (windows only),
# a private copy of the dlls are included in the package.
# If this flag is not set, the libxml2 and libxslt
# dlls must be found somewhere in the PATH at runtime.
WITHDLLS = 1 and sys.platform.startswith('win')
def missing(file):
    """Return 1 if *file* is not readable (or does not exist), else 0.

    os.access() returns a bool; the historical 0/1 integers are kept
    because callers rely on both "if missing(...)" and arithmetic-style
    truthiness throughout this script.
    """
    return 0 if os.access(file, os.R_OK) else 1
# HOME is used as a last-resort include search path below.
# os.environ[...] raises only KeyError, so catch exactly that instead of
# a bare except (which would also hide unrelated failures).
try:
    HOME = os.environ['HOME']
except KeyError:
    # No HOME in the environment (typical on Windows): fall back to C:.
    HOME = "C:"
if WITHDLLS:
    # libxml dlls (expected in ROOT/bin)
    dlls = [ 'iconv.dll','libxml2.dll','libxslt.dll','libexslt.dll' ]
    dlls = map(lambda dll: os.path.join(ROOT,'bin',dll),dlls)
    # create __init__.py for the libxmlmods package
    # (extension modules and DLLs are installed inside this private
    # package so they are found at runtime without modifying PATH)
    if not os.path.exists("libxmlmods"):
        os.mkdir("libxmlmods")
        open("libxmlmods/__init__.py","w").close()
def altImport(s):
    """Rewrite direct extension-module imports to go through the private
    libxmlmods package (used when DLLs are bundled on Windows)."""
    for mod_name in ("libxml2mod", "libxsltmod"):
        s = s.replace("import " + mod_name,
                      "from libxmlmods import " + mod_name)
    return s
# Per-platform link settings: Windows libraries carry a 'lib' prefix and
# need no extra system libs; elsewhere link against libm and libz.
if sys.platform.startswith('win'):
    libraryPrefix = 'lib'
    platformLibs = []
else:
    libraryPrefix = ''
    platformLibs = ["m","z"]

# those are examined to find
# - libxml2/libxml/tree.h
# - iconv.h
# - libxslt/xsltconfig.h
includes_dir = [
    "/usr/include",
    "/usr/local/include",
    "/opt/include",
    os.path.join(ROOT,'include'),
    HOME
];

# Locate the libxml2 headers; abort if they cannot be found.
xml_includes=""
for dir in includes_dir:
    if not missing(dir + "/libxml2/libxml/tree.h"):
        xml_includes=dir + "/libxml2"
        break;

if xml_includes == "":
    print "failed to find headers for libxml2: update includes_dir"
    sys.exit(1)

# Locate iconv.h the same way.
iconv_includes=""
for dir in includes_dir:
    if not missing(dir + "/iconv.h"):
        iconv_includes=dir
        break;

if iconv_includes == "":
    print "failed to find headers for libiconv: update includes_dir"
    sys.exit(1)

# those are added in the linker search path for libraries
libdirs = [
    os.path.join(ROOT,'lib'),
]

# Source/metadata files listed in the generated MANIFEST below.
xml_files = ["libxml2-api.xml", "libxml2-python-api.xml",
             "libxml.c", "libxml.py", "libxml_wrap.h", "types.c",
             "xmlgenerator.py", "README", "TODO", "drv_libxml2.py"]

xslt_files = ["libxslt-api.xml", "libxslt-python-api.xml",
              "libxslt.c", "libxsl.py", "libxslt_wrap.h",
              "xsltgenerator.py"]
# Generate the libxml2 Python stubs if they are not already present.
# Importing xmlgenerator (or the older generator module) runs the code
# generation as an import side effect.
if missing("libxml2-py.c") or missing("libxml2.py"):
    try:
        try:
            import xmlgenerator
        except:
            import generator
    except:
        print "failed to find and generate stubs for libxml2, aborting ..."
        print sys.exc_type, sys.exc_value
        sys.exit(1)

    # Concatenate the hand-written prologue (libxml.py) with the
    # generated class definitions into the final libxml2.py module,
    # rewriting imports when DLLs are bundled.
    head = open("libxml.py", "r")
    generated = open("libxml2class.py", "r")
    result = open("libxml2.py", "w")
    for line in head.readlines():
        if WITHDLLS:
            result.write(altImport(line))
        else:
            result.write(line)
    for line in generated.readlines():
        result.write(line)
    head.close()
    generated.close()
    result.close()
# Same stub-generation dance for libxslt, but failures are non-fatal:
# the package simply builds without XSLT support (with_xslt stays 0).
with_xslt=0
if missing("libxslt-py.c") or missing("libxslt.py"):
    if missing("xsltgenerator.py") or missing("libxslt-api.xml"):
        print "libxslt stub generator not found, libxslt not built"
    else:
        try:
            import xsltgenerator
        except:
            print "failed to generate stubs for libxslt, aborting ..."
            print sys.exc_type, sys.exc_value
        else:
            # Assemble libxslt.py from the prologue plus generated classes.
            head = open("libxsl.py", "r")
            generated = open("libxsltclass.py", "r")
            result = open("libxslt.py", "w")
            for line in head.readlines():
                if WITHDLLS:
                    result.write(altImport(line))
                else:
                    result.write(line)
            for line in generated.readlines():
                result.write(line)
            head.close()
            generated.close()
            result.close()
            with_xslt=1
else:
    with_xslt=1

# Even with stubs available, XSLT support requires its headers.
if with_xslt == 1:
    xslt_includes=""
    for dir in includes_dir:
        if not missing(dir + "/libxslt/xsltconfig.h"):
            xslt_includes=dir + "/libxslt"
            break;
    if xslt_includes == "":
        print "failed to find headers for libxslt: update includes_dir"
        with_xslt = 0
# Assemble the distutils metadata: module list, C sources, include and
# library paths, and preprocessor macros.
descr = "libxml2 package"
modules = [ 'libxml2', 'drv_libxml2' ]
if WITHDLLS:
    modules.append('libxmlmods.__init__')
c_files = ['libxml2-py.c', 'libxml.c', 'types.c' ]
includes= [xml_includes, iconv_includes]
libs = [libraryPrefix + "xml2"] + platformLibs
macros = []
if with_threads:
    macros.append(('_REENTRANT','1'))
if with_xslt == 1:
    descr = "libxml2 and libxslt package"
    if not sys.platform.startswith('win'):
        #
        # We are gonna build 2 identical shared libs with merge initializing
        # both libxml2mod and libxsltmod
        #
        c_files = c_files + ['libxslt-py.c', 'libxslt.c']
        xslt_c_files = c_files
        macros.append(('MERGED_MODULES', '1'))
    else:
        #
        # On windows the MERGED_MODULE option is not needed
        # (and does not work)
        #
        xslt_c_files = ['libxslt-py.c', 'libxslt.c', 'types.c']
    libs.insert(0, libraryPrefix + 'exslt')
    libs.insert(0, libraryPrefix + 'xslt')
    includes.append(xslt_includes)
    modules.append('libxslt')

extens=[Extension('libxml2mod', c_files, include_dirs=includes,
                  library_dirs=libdirs,
                  libraries=libs, define_macros=macros)]
if with_xslt == 1:
    extens.append(Extension('libxsltmod', xslt_c_files, include_dirs=includes,
                            library_dirs=libdirs,
                            libraries=libs, define_macros=macros))

# Write a MANIFEST only if one is not present already.
if missing("MANIFEST"):
    manifest = open("MANIFEST", "w")
    manifest.write("setup.py\n")
    for file in xml_files:
        manifest.write(file + "\n")
    if with_xslt == 1:
        for file in xslt_files:
            manifest.write(file + "\n")
    manifest.close()

# Bundled-DLL builds install everything under the libxmlmods package.
if WITHDLLS:
    ext_package = "libxmlmods"
    if sys.version >= "2.2":
        base = "lib/site-packages/"
    else:
        base = ""
    data_files = [(base+"libxmlmods",dlls)]
else:
    ext_package = None
    data_files = []

# NOTE(review): 'licence' is the historical distutils alias for
# 'license'; distutils accepts it (with a warning on some versions).
setup (name = "libxml2-python",
       # On *nix, the version number is created from setup.py.in
       # On windows, it is set by configure.js
       version = "2.8.0",
       description = descr,
       author = "Daniel Veillard",
       author_email = "veillard@redhat.com",
       url = "http://xmlsoft.org/python.html",
       licence="MIT Licence",
       py_modules=modules,
       ext_modules=extens,
       ext_package=ext_package,
       data_files=data_files,
       )
sys.exit(0)
| |
"""Tests for the /api/v1/users endpoints."""
from pytest_mongo import factories
from pytest_redis import factories
from .common import ( # noqa (fixture)
ADMIN_DEMOGRAPHICS,
clear_db,
client,
decode_response,
get_csrf_token,
register_test_accounts,
TEACHER_DEMOGRAPHICS,
STUDENT_DEMOGRAPHICS,
STUDENT_2_DEMOGRAPHICS,
OTHER_USER_DEMOGRAPHICS,
get_conn,
RATE_LIMIT_BYPASS_KEY,
)
import api
def test_get_users(mongo_proc, redis_proc, client):  # noqa (fixture)
    """Tests the GET /users endpoint."""
    clear_db()
    register_test_accounts()
    # Authenticate as the admin account before listing users.
    client.post(
        "/api/v1/user/login",
        json={
            "username": ADMIN_DEMOGRAPHICS["username"],
            "password": ADMIN_DEMOGRAPHICS["password"],
        },
        headers=[("Limit-Bypass", RATE_LIMIT_BYPASS_KEY)],
    )
    res = client.get("/api/v1/users")
    assert res.status_code == 200
    assert len(res.json) == 5
    # Every registered test account must appear in the listing.
    expected = [
        demographics["username"]
        for demographics in (
            ADMIN_DEMOGRAPHICS,
            STUDENT_DEMOGRAPHICS,
            STUDENT_2_DEMOGRAPHICS,
            OTHER_USER_DEMOGRAPHICS,
            TEACHER_DEMOGRAPHICS,
        )
    ]
    serialized = str(res.json)
    for username in expected:
        assert username in serialized
def test_add_user(mongo_proc, redis_proc, client):  # noqa (fixture)
    """Tests the POST /users endpoint.

    Covers: demographic (age) validation, parent-email enforcement for
    minor accounts, username validation, and role assignment for the
    first account (admin), a teacher account, and a student account.
    """
    clear_db()

    def _register(**overrides):
        # POST a registration payload built from a valid default body
        # with per-case overrides applied on top.
        body = {
            "email": "admin@sample.com",
            "firstname": "Adminuser",
            "lastname": "Test",
            "password": "adminuser",
            "username": "adminuser",
            "affiliation": "Testing",
            "usertype": "other",
            "country": "US",
            "demo": {"age": "13-17", "parentemail": "parent@sample.com"},
        }
        body.update(overrides)
        return client.post(
            "/api/v1/users",
            json=body,
            headers=[("Limit-Bypass", RATE_LIMIT_BYPASS_KEY)],
        )

    # Attempt to specify an invalid age (this field is verified in the route)
    res = _register(demo={"age": "invalid"})
    assert res.status_code == 400
    assert (
        res.json["message"]
        == "'age' must be specified in the 'demo' object. Valid values "
        + "are: ['13-17', '18+']"
    )

    # Force-enable the parent verification email setting and submit a
    # minor account without a parent email address.
    api.config.get_settings()
    db = get_conn()
    db.settings.find_one_and_update(
        {}, {"$set": {"email.parent_verification_email": True}}
    )
    res = _register(demo={"age": "13-17"})
    assert res.status_code == 400
    assert (
        res.json["message"]
        == "Must provide a valid parent email address under the key "
        + "'demo.parentemail'."
    )

    # Attempt to specify a non-alphanumeric username
    res = _register(username="invalid-username!")
    assert res.status_code == 400
    assert res.json["message"] == "Usernames must be alphanumeric."

    # Create the user and verify properties. The first registered
    # account is granted both teacher and admin roles.
    res = _register()
    assert res.status_code == 201
    assert res.json["success"] is True
    uid = res.json["uid"]
    admin_user = db.users.find_one({"uid": uid})
    assert admin_user["email"] == "admin@sample.com"
    assert admin_user["firstname"] == "Adminuser"
    assert admin_user["lastname"] == "Test"
    assert admin_user["username"] == "adminuser"
    assert admin_user["usertype"] == "other"
    assert admin_user["country"] == "US"
    assert admin_user["demo"] == {"age": "13-17", "parentemail": "parent@sample.com"}
    assert admin_user["disabled"] is False
    assert admin_user["verified"] is True
    assert admin_user["extdata"] == {}
    assert admin_user["completed_minigames"] == []
    assert admin_user["unlocked_walkthroughs"] == []
    assert admin_user["tokens"] == 0
    assert admin_user["teacher"] is True
    assert admin_user["admin"] is True
    for other_field in ["uid", "tid", "password_hash"]:
        assert other_field in admin_user
    # Plaintext credentials and affiliation must not be stored on the user.
    assert "affiliation" not in admin_user
    assert "password" not in admin_user
    # Registration also creates a single-member team for the user.
    admin_team = db.teams.find_one({"tid": admin_user["tid"]})
    assert admin_team["team_name"] == "adminuser"
    assert admin_team["affiliation"] == "Testing"
    assert admin_team["size"] == 1
    for other_field in ["tid", "password", "instances"]:
        assert other_field in admin_team

    # Create a teacher user and verify its roles
    res = _register(
        email="teacher@sample.com",
        firstname="Teacheruser",
        password="teacheruser",
        username="teacheruser",
        usertype="teacher",
        demo={"age": "18+"},
    )
    assert res.status_code == 201
    assert res.json["success"] is True
    uid = res.json["uid"]
    teacher_user = db.users.find_one({"uid": uid})
    assert teacher_user["teacher"] is True
    assert teacher_user["admin"] is False

    # Create a standard user and verify its roles
    res = _register(
        email="user@sample.com",
        firstname="Testuser",
        password="testuser",
        username="testuser",
        usertype="student",
        demo={"age": "18+"},
    )
    assert res.status_code == 201
    assert res.json["success"] is True
    uid = res.json["uid"]
    # Renamed from the copy-pasted "teacher_user": this is the student.
    student_user = db.users.find_one({"uid": uid})
    assert student_user["teacher"] is False
    assert student_user["admin"] is False
def test_get_one_user(mongo_proc, redis_proc, client):  # noqa (fixture)
    """Tests the GET /users/<uid> endpoint."""
    clear_db()
    register_test_accounts()
    # Authenticate as admin; user lookup requires elevated privileges.
    client.post(
        "/api/v1/user/login",
        json={
            "username": ADMIN_DEMOGRAPHICS["username"],
            "password": ADMIN_DEMOGRAPHICS["password"],
        },
        headers=[("Limit-Bypass", RATE_LIMIT_BYPASS_KEY)],
    )
    db = get_conn()
    student_username = STUDENT_DEMOGRAPHICS["username"]
    student_record = db.users.find_one({"username": student_username})
    test_account_uid = student_record["uid"]
    # Attempt to get nonexistent user
    res = client.get("/api/v1/users/invalid")
    assert res.status_code == 404
    assert res.json["message"] == "User not found"
    # Get a valid user
    res = client.get(f"/api/v1/users/{test_account_uid}")
    assert res.status_code == 200
    assert student_username in str(res.json)
| |
from __future__ import unicode_literals
from datetime import datetime, timedelta
import threading
import warnings
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.db import connections, DEFAULT_DB_ALIAS
from django.db import DatabaseError
from django.db.models.fields import Field
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.manager import BaseManager
from django.db.models.query import QuerySet, EmptyQuerySet, ValuesListQuerySet, MAX_GET_RESULTS
from django.test import TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import Article, SelfRef, ArticleSelectOnSave
class ModelInstanceCreationTests(TestCase):
    """Tests for the various ways an Article instance can be constructed
    and saved (kwargs, positional args, defaults, autofields)."""

    def test_object_is_not_written_to_database_until_save_was_called(self):
        a = Article(
            id=None,
            headline='Area man programs in Python',
            pub_date=datetime(2005, 7, 28),
        )
        # Before save(): no PK assigned and nothing in the table.
        self.assertIsNone(a.id)
        self.assertEqual(Article.objects.all().count(), 0)
        # Save it into the database. You have to call save() explicitly.
        a.save()
        self.assertIsNotNone(a.id)
        self.assertEqual(Article.objects.all().count(), 1)

    def test_can_initialize_model_instance_using_positional_arguments(self):
        """
        You can initialize a model instance using positional arguments,
        which should match the field order as defined in the model.
        """
        a = Article(None, 'Second article', datetime(2005, 7, 29))
        a.save()
        self.assertEqual(a.headline, 'Second article')
        self.assertEqual(a.pub_date, datetime(2005, 7, 29, 0, 0))

    def test_can_create_instance_using_kwargs(self):
        a = Article(
            id=None,
            headline='Third article',
            pub_date=datetime(2005, 7, 30),
        )
        a.save()
        self.assertEqual(a.headline, 'Third article')
        self.assertEqual(a.pub_date, datetime(2005, 7, 30, 0, 0))

    def test_autofields_generate_different_values_for_each_instance(self):
        # Identical field values must still receive distinct auto PKs.
        a1 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
        a2 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
        a3 = Article.objects.create(headline='First', pub_date=datetime(2005, 7, 30, 0, 0))
        self.assertNotEqual(a3.id, a1.id)
        self.assertNotEqual(a3.id, a2.id)

    def test_can_mix_and_match_position_and_kwargs(self):
        # You can also mix and match position and keyword arguments, but
        # be sure not to duplicate field information.
        a = Article(None, 'Fourth article', pub_date=datetime(2005, 7, 31))
        a.save()
        self.assertEqual(a.headline, 'Fourth article')

    def test_cannot_create_instance_with_invalid_kwargs(self):
        # An unknown field name in kwargs raises TypeError.
        six.assertRaisesRegex(
            self,
            TypeError,
            "'foo' is an invalid keyword argument for this function",
            Article,
            id=None,
            headline='Some headline',
            pub_date=datetime(2005, 7, 31),
            foo='bar',
        )

    def test_can_leave_off_value_for_autofield_and_it_gets_value_on_save(self):
        """
        You can leave off the value for an AutoField when creating an
        object, because it'll get filled in automatically when you save().
        """
        a = Article(headline='Article 5', pub_date=datetime(2005, 7, 31))
        a.save()
        self.assertEqual(a.headline, 'Article 5')
        self.assertNotEqual(a.id, None)

    def test_leaving_off_a_field_with_default_set_the_default_will_be_saved(self):
        # 'Default headline' is the field default declared on the model.
        a = Article(pub_date=datetime(2005, 7, 31))
        a.save()
        self.assertEqual(a.headline, 'Default headline')

    def test_for_datetimefields_saves_as_much_precision_as_was_given(self):
        """as much precision in *seconds*"""
        a1 = Article(
            headline='Article 7',
            pub_date=datetime(2005, 7, 31, 12, 30),
        )
        a1.save()
        self.assertEqual(Article.objects.get(id__exact=a1.id).pub_date,
                         datetime(2005, 7, 31, 12, 30))

        a2 = Article(
            headline='Article 8',
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        a2.save()
        self.assertEqual(Article.objects.get(id__exact=a2.id).pub_date,
                         datetime(2005, 7, 31, 12, 30, 45))

    def test_saving_an_object_again_does_not_create_a_new_object(self):
        # Repeated save() calls perform UPDATEs, keeping the same PK.
        a = Article(headline='original', pub_date=datetime(2014, 5, 16))
        a.save()
        current_id = a.id

        a.save()
        self.assertEqual(a.id, current_id)

        a.headline = 'Updated headline'
        a.save()
        self.assertEqual(a.id, current_id)

    def test_querysets_checking_for_membership(self):
        headlines = [
            'Area man programs in Python', 'Second article', 'Third article']
        some_pub_date = datetime(2014, 5, 16, 12, 1)
        for headline in headlines:
            Article(headline=headline, pub_date=some_pub_date).save()
        a = Article(headline='Some headline', pub_date=some_pub_date)
        a.save()

        # You can use 'in' to test for membership...
        self.assertIn(a, Article.objects.all())
        # ... but there will often be more efficient ways if that is all you need:
        self.assertTrue(Article.objects.filter(id=a.id).exists())
class ModelTest(TestCase):
    """General model behavior: manager access, deletion, equality,
    get() limits, datetime precision, extra(), lazy strings, empty
    querysets, and hashing."""

    def test_objects_attribute_is_only_available_on_the_class_itself(self):
        six.assertRaisesRegex(
            self,
            AttributeError,
            "Manager isn't accessible via Article instances",
            getattr,
            Article(),
            "objects",
        )
        self.assertFalse(hasattr(Article(), 'objects'))
        self.assertTrue(hasattr(Article, 'objects'))

    def test_queryset_delete_removes_all_items_in_that_queryset(self):
        headlines = [
            'An article', 'Article One', 'Amazing article', 'Boring article']
        some_pub_date = datetime(2014, 5, 16, 12, 1)
        for headline in headlines:
            Article(headline=headline, pub_date=some_pub_date).save()
        self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
                                 ["<Article: Amazing article>",
                                  "<Article: An article>",
                                  "<Article: Article One>",
                                  "<Article: Boring article>"])
        # Bulk delete everything whose headline starts with 'A'.
        Article.objects.filter(headline__startswith='A').delete()
        self.assertQuerysetEqual(Article.objects.all().order_by('headline'),
                                 ["<Article: Boring article>"])

    def test_not_equal_and_equal_operators_behave_as_expected_on_instances(self):
        some_pub_date = datetime(2014, 5, 16, 12, 1)
        a1 = Article.objects.create(headline='First', pub_date=some_pub_date)
        a2 = Article.objects.create(headline='Second', pub_date=some_pub_date)
        self.assertNotEqual(a1, a2)
        self.assertEqual(a1, Article.objects.get(id__exact=a1.id))

        self.assertNotEqual(Article.objects.get(id__exact=a1.id), Article.objects.get(id__exact=a2.id))

    def test_multiple_objects_max_num_fetched(self):
        """
        #6785 - get() should fetch a limited number of results.
        """
        Article.objects.bulk_create(
            Article(headline='Area %s' % i, pub_date=datetime(2005, 7, 28))
            for i in range(MAX_GET_RESULTS)
        )
        # Exactly MAX_GET_RESULTS rows: the exact count is reported.
        six.assertRaisesRegex(
            self,
            MultipleObjectsReturned,
            "get\(\) returned more than one Article -- it returned %d!" % MAX_GET_RESULTS,
            Article.objects.get,
            headline__startswith='Area',
        )
        # One more row: the message says "more than" the limit instead.
        Article.objects.create(headline='Area %s' % MAX_GET_RESULTS, pub_date=datetime(2005, 7, 28))
        six.assertRaisesRegex(
            self,
            MultipleObjectsReturned,
            "get\(\) returned more than one Article -- it returned more than %d!" % MAX_GET_RESULTS,
            Article.objects.get,
            headline__startswith='Area',
        )

    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_microsecond_precision(self):
        # In PostgreSQL, microsecond-level precision is available.
        a9 = Article(
            headline='Article 9',
            pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
        )
        a9.save()
        self.assertEqual(Article.objects.get(pk=a9.pk).pub_date,
                         datetime(2005, 7, 31, 12, 30, 45, 180))

    @skipIfDBFeature('supports_microsecond_precision')
    def test_microsecond_precision_not_supported(self):
        # In MySQL, microsecond-level precision isn't available. You'll lose
        # microsecond-level precision once the data is saved.
        a9 = Article(
            headline='Article 9',
            pub_date=datetime(2005, 7, 31, 12, 30, 45, 180),
        )
        a9.save()
        self.assertEqual(Article.objects.get(id__exact=a9.id).pub_date,
                         datetime(2005, 7, 31, 12, 30, 45))

    def test_manually_specify_primary_key(self):
        # You can manually specify the primary key when creating a new object.
        a101 = Article(
            id=101,
            headline='Article 101',
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        a101.save()
        a101 = Article.objects.get(pk=101)
        self.assertEqual(a101.headline, 'Article 101')

    def test_create_method(self):
        # You can create saved objects in a single step
        a10 = Article.objects.create(
            headline="Article 10",
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        self.assertEqual(Article.objects.get(headline="Article 10"), a10)

    def test_year_lookup_edge_case(self):
        # Edge-case test: A year lookup should retrieve all objects in
        # the given year, including Jan. 1 and Dec. 31.
        Article.objects.create(
            headline='Article 11',
            pub_date=datetime(2008, 1, 1),
        )
        Article.objects.create(
            headline='Article 12',
            pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
        )
        self.assertQuerysetEqual(Article.objects.filter(pub_date__year=2008),
                                 ["<Article: Article 11>", "<Article: Article 12>"])

    def test_unicode_data(self):
        # Unicode data works, too.
        a = Article(
            headline='\u6797\u539f \u3081\u3050\u307f',
            pub_date=datetime(2005, 7, 28),
        )
        a.save()
        self.assertEqual(Article.objects.get(pk=a.id).headline,
                         '\u6797\u539f \u3081\u3050\u307f')

    def test_hash_function(self):
        # Model instances have a hash function, so they can be used in sets
        # or as dictionary keys. Two models compare as equal if their primary
        # keys are equal.
        a10 = Article.objects.create(
            headline="Article 10",
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        a11 = Article.objects.create(
            headline='Article 11',
            pub_date=datetime(2008, 1, 1),
        )
        a12 = Article.objects.create(
            headline='Article 12',
            pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
        )
        s = {a10, a11, a12}
        self.assertIn(Article.objects.get(headline='Article 11'), s)

    def test_field_ordering(self):
        """
        Field instances have a `__lt__` comparison function to define an
        ordering based on their creation. Prior to #17851 this ordering
        comparison relied on the now unsupported `__cmp__` and was assuming
        compared objects were both Field instances raising `AttributeError`
        when it should have returned `NotImplemented`.
        """
        f1 = Field()
        f2 = Field(auto_created=True)
        f3 = Field()
        self.assertLess(f2, f1)
        self.assertGreater(f3, f1)
        self.assertIsNotNone(f1)
        self.assertNotIn(f2, (None, 1, ''))

    def test_extra_method_select_argument_with_dashes_and_values(self):
        # The 'select' argument to extra() supports names with dashes in
        # them, as long as you use values().
        Article.objects.create(
            headline="Article 10",
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        Article.objects.create(
            headline='Article 11',
            pub_date=datetime(2008, 1, 1),
        )
        Article.objects.create(
            headline='Article 12',
            pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
        )
        dicts = Article.objects.filter(
            pub_date__year=2008).extra(
            select={'dashed-value': '1'}).values('headline', 'dashed-value')
        self.assertEqual([sorted(d.items()) for d in dicts],
                         [[('dashed-value', 1), ('headline', 'Article 11')], [('dashed-value', 1), ('headline', 'Article 12')]])

    def test_extra_method_select_argument_with_dashes(self):
        # If you use 'select' with extra() and names containing dashes on a
        # query that's *not* a values() query, those extra 'select' values
        # will silently be ignored.
        Article.objects.create(
            headline="Article 10",
            pub_date=datetime(2005, 7, 31, 12, 30, 45),
        )
        Article.objects.create(
            headline='Article 11',
            pub_date=datetime(2008, 1, 1),
        )
        Article.objects.create(
            headline='Article 12',
            pub_date=datetime(2008, 12, 31, 23, 59, 59, 999999),
        )
        articles = Article.objects.filter(
            pub_date__year=2008).extra(select={'dashed-value': '1', 'undashedvalue': '2'})
        self.assertEqual(articles[0].undashedvalue, 2)

    def test_create_relation_with_ugettext_lazy(self):
        """
        Test that ugettext_lazy objects work when saving model instances
        through various methods. Refs #10498.
        """
        notlazy = 'test'
        lazy = ugettext_lazy(notlazy)
        Article.objects.create(headline=lazy, pub_date=datetime.now())
        article = Article.objects.get()
        self.assertEqual(article.headline, notlazy)
        # test that assign + save works with Promise objecs
        article.headline = lazy
        article.save()
        self.assertEqual(article.headline, notlazy)
        # test .update()
        Article.objects.update(headline=lazy)
        article = Article.objects.get()
        self.assertEqual(article.headline, notlazy)
        # still test bulk_create()
        Article.objects.all().delete()
        Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
        article = Article.objects.get()
        self.assertEqual(article.headline, notlazy)

    def test_emptyqs(self):
        # Can't be instantiated
        with self.assertRaises(TypeError):
            EmptyQuerySet()
        # .none() is the supported way to get an EmptyQuerySet.
        self.assertIsInstance(Article.objects.none(), EmptyQuerySet)

    def test_emptyqs_values(self):
        # test for #15959
        Article.objects.create(headline='foo', pub_date=datetime.now())
        with self.assertNumQueries(0):
            qs = Article.objects.none().values_list('pk')
            self.assertIsInstance(qs, EmptyQuerySet)
            self.assertIsInstance(qs, ValuesListQuerySet)
            self.assertEqual(len(qs), 0)

    def test_emptyqs_customqs(self):
        # A hacky test for custom QuerySet subclass - refs #17271
        Article.objects.create(headline='foo', pub_date=datetime.now())

        class CustomQuerySet(QuerySet):
            def do_something(self):
                return 'did something'

        qs = Article.objects.all()
        qs.__class__ = CustomQuerySet
        qs = qs.none()
        with self.assertNumQueries(0):
            self.assertEqual(len(qs), 0)
            self.assertIsInstance(qs, EmptyQuerySet)
            # Custom methods survive the .none() conversion.
            self.assertEqual(qs.do_something(), 'did something')

    def test_emptyqs_values_order(self):
        # Tests for ticket #17712
        Article.objects.create(headline='foo', pub_date=datetime.now())
        with self.assertNumQueries(0):
            self.assertEqual(len(Article.objects.none().values_list('id').order_by('id')), 0)
        with self.assertNumQueries(0):
            self.assertEqual(len(Article.objects.none().filter(
                id__in=Article.objects.values_list('id', flat=True))), 0)

    @skipUnlessDBFeature('can_distinct_on_fields')
    def test_emptyqs_distinct(self):
        # Tests for #19426
        Article.objects.create(headline='foo', pub_date=datetime.now())
        with self.assertNumQueries(0):
            self.assertEqual(len(Article.objects.none().distinct('headline', 'pub_date')), 0)

    def test_ticket_20278(self):
        sr = SelfRef.objects.create()
        with self.assertRaises(ObjectDoesNotExist):
            SelfRef.objects.get(selfref=sr)

    def test_eq(self):
        self.assertEqual(Article(id=1), Article(id=1))
        self.assertNotEqual(Article(id=1), object())
        self.assertNotEqual(object(), Article(id=1))
        a = Article()
        self.assertEqual(a, a)
        self.assertNotEqual(Article(), a)

    def test_hash(self):
        # Value based on PK
        self.assertEqual(hash(Article(id=1)), hash(1))
        with self.assertRaises(TypeError):
            # No PK value -> unhashable (because save() would then change
            # hash)
            hash(Article())
class ModelLookupTest(TestCase):
    """Lookup API tests: get()/filter() with field lookups, pk shortcuts,
    DoesNotExist and MultipleObjectsReturned behavior."""

    def setUp(self):
        # Create an Article.
        self.a = Article(
            id=None,
            headline='Area woman programs in Python',
            pub_date=datetime(2005, 7, 28),
        )
        # Save it into the database. You have to call save() explicitly.
        self.a.save()

    def test_all_lookup(self):
        # Change values by changing the attributes, then calling save().
        self.a.headline = 'Area man programs in Python'
        self.a.save()

        # Article.objects.all() returns all the articles in the database.
        self.assertQuerysetEqual(Article.objects.all(),
                                 ['<Article: Area man programs in Python>'])

    def test_rich_lookup(self):
        # Django provides a rich database lookup API.
        self.assertEqual(Article.objects.get(id__exact=self.a.id), self.a)
        self.assertEqual(Article.objects.get(headline__startswith='Area woman'), self.a)
        self.assertEqual(Article.objects.get(pub_date__year=2005), self.a)
        self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7), self.a)
        self.assertEqual(Article.objects.get(pub_date__year=2005, pub_date__month=7, pub_date__day=28), self.a)
        self.assertEqual(Article.objects.get(pub_date__week_day=5), self.a)

    def test_equal_lookup(self):
        # The "__exact" lookup type can be omitted, as a shortcut.
        self.assertEqual(Article.objects.get(id=self.a.id), self.a)
        self.assertEqual(Article.objects.get(headline='Area woman programs in Python'), self.a)

        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__year=2005),
            ['<Article: Area woman programs in Python>'],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__year=2004),
            [],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__year=2005, pub_date__month=7),
            ['<Article: Area woman programs in Python>'],
        )

        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__week_day=5),
            ['<Article: Area woman programs in Python>'],
        )
        self.assertQuerysetEqual(
            Article.objects.filter(pub_date__week_day=6),
            [],
        )

    def test_does_not_exist(self):
        # Django raises an Article.DoesNotExist exception for get() if the
        # parameters don't match any object.
        six.assertRaisesRegex(
            self,
            ObjectDoesNotExist,
            "Article matching query does not exist.",
            Article.objects.get,
            id__exact=2000,
        )
        # To avoid dict-ordering related errors check only one lookup
        # in single assert.
        self.assertRaises(
            ObjectDoesNotExist,
            Article.objects.get,
            pub_date__year=2005,
            pub_date__month=8,
        )
        six.assertRaisesRegex(
            self,
            ObjectDoesNotExist,
            "Article matching query does not exist.",
            Article.objects.get,
            pub_date__week_day=6,
        )

    def test_lookup_by_primary_key(self):
        # Lookup by a primary key is the most common case, so Django
        # provides a shortcut for primary-key exact lookups.
        # The following is identical to articles.get(id=a.id).
        self.assertEqual(Article.objects.get(pk=self.a.id), self.a)

        # pk can be used as a shortcut for the primary key name in any query.
        self.assertQuerysetEqual(Article.objects.filter(pk__in=[self.a.id]),
                                 ["<Article: Area woman programs in Python>"])

        # Model instances of the same type and same ID are considered equal.
        a = Article.objects.get(pk=self.a.id)
        b = Article.objects.get(pk=self.a.id)
        self.assertEqual(a, b)

    def test_too_many(self):
        # Create a very similar object
        a = Article(
            id=None,
            headline='Area man programs in Python',
            pub_date=datetime(2005, 7, 28),
        )
        a.save()

        self.assertEqual(Article.objects.count(), 2)

        # Django raises an Article.MultipleObjectsReturned exception if the
        # lookup matches more than one object
        six.assertRaisesRegex(
            self,
            MultipleObjectsReturned,
            "get\(\) returned more than one Article -- it returned 2!",
            Article.objects.get,
            headline__startswith='Area',
        )
        six.assertRaisesRegex(
            self,
            MultipleObjectsReturned,
            "get\(\) returned more than one Article -- it returned 2!",
            Article.objects.get,
            pub_date__year=2005,
        )
        six.assertRaisesRegex(
            self,
            MultipleObjectsReturned,
            "get\(\) returned more than one Article -- it returned 2!",
            Article.objects.get,
            pub_date__year=2005,
            pub_date__month=7,
        )
class ConcurrentSaveTests(TransactionTestCase):
    """Cross-connection save/delete interactions (requires a backend that
    allows multiple simultaneous connections to the test database)."""

    available_apps = ['basic']

    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_concurrent_delete_with_save(self):
        """
        Test fetching, deleting and finally saving an object - we should get
        an insert in this case.
        """
        a = Article.objects.create(headline='foo', pub_date=datetime.now())
        exceptions = []

        def deleter():
            try:
                # Do not delete a directly - doing so alters its state.
                Article.objects.filter(pk=a.pk).delete()
            except Exception as e:
                exceptions.append(e)
            finally:
                # The worker thread gets its own DB connection; close it
                # so the test database can be torn down cleanly.
                connections[DEFAULT_DB_ALIAS].close()

        t = threading.Thread(target=deleter)
        t.start()
        t.join()
        # Check for thread errors only after join(): asserting before the
        # thread has run would trivially pass on an empty list.
        self.assertEqual(len(exceptions), 0)
        # The row was deleted by the other connection, so saving the stale
        # instance must result in an INSERT rather than an UPDATE.
        a.save()
        self.assertEqual(Article.objects.get(pk=a.pk).headline, 'foo')
class ManagerTest(TestCase):
    """Pin down exactly which QuerySet methods are proxied onto Manager."""
    # The authoritative list of QuerySet methods that Manager must expose.
    QUERYSET_PROXY_METHODS = [
        'none',
        'count',
        'dates',
        'datetimes',
        'distinct',
        'extra',
        'get',
        'get_or_create',
        'update_or_create',
        'create',
        'bulk_create',
        'filter',
        'aggregate',
        'annotate',
        'complex_filter',
        'exclude',
        'in_bulk',
        'iterator',
        'earliest',
        'latest',
        'first',
        'last',
        'order_by',
        'select_for_update',
        'select_related',
        'prefetch_related',
        'values',
        'values_list',
        'update',
        'reverse',
        'defer',
        'only',
        'using',
        'exists',
        '_insert',
        '_update',
        'raw',
    ]
    def test_manager_methods(self):
        """
        This test ensures that the correct set of methods from `QuerySet`
        are copied onto `Manager`.
        It's particularly useful to prevent accidentally leaking new methods
        into `Manager`. New `QuerySet` methods that should also be copied onto
        `Manager` will need to be added to `ManagerTest.QUERYSET_PROXY_METHODS`.
        """
        # Compare sorted to be independent of dict ordering.
        self.assertEqual(
            sorted(BaseManager._get_queryset_methods(QuerySet).keys()),
            sorted(self.QUERYSET_PROXY_METHODS),
        )
class SelectOnSaveTests(TestCase):
    def test_select_on_save(self):
        """Query counts for save() with and without select_on_save."""
        a1 = Article.objects.create(pub_date=datetime.now())
        # Plain model: save() is a single UPDATE.
        with self.assertNumQueries(1):
            a1.save()
        asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
        # select_on_save model: SELECT to check existence, then UPDATE.
        with self.assertNumQueries(2):
            asos.save()
        # force_update skips the existence SELECT.
        with self.assertNumQueries(1):
            asos.save(force_update=True)
        Article.objects.all().delete()
        # force_update on a deleted row must fail with DatabaseError.
        with self.assertRaises(DatabaseError):
            with self.assertNumQueries(1):
                asos.save(force_update=True)
    def test_select_on_save_lying_update(self):
        """
        Test that select_on_save works correctly if the database
        doesn't return correct information about matched rows from
        UPDATE.
        """
        # Change the manager to not return "row matched" for update().
        # We are going to change the Article's _base_manager class
        # dynamically. This is a bit of a hack, but it seems hard to
        # test this properly otherwise. Article's manager, because
        # proxy models use their parent model's _base_manager.
        orig_class = Article._base_manager.__class__
        class FakeQuerySet(QuerySet):
            # Make sure the _update method below is in fact called.
            called = False
            def _update(self, *args, **kwargs):
                FakeQuerySet.called = True
                super(FakeQuerySet, self)._update(*args, **kwargs)
                # Lie: pretend the UPDATE matched zero rows.
                return 0
        class FakeManager(orig_class):
            def get_queryset(self):
                return FakeQuerySet(self.model)
        try:
            Article._base_manager.__class__ = FakeManager
            asos = ArticleSelectOnSave.objects.create(pub_date=datetime.now())
            # SELECT, UPDATE (reported as 0 rows), then fallback INSERT path.
            with self.assertNumQueries(3):
                asos.save()
                self.assertTrue(FakeQuerySet.called)
            # This is not wanted behavior, but this is how Django has always
            # behaved for databases that do not return correct information
            # about matched rows for UPDATE.
            with self.assertRaises(DatabaseError):
                asos.save(force_update=True)
            with self.assertRaises(DatabaseError):
                asos.save(update_fields=['pub_date'])
        finally:
            # Always restore the real manager class.
            Article._base_manager.__class__ = orig_class
class ModelRefreshTests(TestCase):
    def _truncate_ms(self, val):
        # MySQL < 5.6.4 removes microseconds from the datetimes which can cause
        # problems when comparing the original value to that loaded from DB
        return val - timedelta(microseconds=val.microsecond)
    def test_refresh(self):
        """refresh_from_db() reloads all fields, or only those requested."""
        a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
        Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
        Article.objects.filter(pk=a.pk).update(headline='new headline')
        with self.assertNumQueries(1):
            a.refresh_from_db()
            self.assertEqual(a.headline, 'new headline')
        orig_pub_date = a.pub_date
        new_pub_date = a.pub_date + timedelta(10)
        Article.objects.update(headline='new headline 2', pub_date=new_pub_date)
        # A partial refresh touches only the named fields.
        with self.assertNumQueries(1):
            a.refresh_from_db(fields=['headline'])
            self.assertEqual(a.headline, 'new headline 2')
            self.assertEqual(a.pub_date, orig_pub_date)
        with self.assertNumQueries(1):
            a.refresh_from_db()
            self.assertEqual(a.pub_date, new_pub_date)
    def test_refresh_fk(self):
        """Refreshing discards a cached related object whose FK changed."""
        s1 = SelfRef.objects.create()
        s2 = SelfRef.objects.create()
        s3 = SelfRef.objects.create(selfref=s1)
        s3_copy = SelfRef.objects.get(pk=s3.pk)
        # Mark the cached related instance so we can tell if it survives.
        s3_copy.selfref.touched = True
        s3.selfref = s2
        s3.save()
        with self.assertNumQueries(1):
            s3_copy.refresh_from_db()
        with self.assertNumQueries(1):
            # The old related instance was thrown away (the selfref_id has
            # changed). It needs to be reloaded on access, so one query
            # executed.
            self.assertFalse(hasattr(s3_copy.selfref, 'touched'))
            self.assertEqual(s3_copy.selfref, s2)
    def test_refresh_unsaved(self):
        """An instance built with only a pk can be populated by a refresh."""
        pub_date = self._truncate_ms(datetime.now())
        a = Article.objects.create(pub_date=pub_date)
        a2 = Article(id=a.pk)
        with self.assertNumQueries(1):
            a2.refresh_from_db()
        self.assertEqual(a2.pub_date, pub_date)
        self.assertEqual(a2._state.db, "default")
    def test_refresh_no_fields(self):
        """Refreshing an empty field list must not hit the database."""
        a = Article.objects.create(pub_date=self._truncate_ms(datetime.now()))
        with self.assertNumQueries(0):
            a.refresh_from_db(fields=[])
class TestRelatedObjectDeprecation(TestCase):
    def test_field_related_deprecation(self):
        """Accessing ``field.related`` warns and yields a ForeignObjectRel."""
        field = SelfRef._meta.get_field('selfref')
        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            self.assertIsInstance(field.related, ForeignObjectRel)
            # Exactly one deprecation warning, with the documented message.
            self.assertEqual(len(caught), 1)
            self.assertEqual(
                str(caught.pop().message),
                'Usage of field.related has been deprecated. Use field.rel instead.'
            )
| |
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import combinations
from itertools import product
import numpy as np
from scipy.misc import comb
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.fixes import bincount
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# Registries mapping estimator names to classes; the test generators below
# iterate over these to parametrize each check.
FOREST_CLASSIFIERS = {
    "ExtraTreesClassifier": ExtraTreesClassifier,
    "RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
    "ExtraTreesRegressor": ExtraTreesRegressor,
    "RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
    "RandomTreesEmbedding": RandomTreesEmbedding,
}
# Union of all of the above.
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
    """Fit the named forest classifier on the toy problem and verify it."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Once with default max_features, once restricted to a single feature.
    for extra_params in ({}, {"max_features": 1}):
        clf = ForestClassifier(n_estimators=10, random_state=1, **extra_params)
        clf.fit(X, y)
        assert_array_equal(clf.predict(T), true_result)
        assert_equal(10, len(clf))
    # also test apply: one leaf index per sample per tree
    leaf_indices = clf.apply(X)
    assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
    """Generate the toy-classification check for every forest classifier."""
    for classifier_name in FOREST_CLASSIFIERS:
        yield check_classification_toy, classifier_name
def check_iris_criterion(name, criterion):
    """Sanity-check training accuracy on iris for a single split criterion."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # (extra constructor kwargs, minimum acceptable training score)
    for extra_params, threshold in (({}, 0.9), ({"max_features": 2}, 0.5)):
        clf = ForestClassifier(n_estimators=10, criterion=criterion,
                               random_state=1, **extra_params)
        clf.fit(iris.data, iris.target)
        score = clf.score(iris.data, iris.target)
        assert_greater(score, threshold,
                       "Failed with criterion %s and score = %f"
                       % (criterion, score))
def test_iris():
    """Generate iris accuracy checks for every classifier/criterion pair."""
    for clf_name, split_criterion in product(FOREST_CLASSIFIERS,
                                             ("gini", "entropy")):
        yield check_iris_criterion, clf_name, split_criterion
def check_boston_criterion(name, criterion):
    """Sanity-check training R^2 on boston for a single split criterion."""
    ForestRegressor = FOREST_REGRESSORS[name]
    # (label for the error message, extra constructor kwargs)
    for label, extra_params in (("None", {}), ("6", {"max_features": 6})):
        reg = ForestRegressor(n_estimators=5, criterion=criterion,
                              random_state=1, **extra_params)
        reg.fit(boston.data, boston.target)
        score = reg.score(boston.data, boston.target)
        assert_greater(score, 0.95,
                       "Failed with max_features=%s, criterion %s "
                       "and score = %f" % (label, criterion, score))
def test_boston():
    """Generate boston consistency checks for every forest regressor."""
    for reg_name, split_criterion in product(FOREST_REGRESSORS, ("mse", )):
        yield check_boston_criterion, reg_name, split_criterion
def check_regressor_attributes(name):
    """Regressors expose no classes_/n_classes_, before or after fitting."""
    reg = FOREST_REGRESSORS[name](random_state=0)
    for attr in ("classes_", "n_classes_"):
        assert_false(hasattr(reg, attr))
    reg.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
    for attr in ("classes_", "n_classes_"):
        assert_false(hasattr(reg, attr))
def test_regressor_attributes():
    """Generate the classes_-absence check for every forest regressor."""
    for reg_name in FOREST_REGRESSORS:
        yield check_regressor_attributes, reg_name
def check_probability(name):
    """predict_proba rows sum to one and agree with predict_log_proba."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    with np.errstate(divide="ignore"):
        clf = ForestClassifier(n_estimators=10, random_state=1,
                               max_features=1, max_depth=1)
        clf.fit(iris.data, iris.target)
        proba = clf.predict_proba(iris.data)
        # Each row of class probabilities must sum to one.
        assert_array_almost_equal(proba.sum(axis=1),
                                  np.ones(iris.data.shape[0]))
        # Log-probabilities must be consistent with the probabilities.
        assert_array_almost_equal(proba,
                                  np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
    """Generate the probability-consistency check for every classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_probability, clf_name
def check_importances(X, y, name, criterion):
    """Feature importances pick out the informative features and behave
    consistently under parallel recomputation and weight rescaling."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    est = ForestEstimator(n_estimators=20, criterion=criterion,
                          random_state=0)
    est.fit(X, y)
    importances = est.feature_importances_
    assert_equal(importances.shape[0], 10)
    # Exactly the three informative features stand out.
    assert_equal(np.sum(importances > 0.1), 3)
    # Thresholded transform keeps strictly fewer features.
    X_new = est.transform(X, threshold="mean")
    assert_less(X_new.shape[1], X.shape[1])
    # Importances are identical when recomputed with n_jobs=2.
    serial_importances = est.feature_importances_
    est.set_params(n_jobs=2)
    parallel_importances = est.feature_importances_
    assert_array_almost_equal(serial_importances, parallel_importances)
    # With sample weights: importances stay non-negative and are invariant
    # to a uniform rescaling of the weights.
    sample_weight = check_random_state(0).randint(1, 10, len(X))
    est = ForestEstimator(n_estimators=20, random_state=0,
                          criterion=criterion)
    est.fit(X, y, sample_weight=sample_weight)
    importances = est.feature_importances_
    assert_true(np.all(importances >= 0.0))
    for scale in [0.5, 10, 100]:
        est = ForestEstimator(n_estimators=20, random_state=0,
                              criterion=criterion)
        est.fit(X, y, sample_weight=scale * sample_weight)
        importances_bis = est.feature_importances_
        assert_less(np.abs(importances - importances_bis).mean(), 0.001)
def test_importances():
    """Generate importance checks for all classifiers and regressors."""
    X, y = datasets.make_classification(n_samples=500, n_features=10,
                                        n_informative=3, n_redundant=0,
                                        n_repeated=0, shuffle=False,
                                        random_state=0)
    for registry, criteria in ((FOREST_CLASSIFIERS, ["gini", "entropy"]),
                               (FOREST_REGRESSORS, ["mse", "friedman_mse"])):
        for name, criterion in product(registry, criteria):
            yield check_importances, X, y, name, criterion
def test_importances_asymptotic():
    # Check whether variable importances of totally randomized trees
    # converge towards their theoretical values (See Louppe et al,
    # Understanding variable importances in forests of randomized trees, 2013).
    def binomial(k, n):
        # "n choose k", defined as 0 outside the valid range.
        return 0 if k < 0 or k > n else comb(int(n), int(k), exact=True)
    def entropy(samples):
        # Shannon entropy (base 2) of an array of non-negative integers.
        n_samples = len(samples)
        entropy = 0.
        for count in bincount(samples):
            p = 1. * count / n_samples
            if p > 0:
                entropy -= p * np.log2(p)
        return entropy
    def mdi_importance(X_m, X, y):
        # Theoretical Mean Decrease of Impurity importance of feature X_m,
        # computed by exhaustively enumerating all conditioning subsets B
        # and their value assignments b (Louppe et al., 2013).
        n_samples, n_features = X.shape
        features = list(range(n_features))
        features.pop(X_m)
        values = [np.unique(X[:, i]) for i in range(n_features)]
        imp = 0.
        for k in range(n_features):
            # Weight of each B of size k
            coef = 1. / (binomial(k, n_features) * (n_features - k))
            # For all B of size k
            for B in combinations(features, k):
                # For all values B=b
                for b in product(*[values[B[j]] for j in range(k)]):
                    # Select the samples matching the assignment B=b.
                    mask_b = np.ones(n_samples, dtype=np.bool)
                    for j in range(k):
                        mask_b &= X[:, B[j]] == b[j]
                    X_, y_ = X[mask_b, :], y[mask_b]
                    n_samples_b = len(X_)
                    if n_samples_b > 0:
                        # Partition the matching samples by the value of X_m.
                        children = []
                        for xi in values[X_m]:
                            mask_xi = X_[:, X_m] == xi
                            children.append(y_[mask_xi])
                        imp += (coef
                                * (1. * n_samples_b / n_samples)  # P(B=b)
                                * (entropy(y_) -
                                   sum([entropy(c) * len(c) / n_samples_b
                                        for c in children])))
        return imp
    data = np.array([[0, 0, 1, 0, 0, 1, 0, 1],
                     [1, 0, 1, 1, 1, 0, 1, 2],
                     [1, 0, 1, 1, 0, 1, 1, 3],
                     [0, 1, 1, 1, 0, 1, 0, 4],
                     [1, 1, 0, 1, 0, 1, 1, 5],
                     [1, 1, 0, 1, 1, 1, 1, 6],
                     [1, 0, 1, 0, 0, 1, 0, 7],
                     [1, 1, 1, 1, 1, 1, 1, 8],
                     [1, 1, 1, 1, 0, 1, 1, 9],
                     [1, 1, 1, 0, 1, 1, 1, 0]])
    # First 7 columns are binary features; last column is the target.
    X, y = np.array(data[:, :7], dtype=np.bool), data[:, 7]
    n_features = X.shape[1]
    # Compute true importances
    true_importances = np.zeros(n_features)
    for i in range(n_features):
        true_importances[i] = mdi_importance(i, X, y)
    # Estimate importances with totally randomized trees
    clf = ExtraTreesClassifier(n_estimators=500,
                               max_features=1,
                               criterion="entropy",
                               random_state=0).fit(X, y)
    importances = sum(tree.tree_.compute_feature_importances(normalize=False)
                      for tree in clf.estimators_) / clf.n_estimators
    # Check correctness
    assert_almost_equal(entropy(y), sum(importances))
    assert_less(np.abs(true_importances - importances).mean(), 0.01)
def check_unfitted_feature_importances(name):
    """Reading feature_importances_ before fit() raises ValueError."""
    unfitted = FOREST_ESTIMATORS[name](random_state=0)
    assert_raises(ValueError, getattr, unfitted, "feature_importances_")
def test_unfitted_feature_importances():
    """Generate the unfitted-importances check for every estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_unfitted_feature_importances, est_name
def check_oob_score(name, X, y, n_estimators=20):
    # Check that oob prediction is a good estimation of the generalization
    # error.
    # Proper behavior
    est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                  n_estimators=n_estimators, bootstrap=True)
    n_samples = X.shape[0]
    # Fit on the first half, evaluate on the held-out second half.
    est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
    test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
    if name in FOREST_CLASSIFIERS:
        # OOB accuracy must track held-out accuracy within 0.1.
        assert_less(abs(test_score - est.oob_score_), 0.1)
    else:
        # For regressors the OOB score is expected to be the lower one,
        # but still a reasonable fit.
        assert_greater(test_score, est.oob_score_)
        assert_greater(est.oob_score_, .8)
    # Check warning if not enough estimators
    with np.errstate(divide="ignore", invalid="ignore"):
        est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
                                      n_estimators=1, bootstrap=True)
        assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
    """Generate OOB-score checks on dense, sparse and relabeled targets."""
    for clf_name in FOREST_CLASSIFIERS:
        # Dense, CSC-sparse, and non-contiguous-label variants.
        for data, target in ((iris.data, iris.target),
                             (csc_matrix(iris.data), iris.target),
                             (iris.data, iris.target * 2 + 1)):
            yield check_oob_score, clf_name, data, target
    for reg_name in FOREST_REGRESSORS:
        # Regressors need more trees for a stable OOB estimate.
        for data in (boston.data, csc_matrix(boston.data)):
            yield check_oob_score, reg_name, data, boston.target, 50
def check_oob_score_raise_error(name):
    """oob_score is rejected or absent whenever it cannot be computed."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    if name in FOREST_TRANSFORMERS:
        # Transformers do not accept an oob_score argument at all.
        for oob_score in [True, False]:
            assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
        assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
                      X, y)
    else:
        # Unfitted / no bootstrap / no oob_score
        for oob_score, bootstrap in [(True, False), (False, True),
                                     (False, False)]:
            est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
                                  random_state=0)
            assert_false(hasattr(est, "oob_score_"))
        # No bootstrap
        assert_raises(ValueError, ForestEstimator(oob_score=True,
                                                  bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
    """Generate the oob_score error-handling check for every estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_oob_score_raise_error, est_name
def check_gridsearch(name):
    """Smoke-test grid search over a forest classifier's tree parameters."""
    param_grid = {'n_estimators': (1, 2), 'max_depth': (1, 2)}
    search = GridSearchCV(FOREST_CLASSIFIERS[name](), param_grid)
    search.fit(iris.data, iris.target)
def test_gridsearch():
    """Check that base trees can be grid-searched for each classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_gridsearch, clf_name
def check_parallel(name, X, y):
    """Predictions must be invariant to the n_jobs setting."""
    forest = FOREST_ESTIMATORS[name](n_estimators=10, n_jobs=3, random_state=0)
    forest.fit(X, y)
    assert_equal(len(forest), 10)
    # Predict with one and two jobs; results must agree.
    predictions = []
    for n_jobs in (1, 2):
        forest.set_params(n_jobs=n_jobs)
        predictions.append(forest.predict(X))
    assert_array_almost_equal(predictions[0], predictions[1], 3)
def test_parallel():
    """Generate parallel-prediction checks for all supervised forests."""
    for registry, data, target in (
            (FOREST_CLASSIFIERS, iris.data, iris.target),
            (FOREST_REGRESSORS, boston.data, boston.target)):
        for name in registry:
            yield check_parallel, name, data, target
def check_pickle(name, X, y):
    """A fitted forest survives a pickle round-trip with its score intact."""
    original = FOREST_ESTIMATORS[name](random_state=0)
    original.fit(X, y)
    score_before = original.score(X, y)
    restored = pickle.loads(pickle.dumps(original))
    assert_equal(type(restored), original.__class__)
    assert_equal(score_before, restored.score(X, y))
def test_pickle():
    """Generate pickle round-trip checks on thinned iris/boston data."""
    for registry, data, target in (
            (FOREST_CLASSIFIERS, iris.data[::2], iris.target[::2]),
            (FOREST_REGRESSORS, boston.data[::2], boston.target[::2])):
        for name in registry:
            yield check_pickle, name, data, target
def check_multioutput(name):
    # Check estimators on multi-output problems.
    # Two outputs: the first is a sign, the second a quadrant-like label.
    X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
               [-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
    y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
               [-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
    X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
    y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    y_pred = est.fit(X_train, y_train).predict(X_test)
    assert_array_almost_equal(y_pred, y_test)
    if name in FOREST_CLASSIFIERS:
        with np.errstate(divide="ignore"):
            # One probability array per output: 2 classes, then 4 classes.
            proba = est.predict_proba(X_test)
            assert_equal(len(proba), 2)
            assert_equal(proba[0].shape, (4, 2))
            assert_equal(proba[1].shape, (4, 4))
            log_proba = est.predict_log_proba(X_test)
            assert_equal(len(log_proba), 2)
            assert_equal(log_proba[0].shape, (4, 2))
            assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
    """Generate multi-output checks for all supervised forests."""
    for registry in (FOREST_CLASSIFIERS, FOREST_REGRESSORS):
        for name in registry:
            yield check_multioutput, name
def check_classes_shape(name):
    """n_classes_ and classes_ have the right shape for 1 and 2 outputs."""
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Single output: two classes, -1 and +1.
    single = ForestClassifier(random_state=0).fit(X, y)
    assert_equal(single.n_classes_, 2)
    assert_array_equal(single.classes_, [-1, 1])
    # Two outputs: the second output is the first scaled by two.
    _y = np.vstack((y, np.array(y) * 2)).T
    multi = ForestClassifier(random_state=0).fit(X, _y)
    assert_array_equal(multi.n_classes_, [2, 2])
    assert_array_equal(multi.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
    """Generate classes_-shape checks for every forest classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_classes_shape, clf_name
def test_random_trees_dense_type():
    """RandomTreesEmbedding(sparse_output=False) returns a dense ndarray."""
    hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
    X, y = datasets.make_circles(factor=0.5)
    transformed = hasher.fit_transform(X)
    # A plain ndarray, not a scipy sparse matrix.
    assert_equal(type(transformed), np.ndarray)
def test_random_trees_dense_equal():
    """Dense and sparse RandomTreesEmbedding outputs encode the same data."""
    dense_hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
                                        random_state=0)
    sparse_hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
                                         random_state=0)
    X, y = datasets.make_circles(factor=0.5)
    dense_out = dense_hasher.fit_transform(X)
    sparse_out = sparse_hasher.fit_transform(X)
    # Same random_state, so both embeddings must match exactly.
    assert_array_equal(sparse_out.toarray(), dense_out)
def test_random_hasher():
    # test random forest hashing on circles dataset
    # make sure that it is linearly separable.
    # even after projected to two SVD dimensions
    # Note: Not all random_states produce perfect results.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    X, y = datasets.make_circles(factor=0.5)
    X_transformed = hasher.fit_transform(X)
    # test fit and transform:
    # fit().transform() must equal fit_transform() for the same seed.
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    assert_array_equal(hasher.fit(X).transform(X).toarray(),
                       X_transformed.toarray())
    # one leaf active per data point per forest
    assert_equal(X_transformed.shape[0], X.shape[0])
    assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
    # Project the embedding to 2-D; a linear SVM must separate it perfectly.
    svd = TruncatedSVD(n_components=2)
    X_reduced = svd.fit_transform(X_transformed)
    linear_clf = LinearSVC()
    linear_clf.fit(X_reduced, y)
    assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
    """Hashing sparse input matches hashing the equivalent dense input."""
    X, y = datasets.make_multilabel_classification(random_state=0)
    hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
    dense_out = hasher.fit_transform(X)
    sparse_out = hasher.fit_transform(csc_matrix(X))
    assert_array_equal(sparse_out.toarray(), dense_out.toarray())
def test_parallel_train():
    """Predicted probabilities are independent of the number of jobs."""
    rng = check_random_state(12321)
    n_samples, n_features = 80, 30
    X_train = rng.randn(n_samples, n_features)
    y_train = rng.randint(0, 2, n_samples)
    # Train identical forests with a range of n_jobs values.
    clfs = []
    for n_jobs in [1, 2, 3, 8, 16, 32]:
        clf = RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
                                     random_state=12345)
        clfs.append(clf.fit(X_train, y_train))
    X_test = rng.randn(n_samples, n_features)
    probas = [clf.predict_proba(X_test) for clf in clfs]
    # Every consecutive pair of models must agree.
    for proba1, proba2 in zip(probas, probas[1:]):
        assert_array_almost_equal(proba1, proba2)
def test_distribution():
    """Totally randomized regression trees reach every possible structure
    with roughly the expected probabilities."""
    rng = check_random_state(12321)
    # Single variable with 4 values
    X = rng.randint(0, 4, size=(1000, 1))
    y = rng.rand(1000)
    n_trees = 500
    clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
    uniques = defaultdict(int)
    for tree in clf.estimators_:
        # Serialize the structure: "feature,threshold/" per split, "-" per leaf.
        structure = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                            for f, t in zip(tree.tree_.feature,
                                            tree.tree_.threshold))
        uniques[structure] += 1
    uniques = sorted([(1. * count / n_trees, tree)
                      for tree, count in uniques.items()])
    # On a single variable problem where X_0 has 4 equiprobable values, there
    # are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
    # them has probability 1/3 while the 4 others have probability 1/6.
    assert_equal(len(uniques), 5)
    assert_greater(0.20, uniques[0][0])  # Rough approximation of 1/6.
    assert_greater(0.20, uniques[1][0])
    assert_greater(0.20, uniques[2][0])
    assert_greater(0.20, uniques[3][0])
    assert_greater(uniques[4][0], 0.3)
    assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
    # Two variables, one with 2 values, one with 3 values
    X = np.empty((1000, 2))
    # BUG FIX: draw from the seeded ``rng``, not the global ``np.random``,
    # so the second half of the test is deterministic as well.
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
    y = rng.rand(1000)
    clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
                              random_state=1).fit(X, y)
    uniques = defaultdict(int)
    for tree in clf.estimators_:
        structure = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
                            for f, t in zip(tree.tree_.feature,
                                            tree.tree_.threshold))
        uniques[structure] += 1
    uniques = [(count, tree) for tree, count in uniques.items()]
    # There are 8 possible structures for this two-variable problem.
    assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
    """max_leaf_nodes takes precedence over max_depth."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    # When max_leaf_nodes is set, the depth limit is ignored.
    forest = ForestEstimator(max_depth=1, max_leaf_nodes=4,
                             n_estimators=1).fit(X, y)
    assert_greater(forest.estimators_[0].tree_.max_depth, 1)
    # Without max_leaf_nodes, max_depth is honored.
    forest = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
    assert_equal(forest.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
    """Generate the max_leaf_nodes/max_depth precedence check for all."""
    hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=100,
                                                   random_state=1)
    for est_name in FOREST_ESTIMATORS:
        yield check_max_leaf_nodes_max_depth, est_name, hastie_X, hastie_y
def check_min_samples_leaf(name, X, y):
    # Test if leaves contain more than leaf_count training examples
    ForestEstimator = FOREST_ESTIMATORS[name]
    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes in (None, 1000):
        est = ForestEstimator(min_samples_leaf=5,
                              max_leaf_nodes=max_leaf_nodes,
                              random_state=0)
        est.fit(X, y)
        # Leaf index for every training sample in the first tree.
        out = est.estimators_[0].tree_.apply(X)
        node_counts = bincount(out)
        # drop inner nodes
        leaf_count = node_counts[node_counts != 0]
        # Every leaf must hold at least min_samples_leaf (= 5) samples.
        assert_greater(np.min(leaf_count), 4,
                       "Failed with {0}".format(name))
def test_min_samples_leaf():
    """Generate min_samples_leaf checks on float32 hastie data."""
    hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=100,
                                                   random_state=1)
    hastie_X = hastie_X.astype(np.float32)
    for est_name in FOREST_ESTIMATORS:
        yield check_min_samples_leaf, est_name, hastie_X, hastie_y
def check_min_weight_fraction_leaf(name, X, y):
    # Test if leaves contain at least min_weight_fraction_leaf of the
    # training set
    ForestEstimator = FOREST_ESTIMATORS[name]
    rng = np.random.RandomState(0)
    weights = rng.rand(X.shape[0])
    total_weight = np.sum(weights)
    # test both DepthFirstTreeBuilder and BestFirstTreeBuilder
    # by setting max_leaf_nodes
    for max_leaf_nodes in (None, 1000):
        for frac in np.linspace(0, 0.5, 6):
            est = ForestEstimator(min_weight_fraction_leaf=frac,
                                  max_leaf_nodes=max_leaf_nodes,
                                  random_state=0)
            if isinstance(est, (RandomForestClassifier,
                                RandomForestRegressor)):
                # Disable bootstrap so every sample (and its weight) reaches
                # the tree, making the weight accounting below exact.
                est.bootstrap = False
            est.fit(X, y, sample_weight=weights)
            # Leaf index for every training sample in the first tree.
            out = est.estimators_[0].tree_.apply(X)
            node_weights = bincount(out, weights=weights)
            # drop inner nodes
            leaf_weights = node_weights[node_weights != 0]
            assert_greater_equal(
                np.min(leaf_weights),
                total_weight * est.min_weight_fraction_leaf,
                "Failed with {0} "
                "min_weight_fraction_leaf={1}".format(
                    name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
    """Generate min_weight_fraction_leaf checks on float32 hastie data."""
    hastie_X, hastie_y = datasets.make_hastie_10_2(n_samples=100,
                                                   random_state=1)
    hastie_X = hastie_X.astype(np.float32)
    for est_name in FOREST_ESTIMATORS:
        yield check_min_weight_fraction_leaf, est_name, hastie_X, hastie_y
def check_sparse_input(name, X, X_sparse, y):
    """Fitting on sparse input must match fitting on the dense equivalent."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
    sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
    # Identical leaf assignments regardless of input format.
    assert_array_almost_equal(sparse.apply(X), dense.apply(X))
    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_array_almost_equal(sparse.predict(X), dense.predict(X))
        assert_array_almost_equal(sparse.feature_importances_,
                                  dense.feature_importances_)
    if name in FOREST_CLASSIFIERS:
        assert_array_almost_equal(sparse.predict_proba(X),
                                  dense.predict_proba(X))
        assert_array_almost_equal(sparse.predict_log_proba(X),
                                  dense.predict_log_proba(X))
    if name in FOREST_TRANSFORMERS:
        assert_array_almost_equal(sparse.transform(X).toarray(),
                                  dense.transform(X).toarray())
        assert_array_almost_equal(sparse.fit_transform(X).toarray(),
                                  dense.fit_transform(X).toarray())
def test_sparse_input():
    """Generate sparse-vs-dense checks for CSR, CSC and COO formats."""
    X, y = datasets.make_multilabel_classification(random_state=0,
                                                   n_samples=50)
    for name, sparse_format in product(FOREST_ESTIMATORS,
                                       (csr_matrix, csc_matrix, coo_matrix)):
        yield check_sparse_input, name, X, sparse_format(X), y
def check_memory_layout(name, dtype):
    # Check that it works no matter the memory layout
    est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
    # Nothing
    X = np.asarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # C-order
    X = np.asarray(iris.data, order="C", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # F-order
    X = np.asarray(iris.data, order="F", dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # Contiguous
    X = np.ascontiguousarray(iris.data, dtype=dtype)
    y = iris.target
    assert_array_equal(est.fit(X, y).predict(X), y)
    # Sparse layouts only when the underlying splitter supports them.
    if est.base_estimator.splitter in SPARSE_SPLITTERS:
        # csr matrix
        X = csr_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # csc_matrix
        X = csc_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
        # coo_matrix
        X = coo_matrix(iris.data, dtype=dtype)
        y = iris.target
        assert_array_equal(est.fit(X, y).predict(X), y)
    # Strided
    X = np.asarray(iris.data[::3], dtype=dtype)
    y = iris.target[::3]
    assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
    """Generate memory-layout checks for both float dtypes."""
    for registry in (FOREST_CLASSIFIERS, FOREST_REGRESSORS):
        for name, dtype in product(registry, [np.float64, np.float32]):
            yield check_memory_layout, name, dtype
@ignore_warnings
def check_1d_input(name, X, X_2d, y):
    """1-d feature arrays are rejected at fit and predict time."""
    ForestEstimator = FOREST_ESTIMATORS[name]
    # Fitting on a 1-d array must raise.
    assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
    # After fitting on the 2-d version, 1-d predict input must raise too.
    est = ForestEstimator(random_state=0).fit(X_2d, y)
    if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
        assert_raises(ValueError, est.predict, X)
@ignore_warnings
def test_1d_input():
    """Generate 1-d input rejection checks for every estimator."""
    X_1d = iris.data[:, 0]
    X_2d = iris.data[:, 0].reshape((-1, 1))
    for est_name in FOREST_ESTIMATORS:
        yield check_1d_input, est_name, X_1d, X_2d, iris.target
def check_class_weights(name):
    # Check class_weights resemble sample_weights behavior.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # Iris is balanced, so no effect expected for using 'balanced' weights
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target)
    clf2 = ForestClassifier(class_weight='balanced', random_state=0)
    clf2.fit(iris.data, iris.target)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Make a multi-output problem with three copies of Iris
    iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
    # Create user-defined weights that should balance over the outputs
    clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
                                          {0: 2., 1: 1., 2: 2.},
                                          {0: 1., 1: 2., 2: 2.}],
                            random_state=0)
    clf3.fit(iris.data, iris_multi)
    assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
    # Check against multi-output "balanced" which should also have no effect
    clf4 = ForestClassifier(class_weight='balanced', random_state=0)
    clf4.fit(iris.data, iris_multi)
    assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
    # Inflate importance of class 1, check against user-defined weights
    sample_weight = np.ones(iris.target.shape)
    sample_weight[iris.target == 1] *= 100
    class_weight = {0: 1., 1: 100., 2: 1.}
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target)
    # sample_weight and the equivalent class_weight give identical forests.
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
    # Check that sample_weight and class_weight are multiplicative
    clf1 = ForestClassifier(random_state=0)
    clf1.fit(iris.data, iris.target, sample_weight ** 2)
    clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
    clf2.fit(iris.data, iris.target, sample_weight)
    assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
    """Nose-style generator: run the class_weight check per forest classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weights, clf_name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    """Smoke-test the class_weight presets on a multi-output problem."""
    # Test class_weight works for multi-output.
    # (A stray closing triple-quote was removed from the original comment.)
    ForestClassifier = FOREST_CLASSIFIERS[name]
    # NOTE(review): X and y are module-level fixtures defined elsewhere in this
    # file; the label sets {-1, 1} and {-2, 2} below suggest y in {-1, 1}.
    _y = np.vstack((y, np.array(y) * 2)).T
    clf = ForestClassifier(class_weight='balanced', random_state=0)
    clf.fit(X, _y)
    # One explicit weight dict per output column.
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
                           random_state=0)
    clf.fit(X, _y)
    # smoke test for subsample and balanced subsample
    clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
    clf.fit(X, _y)
    # 'subsample' emits a warning; silence it for this smoke test.
    clf = ForestClassifier(class_weight='subsample', random_state=0)
    ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
    """Nose-style generator: multi-output class_weight smoke test per classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weight_balanced_and_bootstrap_multi_output, clf_name
def check_class_weight_errors(name):
    """Check invalid class_weight inputs raise/warn as expected for `name`."""
    # Test if class_weight raises errors and warnings when expected.
    ForestClassifier = FOREST_CLASSIFIERS[name]
    _y = np.vstack((y, np.array(y) * 2)).T

    # Invalid preset string
    clf = ForestClassifier(class_weight='the larch', random_state=0)
    assert_raises(ValueError, clf.fit, X, y)
    assert_raises(ValueError, clf.fit, X, _y)

    # Warning warm_start with preset
    clf = ForestClassifier(class_weight='auto', warm_start=True,
                           random_state=0)
    assert_warns(UserWarning, clf.fit, X, y)
    assert_warns(UserWarning, clf.fit, X, _y)

    # Not a list or preset for multi-output
    clf = ForestClassifier(class_weight=1, random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)

    # Incorrect length list for multi-output
    clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
    assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
    """Nose-style generator: class_weight error checks per forest classifier."""
    for clf_name in FOREST_CLASSIFIERS:
        yield check_class_weight_errors, clf_name
def check_warm_start(name, random_state=42):
    """Check incremental fitting with warm_start.

    Grows a forest from 5 to 10 trees via warm_start and verifies it has the
    right size and the same per-tree seeds and leaf assignments as a forest of
    10 trees fit in one shot with the same random_state.
    """
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]
    clf_ws = None
    for n_estimators in [5, 10]:
        if clf_ws is None:
            clf_ws = ForestEstimator(n_estimators=n_estimators,
                                     random_state=random_state,
                                     warm_start=True)
        else:
            # Keep the already-grown trees; only raise the target count.
            clf_ws.set_params(n_estimators=n_estimators)
        clf_ws.fit(X, y)
        assert_equal(len(clf_ws), n_estimators)

    clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
                                warm_start=False)
    clf_no_ws.fit(X, y)

    # Set comprehensions instead of set([<list comp>]): same result, no
    # intermediate list, and the idiomatic spelling.
    assert_equal({tree.random_state for tree in clf_ws},
                 {tree.random_state for tree in clf_no_ws})

    assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
                       err_msg="Failed with {0}".format(name))
def test_warm_start():
    """Nose-style generator: warm-start growth check per forest estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_warm_start, est_name
def check_warm_start_clear(name):
    """Fitting with warm_start=False must discard previously grown trees."""
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]

    reference = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
                                random_state=1)
    reference.fit(X, y)

    est = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
                          random_state=2)
    est.fit(X, y)  # initializes state with a different seed
    est.set_params(warm_start=False, random_state=1)
    est.fit(X, y)  # clears the old state; must now equal the reference

    assert_array_almost_equal(est.apply(X), reference.apply(X))
def test_warm_start_clear():
    """Nose-style generator: warm-start state-clearing check per estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_warm_start_clear, est_name
def check_warm_start_smaller_n_estimators(name):
    """A warm-started refit with fewer trees than before must raise ValueError."""
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    est = FOREST_ESTIMATORS[name](n_estimators=5, max_depth=1, warm_start=True)
    est.fit(X, y)
    est.set_params(n_estimators=4)  # shrinking a warm-started forest is invalid
    assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
    """Nose-style generator: shrinking-forest error check per estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_warm_start_smaller_n_estimators, est_name
def check_warm_start_equal_n_estimators(name):
    """Warm-start refit with unchanged n_estimators: warn, keep the forest."""
    # Test if warm start with equal n_estimators does nothing and returns the
    # same forest and raises a warning.
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]
    clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                          random_state=1)
    clf.fit(X, y)

    clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
                            random_state=1)
    clf_2.fit(X, y)
    # Now clf_2 equals clf.

    clf_2.set_params(random_state=2)
    assert_warns(UserWarning, clf_2.fit, X, y)
    # If we had fit the trees again we would have got a different forest as we
    # changed the random state.
    assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
    """Nose-style generator: no-op warm-start refit check per estimator."""
    for est_name in FOREST_ESTIMATORS:
        yield check_warm_start_equal_n_estimators, est_name
def check_warm_start_oob(name):
    """Check that warm-started fits compute the OOB score when requested."""
    # Test that the warm start computes oob score when asked.
    X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
    ForestEstimator = FOREST_ESTIMATORS[name]
    # Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
    clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
                          random_state=1, bootstrap=True, oob_score=True)
    clf.fit(X, y)

    # Grow from 5 to 15 trees with warm_start, enabling oob_score only on the
    # second fit; the final OOB score must match the one-shot reference above.
    clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
                            random_state=1, bootstrap=True, oob_score=False)
    clf_2.fit(X, y)
    clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
    clf_2.fit(X, y)
    assert_true(hasattr(clf_2, 'oob_score_'))
    assert_equal(clf.oob_score_, clf_2.oob_score_)

    # Test that oob_score is computed even if we don't need to train
    # additional trees.
    clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
                            random_state=1, bootstrap=True, oob_score=False)
    clf_3.fit(X, y)
    assert_true(not(hasattr(clf_3, 'oob_score_')))
    clf_3.set_params(oob_score=True)
    # Refit with unchanged n_estimators warns; silence it here.
    ignore_warnings(clf_3.fit)(X, y)
    assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
    """Nose-style generator: OOB warm-start check for classifiers then regressors."""
    for est_name in list(FOREST_CLASSIFIERS) + list(FOREST_REGRESSORS):
        yield check_warm_start_oob, est_name
def test_dtype_convert(n_classes=15):
    """String class labels must survive fit/predict round-trips unchanged."""
    classifier = RandomForestClassifier(random_state=0, bootstrap=False)

    # One sample per class, each a one-hot row; labels are distinct letters.
    X = np.eye(n_classes)
    # list(...) instead of the element-copying comprehension (same result).
    y = list('ABCDEFGHIJKLMNOPQRSTU'[:n_classes])

    result = classifier.fit(X, y).predict(X)
    assert_array_equal(classifier.classes_, y)
    assert_array_equal(result, y)
| |
""" Lower level layer for slicer.
Mom's spaghetti.
"""
# TODO: Consider boolean array indexing.
from typing import Any, AnyStr, Union, List, Tuple
from abc import abstractmethod
import numbers
class AtomicSlicer:
    """ Wrapping object that will unify slicing across data structures.

    What we support:
        Basic indexing (return references):
            - (start:stop:step) slicing
            - support ellipses
        Advanced indexing (return references):
            - integer array indexing

    Numpy Reference:
        Basic indexing (return views):
            - (start:stop:step) slicing
            - support ellipses and newaxis (alias for None)
        Advanced indexing (return copy):
            - integer array indexing, i.e. X[[1,2], [3,4]]
            - boolean array indexing
            - mixed array indexing (has integer array, ellipses, newaxis in same slice)
    """

    def __init__(self, o: Any, max_dim: Union[None, int, AnyStr] = "auto"):
        """ Provides a consistent slicing API to the object provided.

        Args:
            o: Object to enable consistent slicing.
                Currently supports numpy dense arrays, recursive lists ending with list or numpy.
            max_dim: Max number of dimensions the wrapped object has.
                If set to "auto", max dimensions will be inferred. This comes at compute cost.
        """
        self.o = o
        # Inferring dimensionality walks the whole structure, hence the opt-out.
        self.max_dim = UnifiedDataHandler.max_dim(o) if max_dim == "auto" else max_dim

    def __repr__(self) -> AnyStr:
        """ Override default repr for human readability.

        Returns:
            String to display.
        """
        return f"{type(self).__name__}({self.o!r})"

    def __getitem__(self, item: Any) -> Any:
        """ Consistent slicing into wrapped object.

        Args:
            item: Slicing key of type integer or slice.

        Returns:
            Sliced object.

        Raises:
            ValueError: If slicing is not compatible with wrapped object.
        """
        # Normalize the key to a fully-expanded tuple, then dispatch by type.
        normalized_key = unify_slice(item, self.max_dim)
        return UnifiedDataHandler.slice(self.o, normalized_key, self.max_dim)
def unify_slice(item: Any, max_dim: int, alias_lookup=None) -> Tuple:
    """ Resolves aliases and ellipses in a slice item.

    Args:
        item: Slicing key that is passed to __getitem__.
        max_dim: Max dimension of object to be sliced.
        alias_lookup: AliasLookup structure.

    Returns:
        A tuple representation of the item.
    """
    key = _normalize_slice_key(item)          # always a top-level tuple
    key = _normalize_subkey_types(key)        # numpy scalars/arrays -> int/list
    key = _handle_newaxis_ellipses(key, max_dim)  # expand ... to full rank
    return _handle_aliases(key, alias_lookup) if alias_lookup else key
def _normalize_subkey_types(index_tup: Tuple) -> Tuple:
    """ Casts subkeys into basic types such as int.

    Args:
        index_tup: Slicing key that is passed within __getitem__.

    Returns:
        Tuple with subkeys casted to basic types.
    """
    # Numpy integer scalar type names accepted for coercion to builtin int.
    int_like_names = {
        "int8",
        "int16",
        "int32",
        "int64",
        "uint8",
        "uint16",
        "uint32",
        "uint64",
    }
    normalized = []
    for sub in index_tup:
        if _safe_isinstance(sub, "numpy", int_like_names):
            normalized.append(int(sub))
        elif _safe_isinstance(sub, "numpy", "ndarray"):
            # Only flat index arrays are supported as subkeys.
            if len(sub.shape) != 1:
                raise ValueError(f"Cannot use array of shape {sub.shape} as subkey.")
            normalized.append(sub.tolist())
        else:
            normalized.append(sub)
    return tuple(normalized)
def _normalize_slice_key(key: Any) -> Tuple:
""" Normalizes slice key into always being a top-level tuple.
Args:
key: Slicing key that is passed within __getitem__.
Returns:
Expanded slice as a tuple.
"""
if not isinstance(key, tuple):
return (key,)
else:
return key
def _handle_newaxis_ellipses(index_tup: Tuple, max_dim: int) -> Tuple:
""" Expands newaxis and ellipses within a slice for simplification.
This code is mostly adapted from: https://github.com/clbarnes/h5py_like/blob/master/h5py_like/shape_utils.py#L111
Args:
index_tup: Slicing key as a tuple.
max_dim: Maximum number of dimensions in the respective sliceable object.
Returns:
Expanded slice as a tuple.
"""
non_indexes = (None, Ellipsis)
concrete_indices = sum(idx not in non_indexes for idx in index_tup)
index_list = []
# newaxis_at = []
has_ellipsis = False
int_count = 0
for item in index_tup:
if isinstance(item, numbers.Number):
int_count += 1
# NOTE: If we need locations of new axis, re-enable this.
if item is None: # pragma: no cover
pass
# newaxis_at.append(len(index_list) + len(newaxis_at) - int_count)
elif item == Ellipsis:
if has_ellipsis: # pragma: no cover
raise IndexError("an index can only have a single ellipsis ('...')")
has_ellipsis = True
initial_len = len(index_list)
while len(index_list) + (concrete_indices - initial_len) < max_dim:
index_list.append(slice(None))
else:
index_list.append(item)
if len(index_list) > max_dim: # pragma: no cover
raise IndexError("too many indices for array")
while len(index_list) < max_dim:
index_list.append(slice(None))
# return index_list, newaxis_at
return tuple(index_list)
def _handle_aliases(index_tup: Tuple, alias_lookup) -> Tuple:
new_index_tup = []
def resolve(item, dim):
if isinstance(item, slice):
return item
# Replace element if in alias lookup, otherwise use original.
item = alias_lookup.get(dim, item, item)
return item
# Go through each element within the index and resolve if needed.
for dim, item in enumerate(index_tup):
if isinstance(item, list):
new_item = []
for sub_item in item:
new_item.append(resolve(sub_item, dim))
else:
new_item = resolve(item, dim)
new_index_tup.append(new_item)
return tuple(new_index_tup)
class Tracked(AtomicSlicer):
    """ Tracked defines an object that slicer wraps."""

    def __init__(self, o: Any, dim: Union[int, List, tuple, None, str] = "auto"):
        """ Defines an object that will be wrapped by slicer.

        Args:
            o: Object that will be tracked for slicer.
            dim: Target dimension(s) slicer will index on for this object.
        """
        super().__init__(o)

        # Protected attribute that can be overriden.
        self._name = None

        # Normalize dim into list-of-ints coordinate form.
        if dim == "auto":
            self.dim = list(range(self.max_dim))
        elif dim is None:
            self.dim = []
        elif isinstance(dim, int):
            self.dim = [dim]
        elif isinstance(dim, list):
            # Kept by reference, matching historical behavior.
            self.dim = dim
        elif isinstance(dim, tuple):
            self.dim = list(dim)
        else:  # pragma: no cover
            raise ValueError(f"Cannot handle dim of type: {type(dim)}")
class Obj(Tracked):
    """ A plain tracked object that exposes no extra lookup keys. """

    def __init__(self, o, dim="auto"):
        super().__init__(o, dim)
class Alias(Tracked):
    """ Defines a tracked object as well as additional __getitem__ keys. """

    def __init__(self, o, dim):
        # An alias maps values onto exactly one dimension of the target.
        tracks_single_dim = isinstance(dim, int) or (
            isinstance(dim, (list, tuple)) and len(dim) <= 1
        )
        if not tracks_single_dim:  # pragma: no cover
            raise ValueError("Aliases must track a single dimension")
        super().__init__(o, dim)
class AliasLookup:
def __init__(self, aliases):
self._lookup = {}
# Populate lookup and merge indexes.
for _, alias in aliases.items():
self.update(alias)
def update(self, alias):
if alias.dim is None or len(alias.dim) == 0:
return
dim = alias.dim[0]
if dim not in self._lookup:
self._lookup[dim] = {}
dim_lookup = self._lookup[dim]
# NOTE: Alias must be backed by either a list or dictionary.
itr = enumerate(alias.o) if isinstance(alias.o, list) else alias.o.items()
for i, x in itr:
if x not in dim_lookup:
dim_lookup[x] = set()
dim_lookup[x].add(i)
def delete(self, alias):
'''Delete an alias that exists from lookup'''
dim = alias.dim[0]
dim_lookup = self._lookup[dim]
# NOTE: Alias must be backed by either a list or dictionary.
itr = enumerate(alias.o) if isinstance(alias.o, list) else alias.o.items()
for i, x in itr:
del dim_lookup[x]
def get(self, dim, target, default=None):
if dim not in self._lookup:
return default
indexes = self._lookup[dim].get(target, None)
if indexes is None:
return default
if len(indexes) == 1:
return next(iter(indexes))
else:
return list(indexes)
def resolve_dim(slicer_index: Tuple, slicer_dim: List) -> List:
    """ Extracts new dim after applying slicing index and maps it back to the original index list. """
    # 1 marks a scalar subkey (dimension collapses), 0 marks a kept dimension.
    collapsed = [
        0 if isinstance(sub, (tuple, list, slice)) else 1 for sub in slicer_index
    ]
    remapped = []
    for dim in slicer_dim:
        if collapsed[dim] == 0:
            # Shift left by the number of dimensions collapsed before this one.
            remapped.append(dim - sum(collapsed[:dim]))
    return remapped
def reduced_o(tracked: "Tracked") -> Union[List, Any]:
    """Unwrap the tracked objects; a single element is returned bare."""
    unwrapped = [t.o for t in tracked]
    return unwrapped[0] if len(unwrapped) == 1 else unwrapped
class BaseHandler:
    """ Interface each container handler implements for unified slicing. """

    @classmethod
    @abstractmethod
    def head_slice(cls, o, index_tup, max_dim):
        """Apply the leading part of index_tup; return (is_element, sliced, cut)."""
        raise NotImplementedError()  # pragma: no cover

    @classmethod
    @abstractmethod
    def tail_slice(cls, o, tail_index, max_dim, flatten=True):
        """Recursively apply the remaining index to the head-sliced object."""
        raise NotImplementedError()  # pragma: no cover

    @classmethod
    @abstractmethod
    def max_dim(cls, o):
        """Return the number of dimensions of `o`."""
        raise NotImplementedError()  # pragma: no cover

    @classmethod
    def default_alias(cls, o):
        """Aliases derived from the object itself; none by default."""
        return []
class SeriesHandler(BaseHandler):
    """ Slicing handler for pandas Series. """

    @classmethod
    def head_slice(cls, o, index_tup, max_dim):
        leading = index_tup[0]
        # Positional slicing; a scalar int key collapses the only dimension.
        return isinstance(leading, int), o.iloc[leading], 1

    @classmethod
    def tail_slice(cls, o, tail_index, max_dim, flatten=True):
        # NOTE: Series only has one dimension,
        # call slicer again to end the recursion.
        return AtomicSlicer(o, max_dim=max_dim)[tail_index]

    @classmethod
    def max_dim(cls, o):
        return len(o.shape)

    @classmethod
    def default_alias(cls, o):
        # The Series index labels alias dimension 0.
        idx_alias = Alias(o.index.to_list(), 0)
        idx_alias._name = "index"
        return [idx_alias]
class DataFrameHandler(BaseHandler):
    """ Slicing handler for pandas DataFrames. """

    @classmethod
    def head_slice(cls, o, index_tup, max_dim):
        # NOTE: At head slice, we know there are two fixed dimensions.
        # A scalar in the final (column) position collapses to an element.
        collapses = isinstance(index_tup[-1], int)
        return collapses, o.iloc[index_tup], 2

    @classmethod
    def tail_slice(cls, o, tail_index, max_dim, flatten=True):
        # NOTE: Dataframe has fixed dimensions,
        # call slicer again to end the recursion.
        return AtomicSlicer(o, max_dim=max_dim)[tail_index]

    @classmethod
    def max_dim(cls, o):
        return len(o.shape)

    @classmethod
    def default_alias(cls, o):
        # Row labels alias dimension 0, column labels alias dimension 1.
        idx_alias = Alias(o.index.to_list(), 0)
        idx_alias._name = "index"
        col_alias = Alias(o.columns.to_list(), 1)
        col_alias._name = "columns"
        return [idx_alias, col_alias]
class ArrayHandler(BaseHandler):
    """ Slicing handler for numpy arrays, torch tensors and scipy matrices. """

    # The sparse matrix flavors are handled identically apart from the format
    # name; keep them in one place instead of four copy-pasted branches.
    _SPARSE_FORMATS = ("csc", "csr", "dok", "lil")

    @classmethod
    def _is_sparse(cls, o):
        """True if o is one of the supported scipy sparse matrix types."""
        return any(
            _safe_isinstance(o, f"scipy.sparse.{fmt}", f"{fmt}_matrix")
            for fmt in cls._SPARSE_FORMATS
        )

    @classmethod
    def head_slice(cls, o, index_tup, max_dim):
        """Slice as many leading native dimensions as possible in one shot.

        Stops early at a string subkey (which belongs to an outer container's
        alias space) or when the object's own dimensions run out.
        """
        head_index, tail_index = index_tup[0], index_tup[1:]
        cut = 1
        for sub_index in tail_index:
            if isinstance(sub_index, str) or cut == len(o.shape):
                break
            cut += 1

        # Process native array dimensions
        cut_index = index_tup[:cut]
        # Any scalar int subkey collapses a dimension.
        is_element = any(isinstance(x, int) for x in cut_index)
        sliced_o = o[cut_index]

        return is_element, sliced_o, cut

    @classmethod
    def tail_slice(cls, o, tail_index, max_dim, flatten=True):
        if flatten:
            # NOTE: Scipy matrices are flattened manually to keep consistent
            # with the rest of slicer's API.
            if cls._is_sparse(o):
                return AtomicSlicer(o.toarray().flatten(), max_dim=max_dim)[tail_index]
            return AtomicSlicer(o, max_dim=max_dim)[tail_index]

        inner = [AtomicSlicer(e, max_dim=max_dim)[tail_index] for e in o]
        if _safe_isinstance(o, "numpy", "ndarray"):
            import numpy
            if len(inner) > 0 and hasattr(inner[0], "__len__"):
                ragged = not all(len(x) == len(inner[0]) for x in inner)
            else:
                ragged = False
            if ragged:
                # FIX: `numpy.object` was a deprecated alias for the builtin
                # `object` and was removed in numpy 1.24.
                return numpy.array(inner, dtype=object)
            return numpy.array(inner)
        elif _safe_isinstance(o, "torch", "Tensor"):
            import torch
            if len(inner) > 0 and isinstance(inner[0], torch.Tensor):
                return torch.stack(inner)
            return torch.tensor(inner)
        else:
            for fmt in cls._SPARSE_FORMATS:
                if _safe_isinstance(o, f"scipy.sparse.{fmt}", f"{fmt}_matrix"):
                    from scipy.sparse import vstack
                    return vstack(inner, format=fmt)
            raise ValueError(f"Cannot handle type {type(o)}.")  # pragma: no cover

    @classmethod
    def max_dim(cls, o):
        # Object arrays may nest arbitrary payloads; recurse to find the depth.
        if _safe_isinstance(o, "numpy", "ndarray") and o.dtype == "object":
            return max([UnifiedDataHandler.max_dim(x) for x in o], default=-1) + 1
        return len(o.shape)
class DictHandler(BaseHandler):
    """ Slicing handler for dicts; keys act as the first dimension. """

    @classmethod
    def head_slice(cls, o, index_tup, max_dim):
        """Slice the dict by its leading key(s); returns (is_element, sliced, 1)."""
        head_index = index_tup[0]
        # NOTE: a `len(index_tup) == 0` guard used to follow here, but it was
        # unreachable -- `index_tup[0]` above already raises IndexError for an
        # empty tuple (and UnifiedDataHandler.slice short-circuits that case).
        if isinstance(head_index, (list, tuple)):
            # A collection of keys: slice out each one and rebuild a dict.
            return (
                False,
                {
                    sub_index: AtomicSlicer(o, max_dim=max_dim)[sub_index]
                    for sub_index in head_index
                },
                1,
            )
        elif isinstance(head_index, slice):
            if head_index == slice(None, None, None):
                # Full slice keeps the dict as-is.
                return False, o, 1
            return False, o[head_index], 1
        else:
            # Scalar key: direct element lookup.
            return True, o[head_index], 1

    @classmethod
    def tail_slice(cls, o, tail_index, max_dim, flatten=True):
        if flatten:
            return AtomicSlicer(o, max_dim=max_dim)[tail_index]
        else:
            return {
                k: AtomicSlicer(e, max_dim=max_dim)[tail_index] for k, e in o.items()
            }

    @classmethod
    def max_dim(cls, o):
        # One dict level plus the deepest value underneath.
        return max([UnifiedDataHandler.max_dim(x) for x in o.values()], default=-1) + 1
class ListTupleHandler(BaseHandler):
    """ Slicing handler for lists and tuples. """

    @classmethod
    def head_slice(cls, o, index_tup, max_dim):
        """Slice the sequence by its leading index; returns (is_element, sliced, 1)."""
        head_index = index_tup[0]
        # NOTE: a `len(index_tup) == 0` guard used to follow here, but it was
        # unreachable -- `index_tup[0]` above already raises IndexError for an
        # empty tuple (and UnifiedDataHandler.slice short-circuits that case).
        if isinstance(head_index, (list, tuple)):
            if len(head_index) == 0:
                return False, o, 1
            results = [
                AtomicSlicer(o, max_dim=max_dim)[sub_index]
                for sub_index in head_index
            ]
            # Preserve the container type of the input.
            results = tuple(results) if isinstance(o, tuple) else results
            return False, results, 1
        elif isinstance(head_index, slice):
            return False, o[head_index], 1
        elif isinstance(head_index, int):
            return True, o[head_index], 1
        else:  # pragma: no cover
            raise ValueError(f"Invalid key {head_index} for {o}")

    @classmethod
    def tail_slice(cls, o, tail_index, max_dim, flatten=True):
        if flatten:
            return AtomicSlicer(o, max_dim=max_dim)[tail_index]
        else:
            results = [AtomicSlicer(e, max_dim=max_dim)[tail_index] for e in o]
            return tuple(results) if isinstance(o, tuple) else results

    @classmethod
    def max_dim(cls, o):
        # One sequence level plus the deepest element underneath.
        return max([UnifiedDataHandler.max_dim(x) for x in o], default=-1) + 1
class UnifiedDataHandler:
    """ Registry that maps types to their unified slice calls. """
    # NOTE: a second bare string literal used to follow the docstring; it was
    # a no-op statement and has been replaced by this comment.

    # Maps (module name, type name) to the handler implementing slicing for it.
    type_map = {
        ("builtins", "list"): ListTupleHandler,
        ("builtins", "tuple"): ListTupleHandler,
        ("builtins", "dict"): DictHandler,
        ("torch", "Tensor"): ArrayHandler,
        ("numpy", "ndarray"): ArrayHandler,
        ("scipy.sparse.csc", "csc_matrix"): ArrayHandler,
        ("scipy.sparse.csr", "csr_matrix"): ArrayHandler,
        ("scipy.sparse.dok", "dok_matrix"): ArrayHandler,
        ("scipy.sparse.lil", "lil_matrix"): ArrayHandler,
        ("pandas.core.frame", "DataFrame"): DataFrameHandler,
        ("pandas.core.series", "Series"): SeriesHandler,
    }

    @classmethod
    def slice(cls, o, index_tup, max_dim):
        """Dispatch slicing of `o` to its registered handler."""
        # NOTE: Unified handles base cases such as empty tuples, which
        # specialized handlers do not.
        if isinstance(index_tup, (tuple, list)) and len(index_tup) == 0:
            return o

        # Slice as delegated by data handler.
        handler = cls.type_map[_type_name(o)]
        is_element, sliced_o, cut = handler.head_slice(o, index_tup, max_dim)
        return handler.tail_slice(sliced_o, index_tup[cut:], max_dim - cut, is_element)

    @classmethod
    def max_dim(cls, o):
        """Dimensionality of `o`; 0 for unregistered types (treated as scalars)."""
        o_type = _type_name(o)
        if o_type not in cls.type_map:
            return 0
        return cls.type_map[o_type].max_dim(o)

    @classmethod
    def default_alias(cls, o):
        """Default aliases for `o`; empty for unregistered types."""
        o_type = _type_name(o)
        if o_type not in cls.type_map:
            return {}
        return cls.type_map[o_type].default_alias(o)
def _type_name(o: object) -> Tuple[str, str]:
return o.__class__.__module__, o.__class__.__name__
def _safe_isinstance(
o: object, module_name: str, type_name: Union[str, set, tuple]
) -> bool:
o_module, o_type = _type_name(o)
if isinstance(type_name, str):
return o_module == module_name and o_type == type_name
else:
return o_module == module_name and o_type in type_name
| |
'''
Created on 4 Sep 2015
@author: maxz
'''
import unittest
import numpy as np
import GPy
class BGPLVMTest(unittest.TestCase):
    """Compare BayesianGPLVMMiniBatch variants against the full BayesianGPLVM.

    Method suffix convention: mX_sY encodes missing_data=X, stochastic=Y
    (0/1 flags). Each variant must reproduce the full model's likelihood and
    gradients once its parameters are copied over.
    """

    def setUp(self):
        # Fixed seed so the generated data and the fitted reference model are
        # identical across test methods.
        np.random.seed(12345)
        X, W = np.random.normal(0,1,(100,6)), np.random.normal(0,1,(6,13))
        Y = X.dot(W) + np.random.normal(0, .1, (X.shape[0], W.shape[1]))
        # ~10% boolean missing-data mask (not referenced by the asserts below).
        self.inan = np.random.binomial(1, .1, Y.shape).astype(bool)
        self.X, self.W, self.Y = X,W,Y
        self.Q = 3  # latent space dimensionality
        # Reference: the full (non-minibatch) Bayesian GPLVM.
        self.m_full = GPy.models.BayesianGPLVM(Y, self.Q)

    def test_lik_comparisons_m1_s0(self):
        # Test if the different implementations give the exact same likelihood as the full model.
        # All of the following settings should give the same likelihood and gradients as the full model:
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=False)
        m[:] = self.m_full[:]  # copy the reference model's parameter vector
        np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
        np.testing.assert_allclose(m.gradient, self.m_full.gradient)
        assert(m.checkgrad())

    def test_predict_missing_data(self):
        """Prediction paths of the minibatch model must match the full model."""
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=True, batchsize=self.Y.shape[1])
        m[:] = self.m_full[:]
        np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
        np.testing.assert_allclose(m.gradient, self.m_full.gradient)
        # full_cov=True on the variational posterior object m.X is expected to
        # be unsupported here.
        self.assertRaises(NotImplementedError, m.predict, m.X, full_cov=True)
        mu1, var1 = m.predict(m.X, full_cov=False)
        mu2, var2 = self.m_full.predict(self.m_full.X, full_cov=False)
        np.testing.assert_allclose(mu1, mu2)
        np.testing.assert_allclose(var1, var2)
        # Predicting from the posterior mean supports full covariance; the
        # minibatch model returns a per-output stack, hence the [:,:,0] view.
        mu1, var1 = m.predict(m.X.mean, full_cov=True)
        mu2, var2 = self.m_full.predict(self.m_full.X.mean, full_cov=True)
        np.testing.assert_allclose(mu1, mu2)
        np.testing.assert_allclose(var1[:,:,0], var2)
        mu1, var1 = m.predict(m.X.mean, full_cov=False)
        mu2, var2 = self.m_full.predict(self.m_full.X.mean, full_cov=False)
        np.testing.assert_allclose(mu1, mu2)
        np.testing.assert_allclose(var1[:,[0]], var2)

    def test_lik_comparisons_m0_s0(self):
        # Same comparison with missing_data=False, stochastic=False; the
        # variational X variance is copied explicitly from the full model.
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, X_variance=self.m_full.X.variance.values, missing_data=False, stochastic=False)
        m[:] = self.m_full[:]
        np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
        np.testing.assert_allclose(m.gradient, self.m_full.gradient)
        assert(m.checkgrad())

    def test_lik_comparisons_m1_s1(self):
        # Same comparison with missing_data=True, stochastic=True; batchsize
        # equal to the output dimension makes the stochastic pass exhaustive.
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=True, batchsize=self.Y.shape[1])
        m[:] = self.m_full[:]
        np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
        np.testing.assert_allclose(m.gradient, self.m_full.gradient)
        assert(m.checkgrad())

    def test_lik_comparisons_m0_s1(self):
        # Same comparison with missing_data=False, stochastic=True.
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=False, stochastic=True, batchsize=self.Y.shape[1])
        m[:] = self.m_full[:]
        np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
        np.testing.assert_allclose(m.gradient, self.m_full.gradient)
        assert(m.checkgrad())

    def test_gradients_missingdata(self):
        # Numerical gradient check with missing data handling enabled.
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=False, batchsize=self.Y.shape[1])
        assert(m.checkgrad())

    def test_gradients_missingdata_stochastics(self):
        # Gradient checks with missing data and small stochastic batch sizes.
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=True, batchsize=1)
        assert(m.checkgrad())
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=True, batchsize=4)
        assert(m.checkgrad())

    def test_gradients_stochastics(self):
        # Gradient checks with stochastic batches but no missing data.
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=False, stochastic=True, batchsize=1)
        assert(m.checkgrad())
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=False, stochastic=True, batchsize=4)
        assert(m.checkgrad())

    def test_predict(self):
        # Likelihood/gradient equality for the fully stochastic variant (the
        # actual prediction paths are exercised in test_predict_missing_data).
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, missing_data=True, stochastic=True, batchsize=self.Y.shape[1])
        m[:] = self.m_full[:]
        np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
        np.testing.assert_allclose(m.gradient, self.m_full.gradient)
        assert(m.checkgrad())
class SparseGPMinibatchTest(unittest.TestCase):
    def setUp(self):
        # Fixed seed for reproducible data and reference model.
        np.random.seed(12345)
        X, W = np.random.normal(0,1,(100,6)), np.random.normal(0,1,(6,13))
        Y = X.dot(W) + np.random.normal(0, .1, (X.shape[0], W.shape[1]))
        # ~10% boolean missing-data mask (kept as a fixture attribute).
        self.inan = np.random.binomial(1, .1, Y.shape).astype(bool)
        self.X, self.W, self.Y = X,W,Y
        self.Q = 3  # latent space dimensionality
        # NOTE(review): the reference here is a SparseGPLVM (point-estimate X),
        # which is presumably why the comparison models below pass
        # X_variance=False -- confirm against the model implementations.
        self.m_full = GPy.models.SparseGPLVM(Y, self.Q, kernel=GPy.kern.RBF(self.Q, ARD=True))
    def test_lik_comparisons_m1_s0(self):
        # missing_data=True, stochastic=False must reproduce the reference
        # model's likelihood and gradients after copying its parameters.
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, X_variance=False, missing_data=True, stochastic=False)
        m[:] = self.m_full[:]  # copy the reference model's parameter vector
        np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
        np.testing.assert_allclose(m.gradient, self.m_full.gradient)
        assert(m.checkgrad())
    def test_sparsegp_init(self):
        # Smoke-test SparseGPMiniBatch construction, gradient checks and a few
        # optimizer iterations for all four (missing_data, stochastic) combos.
        try:
            np.random.seed(1234)
            # 10 inducing points sampled without replacement from the inputs.
            Z = self.X[np.random.choice(self.X.shape[0], replace=False, size=10)].copy()
            Q = Z.shape[1]
            m = GPy.models.sparse_gp_minibatch.SparseGPMiniBatch(self.X, self.Y, Z, GPy.kern.RBF(Q)+GPy.kern.Matern32(Q)+GPy.kern.Bias(Q), GPy.likelihoods.Gaussian(), missing_data=True, stochastic=False)
            assert(m.checkgrad())
            m.optimize('adadelta', max_iters=10)
            assert(m.checkgrad())
            m = GPy.models.sparse_gp_minibatch.SparseGPMiniBatch(self.X, self.Y, Z, GPy.kern.RBF(Q)+GPy.kern.Matern32(Q)+GPy.kern.Bias(Q), GPy.likelihoods.Gaussian(), missing_data=True, stochastic=True)
            assert(m.checkgrad())
            m.optimize('rprop', max_iters=10)
            assert(m.checkgrad())
            m = GPy.models.sparse_gp_minibatch.SparseGPMiniBatch(self.X, self.Y, Z, GPy.kern.RBF(Q)+GPy.kern.Matern32(Q)+GPy.kern.Bias(Q), GPy.likelihoods.Gaussian(), missing_data=False, stochastic=False)
            assert(m.checkgrad())
            m.optimize('rprop', max_iters=10)
            assert(m.checkgrad())
            m = GPy.models.sparse_gp_minibatch.SparseGPMiniBatch(self.X, self.Y, Z, GPy.kern.RBF(Q)+GPy.kern.Matern32(Q)+GPy.kern.Bias(Q), GPy.likelihoods.Gaussian(), missing_data=False, stochastic=True)
            assert(m.checkgrad())
            m.optimize('adadelta', max_iters=10)
            assert(m.checkgrad())
        except ImportError:
            # The adadelta/rprop optimizers require the optional climin package.
            from nose import SkipTest
            raise SkipTest('climin not installed, skipping stochastic gradients')
    def test_predict_missing_data(self):
        # Predictions of the minibatch model must match the reference model on
        # both the diagonal and the full-covariance paths.
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, X_variance=False, missing_data=True, stochastic=True, batchsize=self.Y.shape[1])
        m[:] = self.m_full[:]
        np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
        np.testing.assert_allclose(m.gradient, self.m_full.gradient)
        mu1, var1 = m.predict(m.X, full_cov=False)
        mu2, var2 = self.m_full.predict(self.m_full.X, full_cov=False)
        np.testing.assert_allclose(mu1, mu2)
        # The minibatch model returns one variance column per output; each
        # must equal the reference variance.
        for i in range(var1.shape[1]):
            np.testing.assert_allclose(var1[:,[i]], var2)
        mu1, var1 = m.predict(m.X, full_cov=True)
        mu2, var2 = self.m_full.predict(self.m_full.X, full_cov=True)
        np.testing.assert_allclose(mu1, mu2)
        # Same per-output check for the stacked full covariance matrices.
        for i in range(var1.shape[2]):
            np.testing.assert_allclose(var1[:,:,i], var2)
    def test_lik_comparisons_m0_s0(self):
        # missing_data=False, stochastic=False must reproduce the reference
        # model's likelihood and gradients after copying its parameters.
        m = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(self.Y, self.Q, X_variance=False, missing_data=False, stochastic=False)
        m[:] = self.m_full[:]
        np.testing.assert_almost_equal(m.log_likelihood(), self.m_full.log_likelihood(), 7)
        np.testing.assert_allclose(m.gradient, self.m_full.gradient)
        assert(m.checkgrad())
def test_lik_comparisons_m1_s1(self):
    """Minibatch model (missing_data=True, stochastic=True) must give the
    exact same likelihood and gradients as the full model."""
    model = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(
        self.Y, self.Q, X_variance=False,
        missing_data=True, stochastic=True, batchsize=self.Y.shape[1])
    # Copy the full model's parameters so both sit at the same point.
    model[:] = self.m_full[:]
    np.testing.assert_almost_equal(model.log_likelihood(),
                                   self.m_full.log_likelihood(), 7)
    np.testing.assert_allclose(model.gradient, self.m_full.gradient)
    assert model.checkgrad()
def test_lik_comparisons_m0_s1(self):
    """Minibatch model (missing_data=False, stochastic=True) must give the
    exact same likelihood and gradients as the full model."""
    model = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(
        self.Y, self.Q, X_variance=False,
        missing_data=False, stochastic=True, batchsize=self.Y.shape[1])
    # Copy the full model's parameters so both sit at the same point.
    model[:] = self.m_full[:]
    np.testing.assert_almost_equal(model.log_likelihood(),
                                   self.m_full.log_likelihood(), 7)
    np.testing.assert_allclose(model.gradient, self.m_full.gradient)
    assert model.checkgrad()
def test_gradients_missingdata(self):
    """Numerical gradient check with missing-data handling, non-stochastic."""
    model = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(
        self.Y, self.Q, X_variance=False,
        missing_data=True, stochastic=False, batchsize=self.Y.shape[1])
    assert model.checkgrad()
def test_gradients_missingdata_stochastics(self):
    """Numerical gradient check with missing data under stochastic
    minibatching, for two different batch sizes."""
    for batch in (1, 4):
        model = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(
            self.Y, self.Q, X_variance=False,
            missing_data=True, stochastic=True, batchsize=batch)
        assert model.checkgrad()
def test_gradients_stochastics(self):
    """Numerical gradient check under stochastic minibatching (no missing
    data), for two different batch sizes."""
    for batch in (1, 4):
        model = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(
            self.Y, self.Q, X_variance=False,
            missing_data=False, stochastic=True, batchsize=batch)
        assert model.checkgrad()
def test_predict(self):
    """Minibatch model must match the full model's likelihood and gradients.

    NOTE(review): despite its name this test does not call ``predict`` —
    its body duplicates test_lik_comparisons_m1_s1; confirm intent.
    """
    model = GPy.models.bayesian_gplvm_minibatch.BayesianGPLVMMiniBatch(
        self.Y, self.Q, X_variance=False,
        missing_data=True, stochastic=True, batchsize=self.Y.shape[1])
    # Copy the full model's parameters so both sit at the same point.
    model[:] = self.m_full[:]
    np.testing.assert_almost_equal(model.log_likelihood(),
                                   self.m_full.log_likelihood(), 7)
    np.testing.assert_allclose(model.gradient, self.m_full.gradient)
    assert model.checkgrad()
if __name__ == "__main__":
    # Allow running this test module directly; discovers and runs all tests.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
| |
#!/usr/bin/env python
"""
Main file for a home monitoring system on the Raspberry Pi 2.
Requires --conf (JSON) and --car and --face (XML's) arguments.
Utilizes the Raspberry Pi 2 camera module and OpenCV framework.
Part of ELEC4500 Senior Electronic Design I, Spring 2016
Dept. of Electrical Engineering and Technology
Wentworth Institute of Technology.
"""
import argparse
import json
import sys
import time
from threading import Thread
import warnings
# Path added to access default site packages (cv2) on RPi2.
sys.path.append('/usr/local/lib/python2.7/site-packages')
import cv2
from scrying_utils.camera_func import Scry
import scrying_utils.init_logger as scrying_log
from scrying_utils.haarcascade import HaarDetect
import scrying_utils.preprocessing_func as prepro
import scrying_utils.time_disp as time_disp
import imutils
from imutils.video import FPS
from imutils.object_detection import non_max_suppression
import numpy as np
from picamera.array import PiRGBArray
from picamera import PiCamera
# Front matter.
__author__ = "Sze 'Ron' Chau"
__credits__ = ["Adrian Rosebrock", "Nelson Bamford", "Ariel Martinez"]
__license__ = "MIT"
__maintainer__ = "Sze 'Ron' Chau"
__email__ = "wodiesan@gmail.com"
# Get the local timezone to convert UTC time for display.
local_tz = time_disp.get_localzone()
# Init logging to console and files (history/ dir holds user and dev logs).
logger_rpi = 'history/'
user_log = 'log_scrying_user.log'
dev_log = 'log_scrying_dev.log'
logger = scrying_log.init_logger(__name__, logger_rpi, user_log, dev_log)
logger.debug('Init logging to console and files successful.')
# Construct the argument parse.
logger.debug('Parsing command-line arguments.')
ap = argparse.ArgumentParser()
ap.add_argument("-n", "--num-frames", type=int, default=100,
                help="# of frames to loop over for FPS test")
ap.add_argument("-c", "--conf", required=True, help="JSON camera config path.")
ap.add_argument("-f", "--face", required=False,
                help="Face Haar path.")
ap.add_argument("-v", "--vehi", action='store_true', required=False,
                help="Vehicle Haar path.")
args = vars(ap.parse_args())
# Facial detection.
# NOTE(review): HaarDetect is constructed even when --face was omitted;
# presumably it tolerates a None cascade path — confirm against HaarDetect.
fd = HaarDetect(args['face'])
# Alert the user if any of the optional command-line options are missing.
if not args['face']:
    logger.error('No Haar facial classifer found. Skipping facial detect.')
if not args['vehi']:
    logger.error('No Haar vehicle classifier found. Skipping vehicle detect.')
# Supress expected warnings and access camera config file.
warnings.filterwarnings("ignore")
conf = json.load(open(args["conf"]))
logger.debug('Command-line arguments loaded.')
# Init camera module with JSON config file.
logger.debug('Initiating camera module.')
res = tuple(conf["480p"])
fps = conf["fps"]
rot = conf["rotation"]
# try:
#     camera = PiCamera()
#     camera.resolution = tuple(conf["480p"])
#     camera.framerate = conf["fps"]
#     camera.rotation = conf["rotation"]
# except PiCamera.exc.PiCameraError:
#     logger.critical('Unable to access camera module. Exiting.')
#     sys.exit()
# Init seperate thread for pipeline.
# rawCapture = PiRGBArray(camera, size=tuple(conf["480p"]))
camera = Scry().start()
time.sleep(conf["camera_warmup_time"])
logger.debug('\n\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
             ' Streaming {} resolution at {} fps. \n'
             '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
             ''.format(res, fps))
# Init HOG detector, set SVM to pre-trained human silhouette detector.
# DISABLED FOR SENIOR DESIGN SHOWCASE ON 09AUG2016.
# hog = cv2.HOGDescriptor()
# hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
# Capture frames from camera module.
# camera.start()
# for f in camera.capture_continuous(rawCapture, format="bgr",
#                                    use_video_port=True):
logger.debug('Begin FPS counter.')
# NOTE(review): `fps` is rebound here from the config frame rate to the FPS
# counter object; the config value was already interpolated into the banner
# above, but the rebinding is easy to misread.
fps = FPS().start()
# While statement for multithreaded approach; for statement otherwise.
# for f in camera.capture_continuous(rawCapture, format="bgr",
#                                    use_video_port=True):
while camera:
    # Grab raw numpy array representing img and init on-screen status text.
    # frame = f.array
    frame = camera.read()
    text = "Clear"
    # Resize frame, preprocess with grayscale and Gaussian blur.
    # frame = imutils.resize(frame, width=conf["width_480p"])
    gray = prepro.grayscale(frame)
    # gray = prepro.gauss(frame, (21, 21), 0)
    # Detect faces in the image and then clone the frame
    # so that we can draw on it.
    # faceRects = fd.detect(gray, scaleFac=1.1, minNbrs=5,
    #                       minSize=(30, 30))
    faceRects = fd.detect(gray, 1.1, 5, (30, 30))
    frameClone = frame.copy()
    # Loop over the face bounding boxes and draw them.
    for (fX, fY, fW, fH) in faceRects:
        cv2.rectangle(frameClone, (fX, fY), (fX + fW, fY + fH), (0, 255, 0), 3)
        text = 'Face detected!'
    # Detect silhouettes in frame. Adjust scale based on camera location.
    # winStride is the sliding window step size coordinates.
    # A > scale val evals < layers, t.f. it runs faster but less accurate.
    # weights is the confidence val returned from SVG per detection.
    # DISABLED FOR SENIOR DESIGN SHOWCASE ON 09AUG2016.
    # (bodyRects, weights) = hog.detectMultiScale(gray, winStride=(4, 4),
    #                                             padding=(8, 8), scale=1.05)
    # Draw bounding boxes on detected regions of the clone frame.
    # for (x, y, w, h) in bodyRects:
    #     cv2.rectangle(frameClone,
    #                   (x, y),
    #                   (x + w, y + h),
    #                   tuple(conf["green"]),
    #                   1)
    # Apply non-maxima suppression to bounding boxes using a large overlap
    # thresh to try to maintain overlapping boxes.
    # DISABLED FOR SENIOR DESIGN SHOWCASE ON 09AUG2016.
    # bodyRects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in bodyRects])
    # pick = non_max_suppression(bodyRects, probs=None, overlapThresh=0.65)
    # Draw the final bounding boxes on the clone.
    # DISABLED FOR SENIOR DESIGN SHOWCASE ON 09AUG2016.
    # for (xA, yA, xB, yB) in pick:
    #     cv2.rectangle(frameClone,
    #                   (xA, yA),
    #                   (xB, yB),
    #                   tuple(conf["tyrian"]),
    #                   2)
    #     text = 'Silhouette'
    # Populate the display feed with the time and relevant status information.
    # ts = timestamp.strftime("%A %d %B %Y %I:%M:%S%p")
    cv2.putText(frameClone,
                "Sector: {}".format(text),
                (10, 20),
                cv2.FONT_HERSHEY_SIMPLEX,
                conf["font_sector"],
                tuple(conf["red"]),
                2)
    cv2.putText(frameClone,
                str(time_disp.utcnow(local_tz)),
                (10, frame.shape[0] - 10),
                cv2.FONT_HERSHEY_SIMPLEX,
                conf["font_time"],
                tuple(conf["red"]),
                1)
    # Verify whether the frames should be displayed to screen.
    if conf["show_video"]:
        cv2.imshow("Scrying", frameClone)
        key = cv2.waitKey(1) & 0xFF
        # Pressing 'q' at anytime to terminate and exit.
        if key == ord("q"):
            fps.stop()
            print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
            print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
            logger.debug('User terminated program. Exiting.')
            cv2.destroyAllWindows()
            camera.stop()
            break
    # Update FPS counter.
    fps.update()
    # Clear stream in preparation for the next frame.
    # Comment out for multithreaded implementation.
    # rawCapture.truncate(0)
| |
# Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Unix version uses unrar command line executable
import subprocess
import gc
import os, os.path
import time, re
from rar_exceptions import *
class UnpackerNotInstalled(Exception): pass
# Name of the working CLI tool ('unrar' or 'rar'), discovered lazily by
# call_unrar() on first use.
rar_executable_cached = None
# Major RAR format version (4 or 5) parsed from the tool's banner in
# RarFileImplementation.init().
rar_executable_version = None
def call_unrar(params):
    """Calls rar/unrar command line executable, returns stdout pipe.

    On first use, probes for an 'unrar' or 'rar' executable on PATH and
    caches the result in the module-global ``rar_executable_cached``.

    params -- list of command-line arguments (command plus switches/files).
    Returns a subprocess.Popen with stdout and stderr piped.
    Raises UnpackerNotInstalled if no suitable executable is found.
    """
    global rar_executable_cached
    if rar_executable_cached is None:
        for command in ('unrar', 'rar'):
            try:
                # Capture stderr too so the probe's usage banner never leaks
                # to the console, and reap the process immediately —
                # previously the probe Popen was abandoned, leaving a zombie.
                probe = subprocess.Popen([command], stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                probe.communicate()
                rar_executable_cached = command
                break
            except OSError:
                # Executable not present under this name; try the next one.
                pass
        if rar_executable_cached is None:
            raise UnpackerNotInstalled("No suitable RAR unpacker installed")
    assert type(params) == list, "params must be list"
    args = [rar_executable_cached] + params
    try:
        gc.disable()  # See http://bugs.python.org/issue1336
        return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    finally:
        gc.enable()
class RarFileImplementation(object):
    """Unix backend for reading RAR archives by driving the rar/unrar CLI.

    Python 2 code: relies on iterator ``.next()`` and the ``unicode`` type.
    NOTE(review): ``self.archiveName`` is read but never assigned here —
    presumably set by the frontend class this is mixed into; confirm.
    NOTE(review): methods below access attribute-style entry fields
    (``info.isdir``) although ``infoiter`` here yields dicts — presumably
    the frontend overrides ``infoiter`` to wrap entries; confirm.
    """
    def init(self, password=None):
        """Probe the archive with 'v', detect RAR 4/5 and read the comment.

        Raises FileOpenError, IncorrectRARPassword or InvalidRARArchive
        based on the tool's output; UnpackerNotInstalled for versions other
        than 4.x/5.x.
        """
        global rar_executable_version
        self.password = password
        stdoutdata, stderrdata = self.call('v', []).communicate()
        # Error conditions are reported on stderr.
        for line in stderrdata.splitlines():
            if line.strip().startswith("Cannot open"):
                raise FileOpenError
            if line.find("CRC failed")>=0:
                raise IncorrectRARPassword
        accum = []
        source = iter(stdoutdata.splitlines())
        line = ''
        # Skip ahead to the version banner line (contains 'RAR ').
        while (line.find('RAR ') == -1):
            line = source.next()
        signature = line
        # The code below is mighty flaky
        # and will probably crash on localized versions of RAR
        # but I see no safe way to rewrite it using a CLI tool
        if signature.find("RAR 4") > -1:
            rar_executable_version = 4
            # Advance to the comment block, or to the listing header when
            # the archive carries no comment.
            while not (line.startswith('Comment:') or line.startswith('Pathname/Comment')):
                if line.strip().endswith('is not RAR archive'):
                    raise InvalidRARArchive
                line = source.next()
            # Collect comment lines up to the listing header.
            while not line.startswith('Pathname/Comment'):
                accum.append(line.rstrip('\n'))
                line = source.next()
            if len(accum):
                accum[0] = accum[0][9:] # strip out "Comment:" part
                self.comment = '\n'.join(accum[:-1])
            else:
                self.comment = None
        elif signature.find("RAR 5") > -1:
            rar_executable_version = 5
            line = source.next()
            # Everything up to the 'Archive:' line is the (optional) comment.
            while not line.startswith('Archive:'):
                if line.strip().endswith('is not RAR archive'):
                    raise InvalidRARArchive
                accum.append(line.rstrip('\n'))
                line = source.next()
            if len(accum):
                self.comment = '\n'.join(accum[:-1]).strip()
            else:
                self.comment = None
        else:
            raise UnpackerNotInstalled("Unsupported RAR version, expected 4.x or 5.x, found: "
                                       + signature.split(" ")[1])

    def escaped_password(self):
        """Return the value for the '-p' switch; '-' means no password."""
        # NOTE(review): '== None' is kept as-is; 'is None' is the idiom.
        return '-' if self.password == None else self.password

    def call(self, cmd, options=[], files=[]):
        """Run `cmd` on self.archiveName with '-'-prefixed options; return Popen.

        The password switch is always appended; '--' separates switches
        from the archive name so names starting with '-' are safe.
        NOTE(review): mutable default args are safe only while never mutated.
        """
        options2 = options + ['p'+self.escaped_password()]
        soptions = ['-'+x for x in options2]
        return call_unrar([cmd]+soptions+['--',self.archiveName]+files)

    def infoiter(self):
        """Yield one dict per archive entry with keys: index, filename,
        size, isdir, datetime, comment, volume.

        Parses 'v' output for RAR 4 (two lines per entry) or 'l' output
        for RAR 5 (one line per entry); '-c-' disables comment display.
        """
        command = "v" if rar_executable_version == 4 else "l"
        stdoutdata, stderrdata = self.call(command, ['c-']).communicate()
        for line in stderrdata.splitlines():
            if line.strip().startswith("Cannot open"):
                raise FileOpenError
        accum = []
        source = iter(stdoutdata.splitlines())
        line = ''
        # Skip the header; the file listing is delimited by dashed lines.
        while not line.startswith('-----------'):
            if line.strip().endswith('is not RAR archive'):
                raise InvalidRARArchive
            if line.startswith("CRC failed") or line.startswith("Checksum error"):
                raise IncorrectRARPassword
            line = source.next()
        line = source.next()
        i = 0
        re_spaces = re.compile(r"\s+")
        if rar_executable_version == 4:
            # RAR4 'v': entries come as pairs of lines (name, then fields).
            while not line.startswith('-----------'):
                accum.append(line)
                if len(accum)==2:
                    data = {}
                    data['index'] = i
                    # asterisks mark password-encrypted files
                    data['filename'] = accum[0].strip().lstrip("*") # asterisks marks password-encrypted files
                    fields = re_spaces.split(accum[1].strip())
                    data['size'] = int(fields[0])
                    attr = fields[5]
                    data['isdir'] = 'd' in attr.lower()
                    data['datetime'] = time.strptime(fields[3]+" "+fields[4], '%d-%m-%y %H:%M')
                    data['comment'] = None
                    data['volume'] = None
                    yield data
                    accum = []
                    i += 1
                line = source.next()
        elif rar_executable_version == 5:
            # RAR5 'l': one line per entry (attrs, size, date, time, name).
            while not line.startswith('-----------'):
                fields = line.strip().lstrip("*").split()
                data = {}
                data['index'] = i
                # Filename may contain spaces: it is everything after field 3.
                data['filename'] = " ".join(fields[4:])
                data['size'] = int(fields[1])
                attr = fields[0]
                data['isdir'] = 'd' in attr.lower()
                data['datetime'] = time.strptime(fields[2]+" "+fields[3], '%d-%m-%y %H:%M')
                data['comment'] = None
                data['volume'] = None
                yield data
                i += 1
                line = source.next()

    def read_files(self, checker):
        """Return [(info, content_bytes)] for entries accepted by `checker`.

        Uses 'p' (print to stdout) with '-inul' to suppress messages.
        """
        res = []
        for info in self.infoiter():
            checkres = checker(info)
            if checkres==True and not info.isdir:
                pipe = self.call('p', ['inul'], [info.filename]).stdout
                res.append((info, pipe.read()))
        return res

    def extract(self, checker, path, withSubpath, overwrite):
        """Extract entries accepted by `checker` into `path`; return their infos.

        withSubpath -- keep archive directory structure ('x') or flatten ('e').
        overwrite   -- map to rar's '-o+' (overwrite) / '-o-' (skip existing).
        """
        res = []
        command = 'x'
        if not withSubpath:
            command = 'e'
        options = []
        if overwrite:
            options.append('o+')
        else:
            options.append('o-')
        # rar treats a trailing-separator argument as the destination dir.
        if not path.endswith(os.sep):
            path += os.sep
        names = []
        for info in self.infoiter():
            checkres = checker(info)
            if type(checkres) in [str, unicode]:
                raise NotImplementedError("Condition callbacks returning strings are deprecated and only supported in Windows")
            if checkres==True and not info.isdir:
                names.append(info.filename)
                res.append(info)
        # Destination directory goes last on the command line.
        names.append(path)
        proc = self.call(command, options, names)
        stdoutdata, stderrdata = proc.communicate()
        if stderrdata.find("CRC failed")>=0 or stderrdata.find("Checksum error")>=0:
            raise IncorrectRARPassword
        return res

    def destruct(self):
        # Nothing to clean up: no persistent handles are held on Unix.
        pass

    def get_volume(self):
        """Return the zero-based volume number of this archive part, or
        None when the archive is not part of a multi-volume set.

        Parsed from the summary line that follows the file listing.
        """
        command = "v" if rar_executable_version == 4 else "l"
        stdoutdata, stderrdata = self.call(command, ['c-']).communicate()
        for line in stderrdata.splitlines():
            if line.strip().startswith("Cannot open"):
                raise FileOpenError
        source = iter(stdoutdata.splitlines())
        line = ''
        # Skip the header up to the first dashed delimiter.
        while not line.startswith('-----------'):
            if line.strip().endswith('is not RAR archive'):
                raise InvalidRARArchive
            if line.startswith("CRC failed") or line.startswith("Checksum error"):
                raise IncorrectRARPassword
            line = source.next()
        line = source.next()
        if rar_executable_version == 4:
            # Skip the file listing to the closing dashed line, then read
            # the summary line after it.
            while not line.startswith('-----------'):
                line = source.next()
            line = source.next()
            items = line.strip().split()
            if len(items)>4 and items[4]=="volume":
                return int(items[5]) - 1
            else:
                return None
        elif rar_executable_version == 5:
            # Same scheme; RAR5 puts 'volume' at a different field index.
            while not line.startswith('-----------'):
                line = source.next()
            line = source.next()
            items = line.strip().split()
            if items[1]=="volume":
                return int(items[2]) - 1
            else:
                return None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.