| code (string, 2-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2-1.05M) |
|---|---|---|---|---|---|
"""pypyr step that executes a string as python.
Uses python's exec() to evaluate and execute arbitrary python code.
"""
import builtins
import logging
# logger means the log level will be set correctly
logger = logging.getLogger(__name__)
def run_step(context):
"""Execute dynamic python code.
Takes two forms of input:
py: exec contents as dynamically interpreted python statements, with
contents of context available as vars.
pycode: exec contents as dynamically interpreted python statements,
with the context object itself available as a var.
Args:
context (pypyr.context.Context): Mandatory.
Context is a dictionary or dictionary-like.
Context must contain key 'py' or 'pycode'
"""
logger.debug("started")
if 'pycode' in context:
exec_pycode(context)
else:
context.assert_key_has_value(key='py', caller=__name__)
# .copy() is significantly faster than globals = dict(context)
# https://bugs.python.org/issue31179
globals = context.copy()
globals['__builtins__'] = builtins.__dict__
# the save function ref allows pipeline to use save to persist vars
        # back to context.
globals['save'] = get_save(context, globals)
exec(context['py'], globals)
logger.debug("done")
def exec_pycode(context):
"""Exec contents of pycode.
This form of execute means pycode does not have the contents of context in
the exec namespace, so referencing context needs to do:
a = context['myvar']
Rather than just
a = myvar
Args:
        context (pypyr.context.Context): context containing `pycode` key.
Returns:
        None. Any mutations to context are on the input arg instance itself.
"""
context.assert_key_has_value(key='pycode', caller=__name__)
logger.debug("Executing python string: %s", context['pycode'])
exec(context['pycode'], {'__builtins__': builtins.__dict__,
'context': context})
def get_save(context, namespace):
"""Return save function reference."""
def save(*args, **kwargs):
"""Save variables in exec namespace back to context.
Args:
context: instance of context to which to save vars from namespace.
namespace: set var values from this namespace.
Returns:
None. Mutates context.
"""
d = {}
for arg in args:
try:
d[arg] = namespace[arg]
except KeyError as err:
raise KeyError(f"Trying to save '{arg}', but can't find it "
"in the py step scope. Remember it should be "
"save('key'), not save(key) - mind the "
"quotes.") from err
# kwargs is {} if not set, no None worries.
d.update(**kwargs)
context.update(d)
return save
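# --- Usage sketch (illustrative, not part of the original module) ---
# A minimal example of the two input forms described in run_step's docstring.
# It assumes pypyr is installed so that pypyr.context.Context (a dict subclass
# providing assert_key_has_value) is importable; the key names used here are
# made up purely for illustration.
if __name__ == '__main__':
    from pypyr.context import Context

    # 'py' form: context keys are plain variables in the exec namespace,
    # and save() copies values back into the context.
    ctx = Context({'x': 1, 'py': "y = x + 1\nsave('y')"})
    run_step(ctx)
    assert ctx['y'] == 2

    # 'pycode' form: only the context object itself is exposed as a variable.
    ctx = Context({'x': 3, 'pycode': "context['z'] = context['x'] * 10"})
    run_step(ctx)
    assert ctx['z'] == 30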
| pypyr/pypyr-cli | pypyr/steps/py.py | Python | apache-2.0 | 2,984 |
# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import hashlib
import os
import time
import uuid
import mock
from oslo_concurrency import processutils as putils
from oslo_config import cfg
from oslo_utils import timeutils
import paramiko
import six
import cinder
from cinder import exception
from cinder import ssh_utils
from cinder import test
from cinder import utils
CONF = cfg.CONF
class ExecuteTestCase(test.TestCase):
@mock.patch('cinder.utils.processutils.execute')
def test_execute(self, mock_putils_exe):
output = utils.execute('a', 1, foo='bar')
self.assertEqual(mock_putils_exe.return_value, output)
mock_putils_exe.assert_called_once_with('a', 1, foo='bar')
@mock.patch('cinder.utils.get_root_helper')
@mock.patch('cinder.utils.processutils.execute')
def test_execute_root(self, mock_putils_exe, mock_get_helper):
output = utils.execute('a', 1, foo='bar', run_as_root=True)
self.assertEqual(mock_putils_exe.return_value, output)
mock_helper = mock_get_helper.return_value
mock_putils_exe.assert_called_once_with('a', 1, foo='bar',
run_as_root=True,
root_helper=mock_helper)
@mock.patch('cinder.utils.get_root_helper')
@mock.patch('cinder.utils.processutils.execute')
def test_execute_root_and_helper(self, mock_putils_exe, mock_get_helper):
mock_helper = mock.Mock()
output = utils.execute('a', 1, foo='bar', run_as_root=True,
root_helper=mock_helper)
self.assertEqual(mock_putils_exe.return_value, output)
self.assertFalse(mock_get_helper.called)
mock_putils_exe.assert_called_once_with('a', 1, foo='bar',
run_as_root=True,
root_helper=mock_helper)
class GetFromPathTestCase(test.TestCase):
def test_tolerates_nones(self):
f = utils.get_from_path
input = []
self.assertEqual([], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [None]
self.assertEqual([], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': None}]
self.assertEqual([], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': None}}]
self.assertEqual([{'b': None}], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}]
self.assertEqual([{'b': {'c': None}}], f(input, "a"))
self.assertEqual([{'c': None}], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}, {'a': None}]
self.assertEqual([{'b': {'c': None}}], f(input, "a"))
self.assertEqual([{'c': None}], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}]
self.assertEqual([{'b': {'c': None}}, {'b': None}], f(input, "a"))
self.assertEqual([{'c': None}], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
def test_does_select(self):
f = utils.get_from_path
input = [{'a': 'a_1'}]
self.assertEqual(['a_1'], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': 'b_1'}}]
self.assertEqual([{'b': 'b_1'}], f(input, "a"))
self.assertEqual(['b_1'], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}}]
self.assertEqual([{'b': {'c': 'c_1'}}], f(input, "a"))
self.assertEqual([{'c': 'c_1'}], f(input, "a/b"))
self.assertEqual(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}]
self.assertEqual([{'b': {'c': 'c_1'}}], f(input, "a"))
self.assertEqual([{'c': 'c_1'}], f(input, "a/b"))
self.assertEqual(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}},
{'a': {'b': None}}]
self.assertEqual([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a"))
self.assertEqual([{'c': 'c_1'}], f(input, "a/b"))
self.assertEqual(['c_1'], f(input, "a/b/c"))
input = [{'a': {'b': {'c': 'c_1'}}},
{'a': {'b': {'c': 'c_2'}}}]
self.assertEqual([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}],
f(input, "a"))
self.assertEqual([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b"))
self.assertEqual(['c_1', 'c_2'], f(input, "a/b/c"))
self.assertEqual([], f(input, "a/b/c/d"))
self.assertEqual([], f(input, "c/a/b/d"))
self.assertEqual([], f(input, "i/r/t"))
def test_flattens_lists(self):
f = utils.get_from_path
input = [{'a': [1, 2, 3]}]
self.assertEqual([1, 2, 3], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': [1, 2, 3]}}]
self.assertEqual([{'b': [1, 2, 3]}], f(input, "a"))
self.assertEqual([1, 2, 3], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}]
self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}]
self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = [{'a': [1, 2, {'b': 'b_1'}]}]
self.assertEqual([1, 2, {'b': 'b_1'}], f(input, "a"))
self.assertEqual(['b_1'], f(input, "a/b"))
def test_bad_xpath(self):
f = utils.get_from_path
self.assertRaises(exception.Error, f, [], None)
self.assertRaises(exception.Error, f, [], "")
self.assertRaises(exception.Error, f, [], "/")
self.assertRaises(exception.Error, f, [], "/a")
self.assertRaises(exception.Error, f, [], "/a/")
self.assertRaises(exception.Error, f, [], "//")
self.assertRaises(exception.Error, f, [], "//a")
self.assertRaises(exception.Error, f, [], "a//a")
self.assertRaises(exception.Error, f, [], "a//a/")
self.assertRaises(exception.Error, f, [], "a/a/")
def test_real_failure1(self):
# Real world failure case...
# We weren't coping when the input was a Dictionary instead of a List
# This led to test_accepts_dictionaries
f = utils.get_from_path
inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}],
'address': '192.168.0.3'},
'hostname': ''}
private_ips = f(inst, 'fixed_ip/address')
public_ips = f(inst, 'fixed_ip/floating_ips/address')
self.assertEqual(['192.168.0.3'], private_ips)
self.assertEqual(['1.2.3.4'], public_ips)
def test_accepts_dictionaries(self):
f = utils.get_from_path
input = {'a': [1, 2, 3]}
self.assertEqual([1, 2, 3], f(input, "a"))
self.assertEqual([], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = {'a': {'b': [1, 2, 3]}}
self.assertEqual([{'b': [1, 2, 3]}], f(input, "a"))
self.assertEqual([1, 2, 3], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}
self.assertEqual([1, 2, 3, 4, 5, 6], f(input, "a/b"))
self.assertEqual([], f(input, "a/b/c"))
input = {'a': [1, 2, {'b': 'b_1'}]}
self.assertEqual([1, 2, {'b': 'b_1'}], f(input, "a"))
self.assertEqual(['b_1'], f(input, "a/b"))
class GenericUtilsTestCase(test.TestCase):
@mock.patch('os.path.exists', return_value=True)
def test_find_config(self, mock_exists):
path = '/etc/cinder/cinder.conf'
cfgpath = utils.find_config(path)
self.assertEqual(path, cfgpath)
mock_exists.return_value = False
self.assertRaises(exception.ConfigNotFound,
utils.find_config,
path)
def test_as_int(self):
test_obj_int = '2'
test_obj_float = '2.2'
for obj in [test_obj_int, test_obj_float]:
self.assertEqual(2, utils.as_int(obj))
obj = 'not_a_number'
self.assertEqual(obj, utils.as_int(obj))
self.assertRaises(TypeError,
utils.as_int,
obj,
quiet=False)
def test_is_int_like(self):
self.assertTrue(utils.is_int_like(1))
self.assertTrue(utils.is_int_like(-1))
self.assertTrue(utils.is_int_like(0b1))
self.assertTrue(utils.is_int_like(0o1))
self.assertTrue(utils.is_int_like(0x1))
self.assertTrue(utils.is_int_like('1'))
self.assertFalse(utils.is_int_like(1.0))
self.assertFalse(utils.is_int_like('abc'))
def test_check_exclusive_options(self):
utils.check_exclusive_options()
utils.check_exclusive_options(something=None,
pretty_keys=True,
unit_test=True)
self.assertRaises(exception.InvalidInput,
utils.check_exclusive_options,
test=True,
unit=False,
pretty_keys=True)
self.assertRaises(exception.InvalidInput,
utils.check_exclusive_options,
test=True,
unit=False,
pretty_keys=False)
    def test_require_driver_initialized(self):
driver = mock.Mock()
driver.initialized = True
utils.require_driver_initialized(driver)
driver.initialized = False
self.assertRaises(exception.DriverNotInitialized,
utils.require_driver_initialized,
driver)
def test_hostname_unicode_sanitization(self):
hostname = u"\u7684.test.example.com"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_periods(self):
hostname = "....test.example.com..."
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_dashes(self):
hostname = "----test.example.com---"
self.assertEqual("test.example.com",
utils.sanitize_hostname(hostname))
def test_hostname_sanitize_characters(self):
hostname = "(#@&$!(@*--#&91)(__=+--test-host.example!!.com-0+"
self.assertEqual("91----test-host.example.com-0",
utils.sanitize_hostname(hostname))
def test_hostname_translate(self):
hostname = "<}\x1fh\x10e\x08l\x02l\x05o\x12!{>"
self.assertEqual("hello", utils.sanitize_hostname(hostname))
def test_is_valid_boolstr(self):
self.assertTrue(utils.is_valid_boolstr(True))
self.assertTrue(utils.is_valid_boolstr('trUe'))
self.assertTrue(utils.is_valid_boolstr(False))
self.assertTrue(utils.is_valid_boolstr('faLse'))
self.assertTrue(utils.is_valid_boolstr('yeS'))
self.assertTrue(utils.is_valid_boolstr('nO'))
self.assertTrue(utils.is_valid_boolstr('y'))
self.assertTrue(utils.is_valid_boolstr('N'))
self.assertTrue(utils.is_valid_boolstr(1))
self.assertTrue(utils.is_valid_boolstr('1'))
self.assertTrue(utils.is_valid_boolstr(0))
self.assertTrue(utils.is_valid_boolstr('0'))
def test_generate_glance_url(self):
generated_url = utils.generate_glance_url()
actual_url = "http://%s:%d" % (CONF.glance_host,
CONF.glance_port)
self.assertEqual(generated_url, actual_url)
@mock.patch('os.path.join', side_effect=lambda x, y: '/'.join((x, y)))
def test_make_dev_path(self, mock_join):
self.assertEqual('/dev/xvda', utils.make_dev_path('xvda'))
self.assertEqual('/dev/xvdb1', utils.make_dev_path('xvdb', 1))
self.assertEqual('/foo/xvdc1', utils.make_dev_path('xvdc', 1, '/foo'))
@mock.patch('cinder.utils.execute')
def test_read_file_as_root(self, mock_exec):
out = mock.Mock()
err = mock.Mock()
mock_exec.return_value = (out, err)
test_filepath = '/some/random/path'
output = utils.read_file_as_root(test_filepath)
mock_exec.assert_called_once_with('cat', test_filepath,
run_as_root=True)
self.assertEqual(out, output)
@mock.patch('cinder.utils.execute',
side_effect=putils.ProcessExecutionError)
def test_read_file_as_root_fails(self, mock_exec):
test_filepath = '/some/random/path'
self.assertRaises(exception.FileNotFound,
utils.read_file_as_root,
test_filepath)
@mock.patch('oslo_utils.timeutils.utcnow')
def test_service_is_up(self, mock_utcnow):
fts_func = datetime.datetime.fromtimestamp
fake_now = 1000
down_time = 5
self.flags(service_down_time=down_time)
mock_utcnow.return_value = fts_func(fake_now)
# Up (equal)
service = {'updated_at': fts_func(fake_now - down_time),
'created_at': fts_func(fake_now - down_time)}
result = utils.service_is_up(service)
self.assertTrue(result)
# Up
service = {'updated_at': fts_func(fake_now - down_time + 1),
'created_at': fts_func(fake_now - down_time + 1)}
result = utils.service_is_up(service)
self.assertTrue(result)
# Down
service = {'updated_at': fts_func(fake_now - down_time - 1),
'created_at': fts_func(fake_now - down_time - 1)}
result = utils.service_is_up(service)
self.assertFalse(result)
def test_safe_parse_xml(self):
normal_body = ('<?xml version="1.0" ?>'
'<foo><bar><v1>hey</v1><v2>there</v2></bar></foo>')
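        # killer_body builds a "billion laughs"-style payload: nested entities
        # a -> b -> c that multiply in size when the parser resolves &c; 9999
        # times, which safe_minidom_parse_string is expected to reject.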
def killer_body():
return (("""<!DOCTYPE x [
<!ENTITY a "%(a)s">
<!ENTITY b "%(b)s">
<!ENTITY c "%(c)s">]>
<foo>
<bar>
<v1>%(d)s</v1>
</bar>
</foo>""") % {
'a': 'A' * 10,
'b': '&a;' * 10,
'c': '&b;' * 10,
'd': '&c;' * 9999,
}).strip()
dom = utils.safe_minidom_parse_string(normal_body)
# Some versions of minidom inject extra newlines so we ignore them
result = str(dom.toxml()).replace('\n', '')
self.assertEqual(normal_body, result)
self.assertRaises(ValueError,
utils.safe_minidom_parse_string,
killer_body())
def test_xhtml_escape(self):
        self.assertEqual('&quot;foo&quot;', utils.xhtml_escape('"foo"'))
        self.assertEqual('&apos;foo&apos;', utils.xhtml_escape("'foo'"))
def test_hash_file(self):
data = 'Mary had a little lamb, its fleece as white as snow'
flo = six.StringIO(data)
h1 = utils.hash_file(flo)
h2 = hashlib.sha1(data).hexdigest()
self.assertEqual(h1, h2)
def test_check_ssh_injection(self):
cmd_list = ['ssh', '-D', 'my_name@name_of_remote_computer']
self.assertIsNone(utils.check_ssh_injection(cmd_list))
cmd_list = ['echo', '"quoted arg with space"']
self.assertIsNone(utils.check_ssh_injection(cmd_list))
cmd_list = ['echo', "'quoted arg with space'"]
self.assertIsNone(utils.check_ssh_injection(cmd_list))
def test_check_ssh_injection_on_error(self):
with_unquoted_space = ['ssh', 'my_name@ name_of_remote_computer']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_unquoted_space)
with_danger_chars = ['||', 'my_name@name_of_remote_computer']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_danger_chars)
with_danger_char = [';', 'my_name@name_of_remote_computer']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_danger_char)
with_special = ['cmd', 'virus;ls']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_special)
quoted_with_unescaped = ['cmd', '"arg\"withunescaped"']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
quoted_with_unescaped)
bad_before_quotes = ['cmd', 'virus;"quoted argument"']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
bad_before_quotes)
bad_after_quotes = ['echo', '"quoted argument";rm -rf']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
bad_after_quotes)
bad_within_quotes = ['echo', "'quoted argument `rm -rf`'"]
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
bad_within_quotes)
with_multiple_quotes = ['echo', '"quoted";virus;"quoted"']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_multiple_quotes)
with_multiple_quotes = ['echo', '"quoted";virus;\'quoted\'']
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection,
with_multiple_quotes)
@mock.patch('paramiko.SSHClient')
def test_create_channel(self, mock_client):
test_width = 600
test_height = 800
mock_channel = mock.Mock()
mock_client.invoke_shell.return_value = mock_channel
utils.create_channel(mock_client, test_width, test_height)
mock_client.invoke_shell.assert_called_once_with()
mock_channel.resize_pty.assert_called_once_with(test_width,
test_height)
@mock.patch('os.stat')
def test_get_file_mode(self, mock_stat):
class stat_result(object):
st_mode = 0o777
st_gid = 33333
test_file = '/var/tmp/made_up_file'
mock_stat.return_value = stat_result
mode = utils.get_file_mode(test_file)
self.assertEqual(mode, 0o777)
mock_stat.assert_called_once_with(test_file)
@mock.patch('os.stat')
def test_get_file_gid(self, mock_stat):
class stat_result(object):
st_mode = 0o777
st_gid = 33333
test_file = '/var/tmp/made_up_file'
mock_stat.return_value = stat_result
gid = utils.get_file_gid(test_file)
self.assertEqual(gid, 33333)
mock_stat.assert_called_once_with(test_file)
@mock.patch('cinder.utils.CONF')
def test_get_root_helper(self, mock_conf):
mock_conf.rootwrap_config = '/path/to/conf'
self.assertEqual('sudo cinder-rootwrap /path/to/conf',
utils.get_root_helper())
class TemporaryChownTestCase(test.TestCase):
@mock.patch('os.stat')
@mock.patch('os.getuid', return_value=1234)
@mock.patch('cinder.utils.execute')
def test_get_uid(self, mock_exec, mock_getuid, mock_stat):
mock_stat.return_value.st_uid = 5678
test_filename = 'a_file'
with utils.temporary_chown(test_filename):
mock_exec.assert_called_once_with('chown', 1234, test_filename,
run_as_root=True)
        mock_getuid.assert_called_once_with()
mock_stat.assert_called_once_with(test_filename)
calls = [mock.call('chown', 1234, test_filename, run_as_root=True),
mock.call('chown', 5678, test_filename, run_as_root=True)]
mock_exec.assert_has_calls(calls)
@mock.patch('os.stat')
@mock.patch('os.getuid', return_value=1234)
@mock.patch('cinder.utils.execute')
def test_supplied_owner_uid(self, mock_exec, mock_getuid, mock_stat):
mock_stat.return_value.st_uid = 5678
test_filename = 'a_file'
with utils.temporary_chown(test_filename, owner_uid=9101):
mock_exec.assert_called_once_with('chown', 9101, test_filename,
run_as_root=True)
self.assertFalse(mock_getuid.called)
mock_stat.assert_called_once_with(test_filename)
calls = [mock.call('chown', 9101, test_filename, run_as_root=True),
mock.call('chown', 5678, test_filename, run_as_root=True)]
mock_exec.assert_has_calls(calls)
@mock.patch('os.stat')
@mock.patch('os.getuid', return_value=5678)
@mock.patch('cinder.utils.execute')
def test_matching_uid(self, mock_exec, mock_getuid, mock_stat):
mock_stat.return_value.st_uid = 5678
test_filename = 'a_file'
with utils.temporary_chown(test_filename):
pass
        mock_getuid.assert_called_once_with()
mock_stat.assert_called_once_with(test_filename)
self.assertFalse(mock_exec.called)
class TempdirTestCase(test.TestCase):
@mock.patch('tempfile.mkdtemp')
@mock.patch('shutil.rmtree')
def test_tempdir(self, mock_rmtree, mock_mkdtemp):
with utils.tempdir(a='1', b=2) as td:
self.assertEqual(mock_mkdtemp.return_value, td)
self.assertFalse(mock_rmtree.called)
mock_mkdtemp.assert_called_once_with(a='1', b=2)
mock_rmtree.assert_called_once_with(mock_mkdtemp.return_value)
@mock.patch('tempfile.mkdtemp')
@mock.patch('shutil.rmtree', side_effect=OSError)
def test_tempdir_error(self, mock_rmtree, mock_mkdtemp):
with utils.tempdir(a='1', b=2) as td:
self.assertEqual(mock_mkdtemp.return_value, td)
self.assertFalse(mock_rmtree.called)
mock_mkdtemp.assert_called_once_with(a='1', b=2)
mock_rmtree.assert_called_once_with(mock_mkdtemp.return_value)
class WalkClassHierarchyTestCase(test.TestCase):
def test_walk_class_hierarchy(self):
class A(object):
pass
class B(A):
pass
class C(A):
pass
class D(B):
pass
class E(A):
pass
class_pairs = zip((D, B, E),
utils.walk_class_hierarchy(A, encountered=[C]))
for actual, expected in class_pairs:
self.assertEqual(actual, expected)
class_pairs = zip((D, B, C, E), utils.walk_class_hierarchy(A))
for actual, expected in class_pairs:
self.assertEqual(actual, expected)
class GetDiskOfPartitionTestCase(test.TestCase):
def test_devpath_is_diskpath(self):
devpath = '/some/path'
st_mock = mock.Mock()
output = utils._get_disk_of_partition(devpath, st_mock)
self.assertEqual('/some/path', output[0])
self.assertIs(st_mock, output[1])
with mock.patch('os.stat') as mock_stat:
devpath = '/some/path'
output = utils._get_disk_of_partition(devpath)
mock_stat.assert_called_once_with(devpath)
self.assertEqual(devpath, output[0])
self.assertIs(mock_stat.return_value, output[1])
@mock.patch('os.stat', side_effect=OSError)
def test_stat_oserror(self, mock_stat):
st_mock = mock.Mock()
devpath = '/some/path1'
output = utils._get_disk_of_partition(devpath, st_mock)
mock_stat.assert_called_once_with('/some/path')
self.assertEqual(devpath, output[0])
self.assertIs(st_mock, output[1])
@mock.patch('stat.S_ISBLK', return_value=True)
@mock.patch('os.stat')
def test_diskpath_is_block_device(self, mock_stat, mock_isblk):
st_mock = mock.Mock()
devpath = '/some/path1'
output = utils._get_disk_of_partition(devpath, st_mock)
self.assertEqual('/some/path', output[0])
self.assertEqual(mock_stat.return_value, output[1])
@mock.patch('stat.S_ISBLK', return_value=False)
@mock.patch('os.stat')
def test_diskpath_is_not_block_device(self, mock_stat, mock_isblk):
st_mock = mock.Mock()
devpath = '/some/path1'
output = utils._get_disk_of_partition(devpath, st_mock)
self.assertEqual(devpath, output[0])
self.assertEqual(st_mock, output[1])
class GetBlkdevMajorMinorTestCase(test.TestCase):
@mock.patch('os.stat')
def test_get_file_size(self, mock_stat):
class stat_result(object):
st_mode = 0o777
st_size = 1074253824
test_file = '/var/tmp/made_up_file'
mock_stat.return_value = stat_result
size = utils.get_file_size(test_file)
self.assertEqual(size, stat_result.st_size)
mock_stat.assert_called_once_with(test_file)
@mock.patch('os.stat')
def test_get_blkdev_major_minor(self, mock_stat):
class stat_result(object):
st_mode = 0o60660
st_rdev = os.makedev(253, 7)
test_device = '/dev/made_up_blkdev'
mock_stat.return_value = stat_result
dev = utils.get_blkdev_major_minor(test_device)
self.assertEqual('253:7', dev)
mock_stat.assert_called_once_with(test_device)
@mock.patch('os.stat')
@mock.patch.object(utils, 'execute')
def _test_get_blkdev_major_minor_file(self, test_partition,
mock_exec, mock_stat):
mock_exec.return_value = (
'Filesystem Size Used Avail Use%% Mounted on\n'
'%s 4096 2048 2048 50%% /tmp\n' % test_partition, None)
test_file = '/tmp/file'
test_disk = '/dev/made_up_disk'
class stat_result_file(object):
st_mode = 0o660
class stat_result_partition(object):
st_mode = 0o60660
st_rdev = os.makedev(8, 65)
class stat_result_disk(object):
st_mode = 0o60660
st_rdev = os.makedev(8, 64)
def fake_stat(path):
try:
return {test_file: stat_result_file,
test_partition: stat_result_partition,
test_disk: stat_result_disk}[path]
except KeyError:
raise OSError
mock_stat.side_effect = fake_stat
dev = utils.get_blkdev_major_minor(test_file)
mock_stat.assert_any_call(test_file)
mock_exec.assert_called_once_with('df', test_file)
if test_partition.startswith('/'):
mock_stat.assert_any_call(test_partition)
mock_stat.assert_any_call(test_disk)
return dev
def test_get_blkdev_major_minor_file(self):
dev = self._test_get_blkdev_major_minor_file('/dev/made_up_disk1')
self.assertEqual('8:64', dev)
def test_get_blkdev_major_minor_file_nfs(self):
dev = self._test_get_blkdev_major_minor_file('nfs-server:/export/path')
self.assertIsNone(dev)
@mock.patch('os.stat')
@mock.patch('stat.S_ISCHR', return_value=False)
@mock.patch('stat.S_ISBLK', return_value=False)
def test_get_blkdev_failure(self, mock_isblk, mock_ischr, mock_stat):
path = '/some/path'
self.assertRaises(exception.Error,
utils.get_blkdev_major_minor,
path, lookup_for_file=False)
mock_stat.assert_called_once_with(path)
mock_isblk.assert_called_once_with(mock_stat.return_value.st_mode)
mock_ischr.assert_called_once_with(mock_stat.return_value.st_mode)
@mock.patch('os.stat')
@mock.patch('stat.S_ISCHR', return_value=True)
@mock.patch('stat.S_ISBLK', return_value=False)
def test_get_blkdev_is_chr(self, mock_isblk, mock_ischr, mock_stat):
path = '/some/path'
output = utils.get_blkdev_major_minor(path, lookup_for_file=False)
mock_stat.assert_called_once_with(path)
mock_isblk.assert_called_once_with(mock_stat.return_value.st_mode)
mock_ischr.assert_called_once_with(mock_stat.return_value.st_mode)
self.assertIs(None, output)
class MonkeyPatchTestCase(test.TestCase):
"""Unit test for utils.monkey_patch()."""
def setUp(self):
super(MonkeyPatchTestCase, self).setUp()
self.example_package = 'cinder.tests.unit.monkey_patch_example.'
self.flags(
monkey_patch=True,
monkey_patch_modules=[self.example_package + 'example_a' + ':'
+ self.example_package
+ 'example_decorator'])
def test_monkey_patch(self):
utils.monkey_patch()
cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION = []
from cinder.tests.unit.monkey_patch_example import example_a
from cinder.tests.unit.monkey_patch_example import example_b
self.assertEqual('Example function', example_a.example_function_a())
exampleA = example_a.ExampleClassA()
exampleA.example_method()
ret_a = exampleA.example_method_add(3, 5)
self.assertEqual(ret_a, 8)
self.assertEqual('Example function', example_b.example_function_b())
exampleB = example_b.ExampleClassB()
exampleB.example_method()
ret_b = exampleB.example_method_add(3, 5)
self.assertEqual(ret_b, 8)
package_a = self.example_package + 'example_a.'
self.assertTrue(
package_a + 'example_function_a'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(
package_a + 'ExampleClassA.example_method'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertTrue(
package_a + 'ExampleClassA.example_method_add'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
package_b = self.example_package + 'example_b.'
self.assertFalse(
package_b + 'example_function_b'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(
package_b + 'ExampleClassB.example_method'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
self.assertFalse(
package_b + 'ExampleClassB.example_method_add'
in cinder.tests.unit.monkey_patch_example.CALLED_FUNCTION)
class AuditPeriodTest(test.TestCase):
def setUp(self):
super(AuditPeriodTest, self).setUp()
test_time = datetime.datetime(second=23,
minute=12,
hour=8,
day=5,
month=3,
year=2012)
patcher = mock.patch.object(timeutils, 'utcnow')
self.addCleanup(patcher.stop)
self.mock_utcnow = patcher.start()
self.mock_utcnow.return_value = test_time
def test_hour(self):
begin, end = utils.last_completed_audit_period(unit='hour')
self.assertEqual(begin,
datetime.datetime(hour=7,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@10')
self.assertEqual(begin, datetime.datetime(minute=10,
hour=7,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(minute=10,
hour=8,
day=5,
month=3,
year=2012))
def test_hour_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='hour@30')
self.assertEqual(begin, datetime.datetime(minute=30,
hour=6,
day=5,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(minute=30,
hour=7,
day=5,
month=3,
year=2012))
def test_day(self):
begin, end = utils.last_completed_audit_period(unit='day')
self.assertEqual(begin, datetime.datetime(day=4,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(day=5,
month=3,
year=2012))
def test_day_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='day@6')
self.assertEqual(begin, datetime.datetime(hour=6,
day=4,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(hour=6,
day=5,
month=3,
year=2012))
def test_day_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='day@10')
self.assertEqual(begin, datetime.datetime(hour=10,
day=3,
month=3,
year=2012))
self.assertEqual(end, datetime.datetime(hour=10,
day=4,
month=3,
year=2012))
def test_month(self):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEqual(begin, datetime.datetime(day=1,
month=2,
year=2012))
self.assertEqual(end, datetime.datetime(day=1,
month=3,
year=2012))
def test_month_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='month@2')
self.assertEqual(begin, datetime.datetime(day=2,
month=2,
year=2012))
self.assertEqual(end, datetime.datetime(day=2,
month=3,
year=2012))
def test_month_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='month@15')
self.assertEqual(begin, datetime.datetime(day=15,
month=1,
year=2012))
self.assertEqual(end, datetime.datetime(day=15,
month=2,
year=2012))
@mock.patch('oslo_utils.timeutils.utcnow',
return_value=datetime.datetime(day=1,
month=1,
year=2012))
def test_month_jan_day_first(self, mock_utcnow):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEqual(datetime.datetime(day=1, month=11, year=2011), begin)
self.assertEqual(datetime.datetime(day=1, month=12, year=2011), end)
@mock.patch('oslo_utils.timeutils.utcnow',
return_value=datetime.datetime(day=2,
month=1,
year=2012))
def test_month_jan_day_not_first(self, mock_utcnow):
begin, end = utils.last_completed_audit_period(unit='month')
self.assertEqual(datetime.datetime(day=1, month=12, year=2011), begin)
self.assertEqual(datetime.datetime(day=1, month=1, year=2012), end)
def test_year(self):
begin, end = utils.last_completed_audit_period(unit='year')
self.assertEqual(begin, datetime.datetime(day=1,
month=1,
year=2011))
self.assertEqual(end, datetime.datetime(day=1,
month=1,
year=2012))
def test_year_with_offset_before_current(self):
begin, end = utils.last_completed_audit_period(unit='year@2')
self.assertEqual(begin, datetime.datetime(day=1,
month=2,
year=2011))
self.assertEqual(end, datetime.datetime(day=1,
month=2,
year=2012))
def test_year_with_offset_after_current(self):
begin, end = utils.last_completed_audit_period(unit='year@6')
self.assertEqual(begin, datetime.datetime(day=1,
month=6,
year=2010))
self.assertEqual(end, datetime.datetime(day=1,
month=6,
year=2011))
def test_invalid_unit(self):
self.assertRaises(ValueError,
utils.last_completed_audit_period,
unit='invalid_unit')
@mock.patch('cinder.utils.CONF')
def test_uses_conf_unit(self, mock_conf):
mock_conf.volume_usage_audit_period = 'hour'
begin1, end1 = utils.last_completed_audit_period()
self.assertEqual(60.0 * 60, (end1 - begin1).total_seconds())
mock_conf.volume_usage_audit_period = 'day'
begin2, end2 = utils.last_completed_audit_period()
self.assertEqual(60.0 * 60 * 24, (end2 - begin2).total_seconds())
class FakeSSHClient(object):
def __init__(self):
self.id = uuid.uuid4()
self.transport = FakeTransport()
def set_missing_host_key_policy(self, policy):
self.policy = policy
def load_system_host_keys(self):
self.system_host_keys = 'system_host_keys'
def load_host_keys(self, hosts_key_file):
self.hosts_key_file = hosts_key_file
def connect(self, ip, port=22, username=None, password=None,
pkey=None, timeout=10):
pass
def get_transport(self):
return self.transport
def get_policy(self):
return self.policy
def get_host_keys(self):
return '127.0.0.1 ssh-rsa deadbeef'
def close(self):
pass
def __call__(self, *args, **kwargs):
pass
class FakeSock(object):
def settimeout(self, timeout):
pass
class FakeTransport(object):
def __init__(self):
self.active = True
self.sock = FakeSock()
def set_keepalive(self, timeout):
pass
def is_active(self):
return self.active
class SSHPoolTestCase(test.TestCase):
"""Unit test for SSH Connection Pool."""
@mock.patch('cinder.ssh_utils.CONF')
@mock.patch('__builtin__.open')
@mock.patch('paramiko.SSHClient')
@mock.patch('os.path.isfile', return_value=True)
def test_ssh_default_hosts_key_file(self, mock_isfile, mock_sshclient,
mock_open, mock_conf):
mock_ssh = mock.MagicMock()
mock_sshclient.return_value = mock_ssh
mock_conf.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts'
# create with customized setting
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
host_key_files = sshpool.hosts_key_file
self.assertEqual('/var/lib/cinder/ssh_known_hosts', host_key_files)
mock_ssh.load_host_keys.assert_called_once_with(
'/var/lib/cinder/ssh_known_hosts')
@mock.patch('cinder.ssh_utils.CONF')
@mock.patch('__builtin__.open')
@mock.patch('paramiko.SSHClient')
@mock.patch('os.path.isfile', return_value=True)
def test_ssh_host_key_file_kwargs(self, mock_isfile, mock_sshclient,
mock_open, mock_conf):
mock_ssh = mock.MagicMock()
mock_sshclient.return_value = mock_ssh
mock_conf.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts'
# create with customized setting
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1,
hosts_key_file='dummy_host_keyfile')
host_key_files = sshpool.hosts_key_file
self.assertIn('dummy_host_keyfile', host_key_files)
self.assertIn('/var/lib/cinder/ssh_known_hosts', host_key_files)
expected = [
mock.call.load_host_keys('dummy_host_keyfile'),
mock.call.load_host_keys('/var/lib/cinder/ssh_known_hosts')]
mock_ssh.assert_has_calls(expected, any_order=True)
@mock.patch('cinder.ssh_utils.CONF')
@mock.patch('__builtin__.open')
@mock.patch('os.path.isfile', return_value=True)
@mock.patch('paramiko.RSAKey.from_private_key_file')
@mock.patch('paramiko.SSHClient')
def test_single_ssh_connect(self, mock_sshclient, mock_pkey, mock_isfile,
mock_open, mock_conf):
mock_conf.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts'
# create with password
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
with sshpool.item() as ssh:
first_id = ssh.id
with sshpool.item() as ssh:
second_id = ssh.id
self.assertEqual(first_id, second_id)
self.assertEqual(1, mock_sshclient.return_value.connect.call_count)
# create with private key
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
privatekey="test",
min_size=1,
max_size=1)
self.assertEqual(2, mock_sshclient.return_value.connect.call_count)
# attempt to create with no password or private key
self.assertRaises(paramiko.SSHException,
ssh_utils.SSHPool,
"127.0.0.1", 22, 10,
"test",
min_size=1,
max_size=1)
@mock.patch('__builtin__.open')
@mock.patch('paramiko.SSHClient')
def test_closed_reopened_ssh_connections(self, mock_sshclient, mock_open):
        mock_sshclient.return_value = FakeSSHClient()
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=4)
with sshpool.item() as ssh:
mock_sshclient.reset_mock()
first_id = ssh.id
with sshpool.item() as ssh:
second_id = ssh.id
ssh.get_transport().active = False
sshpool.remove(ssh)
self.assertEqual(first_id, second_id)
# create a new client
mock_sshclient.return_value = FakeSSHClient()
with sshpool.item() as ssh:
third_id = ssh.id
self.assertNotEqual(first_id, third_id)
@mock.patch('cinder.ssh_utils.CONF')
@mock.patch('__builtin__.open')
@mock.patch('paramiko.SSHClient')
def test_missing_ssh_hosts_key_config(self, mock_sshclient, mock_open,
mock_conf):
mock_sshclient.return_value = FakeSSHClient()
mock_conf.ssh_hosts_key_file = None
# create with password
self.assertRaises(exception.ParameterNotFound,
ssh_utils.SSHPool,
"127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
@mock.patch('__builtin__.open')
@mock.patch('paramiko.SSHClient')
def test_create_default_known_hosts_file(self, mock_sshclient,
mock_open):
mock_sshclient.return_value = FakeSSHClient()
CONF.state_path = '/var/lib/cinder'
CONF.ssh_hosts_key_file = '/var/lib/cinder/ssh_known_hosts'
default_file = '/var/lib/cinder/ssh_known_hosts'
ssh_pool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
with ssh_pool.item() as ssh:
mock_open.assert_called_once_with(default_file, 'a')
ssh_pool.remove(ssh)
@mock.patch('os.path.isfile', return_value=False)
@mock.patch('__builtin__.open')
@mock.patch('paramiko.SSHClient')
def test_ssh_missing_hosts_key_file(self, mock_sshclient, mock_open,
mock_isfile):
mock_sshclient.return_value = FakeSSHClient()
CONF.ssh_hosts_key_file = '/tmp/blah'
self.assertNotIn(CONF.state_path, CONF.ssh_hosts_key_file)
self.assertRaises(exception.InvalidInput,
ssh_utils.SSHPool,
"127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
@mock.patch.multiple('cinder.ssh_utils.CONF',
strict_ssh_host_key_policy=True,
ssh_hosts_key_file='/var/lib/cinder/ssh_known_hosts')
@mock.patch('__builtin__.open')
@mock.patch('paramiko.SSHClient')
@mock.patch('os.path.isfile', return_value=True)
def test_ssh_strict_host_key_policy(self, mock_isfile, mock_sshclient,
mock_open):
mock_sshclient.return_value = FakeSSHClient()
# create with customized setting
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
with sshpool.item() as ssh:
self.assertTrue(isinstance(ssh.get_policy(),
paramiko.RejectPolicy))
@mock.patch('__builtin__.open')
@mock.patch('paramiko.SSHClient')
@mock.patch('os.path.isfile', return_value=True)
def test_ssh_not_strict_host_key_policy(self, mock_isfile, mock_sshclient,
mock_open):
mock_sshclient.return_value = FakeSSHClient()
CONF.strict_ssh_host_key_policy = False
# create with customized setting
sshpool = ssh_utils.SSHPool("127.0.0.1", 22, 10,
"test",
password="test",
min_size=1,
max_size=1)
with sshpool.item() as ssh:
self.assertTrue(isinstance(ssh.get_policy(),
paramiko.AutoAddPolicy))
class BrickUtils(test.TestCase):
"""Unit test to test the brick utility
wrapper functions.
"""
@mock.patch('cinder.utils.CONF')
@mock.patch('os_brick.initiator.connector.get_connector_properties')
@mock.patch('cinder.utils.get_root_helper')
def test_brick_get_connector_properties(self, mock_helper, mock_get,
mock_conf):
mock_conf.my_ip = '1.2.3.4'
output = utils.brick_get_connector_properties()
mock_helper.assert_called_once_with()
mock_get.assert_called_once_with(mock_helper.return_value, '1.2.3.4',
False, False)
self.assertEqual(mock_get.return_value, output)
@mock.patch('os_brick.initiator.connector.InitiatorConnector.factory')
@mock.patch('cinder.utils.get_root_helper')
def test_brick_get_connector(self, mock_helper, mock_factory):
output = utils.brick_get_connector('protocol')
mock_helper.assert_called_once_with()
self.assertEqual(mock_factory.return_value, output)
mock_factory.assert_called_once_with(
'protocol', mock_helper.return_value, driver=None,
execute=putils.execute, use_multipath=False,
device_scan_attempts=3)
class StringLengthTestCase(test.TestCase):
def test_check_string_length(self):
self.assertIsNone(utils.check_string_length(
'test', 'name', max_length=255))
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
11, 'name', max_length=255)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'', 'name', min_length=1)
self.assertRaises(exception.InvalidInput,
utils.check_string_length,
'a' * 256, 'name', max_length=255)
class AddVisibleAdminMetadataTestCase(test.TestCase):
def test_add_visible_admin_metadata_visible_key_only(self):
admin_metadata = [{"key": "invisible_key", "value": "invisible_value"},
{"key": "readonly", "value": "visible"},
{"key": "attached_mode", "value": "visible"}]
metadata = [{"key": "key", "value": "value"},
{"key": "readonly", "value": "existing"}]
volume = {'volume_admin_metadata': admin_metadata,
'volume_metadata': metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual([{"key": "key", "value": "value"},
{"key": "readonly", "value": "visible"},
{"key": "attached_mode", "value": "visible"}],
volume['volume_metadata'])
admin_metadata = {"invisible_key": "invisible_value",
"readonly": "visible",
"attached_mode": "visible"}
metadata = {"key": "value", "readonly": "existing"}
volume = {'admin_metadata': admin_metadata,
'metadata': metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual({'key': 'value',
'attached_mode': 'visible',
'readonly': 'visible'},
volume['metadata'])
def test_add_visible_admin_metadata_no_visible_keys(self):
admin_metadata = [
{"key": "invisible_key1", "value": "invisible_value1"},
{"key": "invisible_key2", "value": "invisible_value2"},
{"key": "invisible_key3", "value": "invisible_value3"}]
metadata = [{"key": "key", "value": "value"}]
volume = {'volume_admin_metadata': admin_metadata,
'volume_metadata': metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual([{"key": "key", "value": "value"}],
volume['volume_metadata'])
admin_metadata = {"invisible_key1": "invisible_value1",
"invisible_key2": "invisible_value2",
"invisible_key3": "invisible_value3"}
metadata = {"key": "value"}
volume = {'admin_metadata': admin_metadata,
'metadata': metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual({'key': 'value'}, volume['metadata'])
def test_add_visible_admin_metadata_no_existing_metadata(self):
admin_metadata = [{"key": "invisible_key", "value": "invisible_value"},
{"key": "readonly", "value": "visible"},
{"key": "attached_mode", "value": "visible"}]
volume = {'volume_admin_metadata': admin_metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual({'attached_mode': 'visible', 'readonly': 'visible'},
volume['metadata'])
admin_metadata = {"invisible_key": "invisible_value",
"readonly": "visible",
"attached_mode": "visible"}
volume = {'admin_metadata': admin_metadata}
utils.add_visible_admin_metadata(volume)
self.assertEqual({'attached_mode': 'visible', 'readonly': 'visible'},
volume['metadata'])
class InvalidFilterTestCase(test.TestCase):
def test_admin_allows_all_options(self):
ctxt = mock.Mock(name='context')
ctxt.is_admin = True
filters = {'allowed1': None, 'allowed2': None, 'not_allowed1': None}
fltrs_orig = {'allowed1': None, 'allowed2': None, 'not_allowed1': None}
allowed_search_options = ('allowed1', 'allowed2')
allowed_orig = ('allowed1', 'allowed2')
utils.remove_invalid_filter_options(ctxt, filters,
allowed_search_options)
self.assertEqual(allowed_orig, allowed_search_options)
self.assertEqual(fltrs_orig, filters)
def test_admin_allows_some_options(self):
ctxt = mock.Mock(name='context')
ctxt.is_admin = False
filters = {'allowed1': None, 'allowed2': None, 'not_allowed1': None}
fltrs_orig = {'allowed1': None, 'allowed2': None, 'not_allowed1': None}
allowed_search_options = ('allowed1', 'allowed2')
allowed_orig = ('allowed1', 'allowed2')
utils.remove_invalid_filter_options(ctxt, filters,
allowed_search_options)
self.assertEqual(allowed_orig, allowed_search_options)
self.assertNotEqual(fltrs_orig, filters)
self.assertEqual(allowed_search_options, tuple(sorted(filters.keys())))
class IsBlkDeviceTestCase(test.TestCase):
@mock.patch('stat.S_ISBLK', return_value=True)
@mock.patch('os.stat')
def test_is_blk_device(self, mock_os_stat, mock_S_ISBLK):
dev = 'some_device'
self.assertTrue(utils.is_blk_device(dev))
@mock.patch('stat.S_ISBLK', return_value=False)
@mock.patch('os.stat')
def test_not_is_blk_device(self, mock_os_stat, mock_S_ISBLK):
dev = 'not_some_device'
self.assertFalse(utils.is_blk_device(dev))
@mock.patch('stat.S_ISBLK', side_effect=Exception)
@mock.patch('os.stat')
def test_fail_is_blk_device(self, mock_os_stat, mock_S_ISBLK):
dev = 'device_exception'
self.assertFalse(utils.is_blk_device(dev))
class WrongException(Exception):
pass
class TestRetryDecorator(test.TestCase):
def setUp(self):
super(TestRetryDecorator, self).setUp()
def test_no_retry_required(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.VolumeBackendAPIException,
interval=2,
retries=3,
backoff_rate=2)
def succeeds():
self.counter += 1
return 'success'
ret = succeeds()
self.assertFalse(mock_sleep.called)
self.assertEqual(ret, 'success')
self.assertEqual(self.counter, 1)
def test_retries_once(self):
self.counter = 0
interval = 2
backoff_rate = 2
retries = 3
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.VolumeBackendAPIException,
interval,
retries,
backoff_rate)
def fails_once():
self.counter += 1
if self.counter < 2:
raise exception.VolumeBackendAPIException(data='fake')
else:
return 'success'
ret = fails_once()
self.assertEqual(ret, 'success')
self.assertEqual(self.counter, 2)
self.assertEqual(mock_sleep.call_count, 1)
mock_sleep.assert_called_with(interval * backoff_rate)
def test_limit_is_reached(self):
self.counter = 0
retries = 3
interval = 2
backoff_rate = 4
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.VolumeBackendAPIException,
interval,
retries,
backoff_rate)
def always_fails():
self.counter += 1
raise exception.VolumeBackendAPIException(data='fake')
self.assertRaises(exception.VolumeBackendAPIException,
always_fails)
self.assertEqual(retries, self.counter)
expected_sleep_arg = []
for i in xrange(retries):
if i > 0:
interval *= backoff_rate
expected_sleep_arg.append(float(interval))
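            # With interval=2, backoff_rate=4, retries=3 this expects sleeps of
            # 2.0, 8.0 and 32.0 seconds: the first retry waits `interval`, and
            # each later retry multiplies the previous wait by `backoff_rate`.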
mock_sleep.assert_has_calls(map(mock.call, expected_sleep_arg))
def test_wrong_exception_no_retry(self):
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.VolumeBackendAPIException)
def raise_unexpected_error():
raise WrongException("wrong exception")
self.assertRaises(WrongException, raise_unexpected_error)
self.assertFalse(mock_sleep.called)
class VersionTestCase(test.TestCase):
def test_convert_version_to_int(self):
self.assertEqual(utils.convert_version_to_int('6.2.0'), 6002000)
self.assertEqual(utils.convert_version_to_int((6, 4, 3)), 6004003)
self.assertEqual(utils.convert_version_to_int((5, )), 5)
self.assertRaises(exception.CinderException,
utils.convert_version_to_int, '5a.6b')
def test_convert_version_to_string(self):
self.assertEqual(utils.convert_version_to_str(6007000), '6.7.0')
self.assertEqual(utils.convert_version_to_str(4), '4')
def test_convert_version_to_tuple(self):
self.assertEqual(utils.convert_version_to_tuple('6.7.0'), (6, 7, 0))
| julianwang/cinder | cinder/tests/unit/test_utils.py | Python | apache-2.0 | 61,444 |
# Create your views here.
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import json
from couchbase import Couchbase
from couchbase import *
import subprocess
import os
import time
import ConfigParser
from time import sleep
import sys
import copy
from pprint import pprint
import az
def timestamp():
now = time.time()
localtime = time.localtime(now)
milliseconds = '%03d' % int((now - int(now)) * 1000)
return time.strftime('%Y%m%d%H%M%S', localtime) + milliseconds
def _create_node_AWS(request):
location = request['loc']
if location == 'East':
cls = get_driver(Provider.EC2_US_EAST)
AWS_EC2_ACCESS_ID = request['ackey']
AWS_EC2_SECRET_KEY = request['seckey']
driver = cls(AWS_EC2_ACCESS_ID, AWS_EC2_SECRET_KEY)
ACCESS_KEY_NAME = request['keyname']
sizes = driver.list_sizes()
for size in sizes:
print size
MY_SIZE = request['machine']
MY_IMAGE = 'ami-76817c1e'
size = [s for s in sizes if s.id == MY_SIZE][0]
image = driver.get_image(MY_IMAGE)
print image
print size
y = request['cpus']
a=0
n = int(y)
nodes = list()
while a < n :
tm = timestamp()
nodename = "{0}_".format(request['depname']) + "{0}".format(tm)
node = driver.create_node(name=nodename, image=image, size=size, ex_keyname=ACCESS_KEY_NAME)
nodes.append(node)
a = a+1;
nodesup = list()
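    # Poll until every node leaves the pending state: libcloud reports
    # NodeState.RUNNING as 0, so this busy-waits (re-listing the region's
    # nodes each pass) until the instance is running before returning it.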
for node in nodes :
while node.state != 0 :
regionNodes = driver.list_nodes()
node = [val for val in regionNodes if val.id == node.id][0]
continue
nodesup.append(node)
for node in nodesup:
print node.__dict__
return nodesup
def _create_node_GCE(request):
Driver = get_driver(Provider.GCE)
print request['email']
print request['loc']
print request['projid']
    gce = Driver('{0}'.format(request['email']), "./PRIV.pem",
                 datacenter='{0}'.format(request['loc']),
                 project='{0}'.format(request['projid']))
sizes = gce.list_sizes()
for size in sizes:
print size
images = gce.list_images()
for image in images:
print image
location = request['loc']
fp = open("/tmp/id_rsa.pub", 'r')
key = fp.read()
fp.close()
metadata = {'sshKeys': 'couchbase:%s' %key}
y = request['cpus']
a=0
n = int(y)
nodes = list()
while a < n :
tm = timestamp()
nodename = "{0}".format(request['depname']) + "{0}".format(tm)
node = gce.create_node(name=nodename, image='centos-6', size=request['machine'], ex_metadata= metadata)
nodes.append(node)
a = a+1;
nodesup = list()
for node in nodes :
while node.state != 0 :
regionNodes = gce.list_nodes()
node = [val for val in regionNodes if val.id == node.id][0]
continue
nodesup.append(node)
for node in nodesup:
print node.__dict__
return nodesup
def _create_node_RackSpace(request):
cls = get_driver(Provider.RACKSPACE)
driver = cls(request['rkusername'],request['apikey'])
pprint(driver.list_sizes())
pprint(driver.list_nodes())
images = driver.list_images()
sizes = driver.list_sizes()
for image in images:
if image.id == "3a6e29eb-3e17-40ed-9f1e-c6c0fb8fcb76":
os_image = image
break
y = request['cpus']
a=0
n = int(y)
nodes = list()
while a < n :
tm = timestamp()
nodename = "{0}".format(request['depname']) + "{0}".format(tm)
node = driver.create_node(name=nodename, image=os_image, size=sizes[4], ex_keyname=request['keyname'])
nodes.append(node)
a = a+1;
nodesup = list()
for node in nodes :
while node.state != 0 :
regionNodes = driver.list_nodes()
node = [val for val in regionNodes if val.id == node.id][0]
continue
nodesup.append(node)
for node in nodesup:
print node.__dict__
return nodesup
def create_instance(request):
if request['provider'] == "AWS":
nodes = _create_node_AWS(request)
elif request['provider'] == "GCE":
nodes = _create_node_GCE(request)
elif request['provider'] == "RackSpace":
nodes = _create_node_RackSpace(request)
elif request['provider'] == "Azure":
nodes = az.AzureHandler(request)
return nodes
def handleNewDeployment(request,cb,depReq):
nodesup = create_instance(request)
bucket =[]
vms = []
if (request['provider'] == "AWS"):
for node in nodesup :
vms.append({'ip' : convert_node_ip_string(node.public_ips), 'nodeid':node.id,
'dns': convert_node_ip_string(node.__dict__['extra']['dns_name'])})
elif (request['provider'] == "GCE"):
for node in nodesup :
vms.append({'ip' : convert_node_ip_string(node.public_ips), 'nodeid':node.id,
'pip': convert_node_ip_string(node.private_ips), 'dns':convert_node_ip_string(node.public_ips)})
elif (request['provider'] == "RackSpace"):
for node in nodesup :
vms.append({'ip' : convert_node_ip_string(node.public_ips[0]), 'nodeid':node.id,
'pip': convert_node_ip_string(node.private_ips), 'dns':convert_node_ip_string(node.public_ips[0])})
bucket.append ({ 'bucketname' : request['bucketname'], 'bucketsize':request['bucket_size']});
result = cb.get("user::{0}".format(request['username'])).value
print "hehhr"
print result
try :
store = result
result = json.loads(result)
except:
result = store
request['status'] = "IHDE"
if result['deploy'] == None:
print "1"
result['deploy'] = [{'request':request,
'bucket': bucket,
'vm':vms}]
else :
print "2"
result['deploy'].append({'request':request, 'bucket' : bucket, 'vm':vms})
print result
cb.set(depReq,request)
cb.set("user::{0}".format(request['username']), result)
if (request['provider'] == "AWS"):
aws_mkfile(nodesup,depReq)
elif(request['provider'] == "GCE"):
gce_mkfile(nodesup,depReq)
gce_mkfile_cluster(nodesup,depReq)
elif(request['provider'] == "RackSpace"):
rackspace_mkfile(nodesup,depReq)
def handleNewInstances(request,cb,depReq):
nodesup = create_instance(request)
resultSession = cb.get("SessionDetails::{0}".format(request['username'])).value
depIndex = request['deploymentIndex']
result = cb.get("user::{0}".format(request['username'])).value
try :
store = result
result = json.loads(result)
except:
result = store
vms = result['deploy'][depIndex]['vm']
newVM= []
mainVm = vms[0]
newVM.append(mainVm)
for node in nodesup :
if request['provider'] == "AWS":
vms.append({'ip' : convert_node_ip_string(node.public_ips), 'nodeid':node.id,
'dns':node.__dict__['extra']['dns_name']})
newVM.append({'ip' : convert_node_ip_string(node.public_ips), 'nodeid':node.id,
'dns':node.__dict__['extra']['dns_name']})
elif request['provider'] == "GCE":
vms.append({'ip' : convert_node_ip_string(node.public_ips), 'nodeid':node.id,
'dns':convert_node_ip_string(node.public_ips), 'pip':convert_node_ip_string(node.private_ips)})
newVM.append({'ip' : convert_node_ip_string(node.public_ips), 'nodeid':node.id,
'dns':convert_node_ip_string(node.public_ips), 'pip':convert_node_ip_string(node.private_ips)})
elif request['provider'] == "RackSpace":
vms.append({'ip' : convert_node_ip_string(node.public_ips[0]), 'nodeid':node.id,
'dns':convert_node_ip_string(node.public_ips[0]), 'pip':convert_node_ip_string(node.private_ips)})
newVM.append({'ip' : convert_node_ip_string(node.public_ips[0]), 'nodeid':node.id,
'dns':convert_node_ip_string(node.public_ips[0]), 'pip':convert_node_ip_string(node.private_ips)})
result['deploy'][depIndex]['newvm'] = newVM
result['deploy'][depIndex]['vm'] = vms
cpu = result['deploy'][depIndex]['request']['cpus']
result['deploy'][depIndex]['request']['cpus'] = int (cpu) +1
cb.set("user::{0}".format(request['username']),result)
print result
request["status"] = "IHAD"
request['vmprimary'] = mainVm
cb.set(depReq,request)
if request['provider'] == "AWS":
aws_mkfile(nodesup,depReq)
elif request['provider'] == "GCE":
gce_mkfile(nodesup,depReq)
else:
rackspace_mkfile(nodesup,depReq)
def del_aws_ins(request, listIns):
location = request['loc']
if location == 'East':
cls = get_driver(Provider.EC2_US_EAST)
AWS_EC2_ACCESS_ID = request['ackey']
AWS_EC2_SECRET_KEY = request['seckey']
driver = cls(AWS_EC2_ACCESS_ID, AWS_EC2_SECRET_KEY)
ACCESS_KEY_NAME = request['keyname']
sizes = driver.list_sizes()
MY_SIZE = request['machine']
MY_IMAGE = 'ami-76817c1e'
size = [s for s in sizes if s.id == MY_SIZE][0]
image = driver.get_image(MY_IMAGE)
nodes = driver.list_nodes()
for node in nodes :
if (node.id in listIns):
driver.destroy_node(node)
def del_inst(request, listIns):
Driver = get_driver(Provider.GCE)
print request['email']
print request['loc']
print request['projid']
'''
gce = Driver('{0}'.format(request['email']), "./PRIV.pem",
datacenter='{0}'.format(request['loc']),
project='{0}'.format(request['projid']))
'''
gce = Driver('265882800008-3blh6m3ocdfhkm6kl2ihhfsls0a44nd6@developer.gserviceaccount.com', './PRIV.pem',
datacenter='us-central1-a',
project='poised-resource-658')
nodes = gce.list_nodes()
for node in nodes :
if (node.id in listIns):
gce.destroy_node(node)
def del_inst_rackspace(request, listIns):
cls = get_driver(Provider.RACKSPACE)
driver = cls(request['rkusername'],request['apikey'])
nodes = driver.list_nodes()
for node in nodes :
if (node.id in listIns):
driver.destroy_node(node)
def delInstance(request,cb,depReq):
machines = request['delmachines']
listIns = []
for mc in machines:
listIns.append(mc['nodeid'])
pvm = request['vmprimary']
print "DEL"
print listIns
username = request['username']
depname = request['depname']
depIndex = request['deploymentIndex']
result = cb.get("user::{0}".format(username)).value
a = 0;
listDns = []
listPip = []
if (request['provider'] == "AWS"):
for mc in machines:
listDns.append(mc['dns'])
else:
for mc in machines:
listPip.append(mc['pip'])
print listDns
newVM = []
if (request['provider'] == "AWS"):
for res in result['deploy'][depIndex]['vm']:
if res['dns'] not in listDns:
newVM.append(res)
else:
for res in result['deploy'][depIndex]['vm']:
if res['pip'] not in listPip:
newVM.append(res)
result['deploy'][depIndex]['vm'] = newVM
cb.set ('user::{0}'.format(username), result)
cpu = result['deploy'][depIndex]['request']['cpus']
result['deploy'][depIndex]['request']['cpus'] = int(cpu) - len(machines)
if request['provider'] == "AWS":
for res in listDns:
cmd='sudo ./couchbase-cli rebalance -c {0} --server-remove={1} -u Administrator -p password'.format(pvm,res)
p4 = subprocess.Popen(r'{0}'.format(cmd),
cwd = r'/root/opt/couchbase/bin', shell =True)
p4.wait()
del_aws_ins(request, listIns)
elif request['provider'] == "GCE":
for res in listPip:
cmd='sudo ./couchbase-cli rebalance -c {0} --server-remove={1} -u Administrator -p password'.format(pvm,res)
p4 = subprocess.Popen(r'{0}'.format(cmd),
cwd = r'/root/opt/couchbase/bin', shell =True)
p4.wait()
del_inst(request,listIns)
else:
for res in listPip:
cmd='sudo ./couchbase-cli rebalance -c {0} --server-remove={1} -u Administrator -p password'.format(pvm,res)
p4 = subprocess.Popen(r'{0}'.format(cmd),
cwd = r'/root/opt/couchbase/bin', shell =True)
p4.wait()
del_inst_rackspace(request,listIns)
request['status'] = "IHDEL"
print result
cb.set(depReq, request)
cb.set("user::{0}".format(username), result)
def main(argv):
depReq = argv[0]
cb = Couchbase.connect(bucket="default", host="localhost")
request = cb.get(depReq).value
if request['status'] == "RDDE":
request['status'] = "INUSEDE"
cb.set(depReq, request)
handleNewDeployment(request,cb,depReq)
elif request['status'] == "RDAD":
request['status'] = "INUSEAD"
cb.set(depReq, request)
handleNewInstances(request,cb,depReq)
elif request['status'] == "RDDEL":
request['status'] = "INUSEDEL"
cb.set(depReq, request)
delInstance(request,cb,depReq)
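# Flatten libcloud's list-of-IPs repr into a plain string by stripping the
# brackets, quotes and unicode prefixes, e.g. "[u'1.2.3.4']" -> "1.2.3.4".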
def convert_node_ip_string(st):
a = "{0}".format(st)
a = a.replace('[', '')
a = a.replace('u\'', '')
a = a.replace('\'','')
a = a.replace(']','')
return a
def aws_mkfile_add(vm, user, key):
'''
filekey = open("/tmp/piushs.pem".format(user), "w")
filekey.write(key)
filekey.close()
'''
file = "/tmp/test-{0}.ini".format(timestamp())
cfgfile = open(file,'w')
cb = Couchbase.connect(bucket = 'default', host = 'localhost')
val = cb.get(depReq).value
val['testmkfile'] = file
cb.set(depReq, val)
Config = ConfigParser.ConfigParser()
Config.add_section('global')
Config.set('global','username','ec2-user')
Config.set('global','ssh_key', "/tmp/{0}.pem".format(user))
Config.set('global','port', "8091")
Config.add_section('servers')
count = 1
for v in vm:
Config.set('servers', "{0}".format(count),"{0}".format(v['dns']))
count = count +1
Config.add_section('membase')
Config.set('membase','rest_username',"Administrator")
Config.set('membase','rest_password', "password")
Config.write(cfgfile)
cfgfile.close()
def gce_mkfile(nodesup, depReq):
file = "/tmp/test-{0}.ini".format(timestamp())
cfgfile = open(file,'w')
cb = Couchbase.connect(bucket = 'default', host = 'localhost')
val = cb.get(depReq).value
val['testmkfile'] = file
cb.set(depReq, val)
Config = ConfigParser.ConfigParser()
Config.add_section('global')
Config.set('global','username','couchbase')
Config.set('global','ssh_key', "/tmp/id_rsa")
Config.set('global','port', "8091")
Config.add_section('servers')
count = 1
for node in nodesup:
a = convert_node_ip_string(node.public_ips)
Config.set('servers', "{0}".format(count),"{0}".format(a))
count = count +1
Config.add_section('membase')
Config.set('membase','rest_username',"Administrator")
Config.set('membase','rest_password', "password")
Config.write(cfgfile)
cfgfile.close()
def gce_mkfile_cluster(nodesup, depReq):
file = "/tmp/test-{0}.ini".format(timestamp())
cfgfile = open(file,'w')
cb = Couchbase.connect(bucket = 'default', host = 'localhost')
Config = ConfigParser.ConfigParser()
Config.add_section('global')
Config.set('global','username','user')
Config.set('global','ssh_key', "/tmp/id_rsa")
Config.set('global','port', "8091")
Config.add_section('servers')
count = 1
for node in nodesup:
if count == 1:
a = convert_node_ip_string(node.public_ips)
Config.set('servers', "{0}".format(count),"{0}".format(a))
else:
a = convert_node_ip_string(node.private_ips)
Config.set('servers', "{0}".format(count),"{0}".format(a))
count = count +1
Config.add_section('membase')
Config.set('membase','rest_username',"Administrator")
Config.set('membase','rest_password', "password")
Config.write(cfgfile)
cfgfile.close()
def aws_mkfile(nodesup,depReq):
file = "/tmp/test-{0}.ini".format(timestamp())
cfgfile = open(file,'w')
cb = Couchbase.connect(bucket = 'default', host = 'localhost')
val = cb.get(depReq).value
val['testmkfile'] = file
cb.set(depReq, val)
Config = ConfigParser.ConfigParser()
Config.add_section('global')
Config.set('global','username','ec2-user')
Config.set('global','ssh_key', "/tmp/piushs.pem")
Config.set('global','port', "8091")
Config.add_section('servers')
count = 1
for node in nodesup:
a = convert_node_ip_string(node.public_ips)
Config.set('servers', "{0}".format(count),"{0}".format(node.__dict__['extra']['dns_name']))
count = count +1
Config.add_section('membase')
Config.set('membase','rest_username',"Administrator")
Config.set('membase','rest_password', "password")
Config.write(cfgfile)
cfgfile.close()
cfgfile = open("/tmp/testCluster.ini",'w')
Config.write(cfgfile)
cfgfile.close()
def rackspace_mkfile(nodesup,depReq):
file = "/tmp/test-{0}.ini".format(timestamp())
cb = Couchbase.connect(bucket = 'default', host = 'localhost')
val = cb.get(depReq).value
val['testmkfile'] = file
cb.set(depReq, val)
cfgfile = open(file,'w')
Config = ConfigParser.ConfigParser()
Config.add_section('global')
Config.set('global','username','root')
Config.set('global','ssh_key', "/tmp/rackspacepk")
Config.set('global','port', "8091")
Config.add_section('servers')
count = 1
for node in nodesup:
a = convert_node_ip_string(node.public_ips[0])
Config.set('servers', "{0}".format(count),"{0}".format(a))
count = count +1
Config.add_section('membase')
Config.set('membase','rest_username',"Administrator")
Config.set('membase','rest_password', "password")
Config.write(cfgfile)
cfgfile.close()
cfgfile = open("/tmp/testCluster.ini",'w')
Config.write(cfgfile)
cfgfile.close()
if __name__ == "__main__":
main(sys.argv[1:])
|
couchbaselabs/cloudhosting
|
Install/InstanceHadler.py
|
Python
|
apache-2.0
| 19,561
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Wrapper base class."""
from absl.testing import parameterized
import keras
import tensorflow.compat.v2 as tf
class ExampleWrapper(keras.layers.Wrapper):
"""Simple Wrapper subclass."""
def call(self, inputs, *args, **kwargs):
return self.layer(inputs, *args, **kwargs)
class WrapperTest(parameterized.TestCase):
def test_wrapper_from_config_no_mutation(self):
wrapper = ExampleWrapper(keras.layers.Dense(1))
config = wrapper.get_config()
config_copy = config.copy()
self.assertEqual(config, config_copy)
wrapper_from_config = ExampleWrapper.from_config(config)
new_config = wrapper_from_config.get_config()
self.assertEqual(new_config, config)
self.assertEqual(new_config, config_copy)
if __name__ == '__main__':
tf.test.main()
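# Call-through sketch (illustrative shapes, assumed, not part of the test case):
#   wrapper = ExampleWrapper(keras.layers.Dense(4))
#   outputs = wrapper(tf.ones((2, 3)))  # ExampleWrapper.call delegates to the wrapped Dense
#   outputs.shape                       # -> (2, 4)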
|
keras-team/keras
|
keras/layers/rnn/base_wrapper_test.py
|
Python
|
apache-2.0
| 1,487
|
# coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes defining OT problem(s) (objective function + utilities)."""
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from ott.geometry import geometry
@jax.tree_util.register_pytree_node_class
class LinearProblem:
"""Holds the definition of a linear regularized OT problem and some tools."""
def __init__(self,
geom: geometry.Geometry,
a: Optional[jnp.ndarray] = None,
b: Optional[jnp.ndarray] = None,
tau_a: float = 1.0,
tau_b: float = 1.0):
"""Initializes the LinearProblem.
    min_P <C, P> - eps H(P),  s.t.  P 1 = a,  P^T 1 = b.
Args:
geom: the geometry.Geometry object defining the ground geometry / cost of
the linear problem.
a: jnp.ndarray[n] representing the first marginal. If None, it will be
uniform.
      b: jnp.ndarray[m] representing the second marginal. If None, it will be
        uniform.
      tau_a: if lower than 1.0, defines how unbalanced the problem is on
        the first marginal.
      tau_b: if lower than 1.0, defines how unbalanced the problem is on
        the second marginal.
"""
self.geom = geom
self._a = a
self._b = b
self.tau_a = tau_a
self.tau_b = tau_b
def tree_flatten(self):
return ([self.geom, self._a, self._b],
{'tau_a': self.tau_a, 'tau_b': self.tau_b})
@classmethod
def tree_unflatten(cls, aux_data, children):
return cls(*children, **aux_data)
@property
def a(self):
num_a = self.geom.shape[0]
return jnp.ones((num_a,)) / num_a if self._a is None else self._a
@property
def b(self):
num_b = self.geom.shape[1]
return jnp.ones((num_b,)) / num_b if self._b is None else self._b
@property
def is_balanced(self):
return self.tau_a == 1.0 and self.tau_b == 1.0
@property
def epsilon(self):
return self.geom.epsilon
def get_transport_functions(self, lse_mode: bool):
"""Instantiates useful functions for Sinkhorn depending on lse_mode."""
geom = self.geom
if lse_mode:
marginal_a = lambda f, g: geom.marginal_from_potentials(f, g, 1)
marginal_b = lambda f, g: geom.marginal_from_potentials(f, g, 0)
app_transport = geom.apply_transport_from_potentials
else:
marginal_a = lambda f, g: geom.marginal_from_scalings(
geom.scaling_from_potential(f), geom.scaling_from_potential(g), 1)
marginal_b = lambda f, g: geom.marginal_from_scalings(
geom.scaling_from_potential(f), geom.scaling_from_potential(g), 0)
app_transport = lambda f, g, z, axis: geom.apply_transport_from_scalings(
geom.scaling_from_potential(f),
geom.scaling_from_potential(g), z, axis)
return marginal_a, marginal_b, app_transport
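# A minimal construction sketch; the sizes, cost values and epsilon below are
# illustrative assumptions, not taken from this library's tests.
if __name__ == '__main__':
  cost = jnp.arange(35.0).reshape((5, 7))
  geom = geometry.Geometry(cost_matrix=cost, epsilon=1e-2)
  prob = LinearProblem(geom)  # marginals a and b default to uniform
  print(prob.is_balanced, prob.a.shape, prob.b.shape, prob.epsilon)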
|
google-research/ott
|
ott/core/problems.py
|
Python
|
apache-2.0
| 3,372
|
from django.test import TestCase
from django.conf import settings
from restclients.models.sws import Term, Curriculum, Person
from restclients.exceptions import DataFailureException
from restclients.exceptions import InvalidSectionID, InvalidSectionURL
from restclients.exceptions import InvalidCanvasIndependentStudyCourse, InvalidCanvasSection
import restclients.sws.section as SectionSws
from restclients.sws import use_v5_resources
from datetime import datetime
class SWSTestSectionData(TestCase):
def test_final_exams(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
section = SectionSws.get_section_by_label('2013,summer,B BIO,180/A')
self.assertEquals(section.final_exam, None, "No final exam for B BIO 180")
section = SectionSws.get_section_by_label('2013,summer,MATH,125/G')
final_exam = section.final_exam
self.assertEquals(final_exam.is_confirmed, False, "Final exam for Math 125 isn't confirmed")
self.assertEquals(final_exam.no_exam_or_nontraditional, False, "Final exam for Math 125 isn't non-traditional")
section = SectionSws.get_section_by_label('2013,summer,TRAIN,101/A')
final_exam = section.final_exam
self.assertEquals(final_exam.is_confirmed, True, "Final exam for Train 101 is confirmed")
self.assertEquals(final_exam.no_exam_or_nontraditional, False, "Final exam for Train 101 isn't non-traditional")
self.assertEquals(final_exam.building, "KNE", "Has right final building")
self.assertEquals(final_exam.room_number, "012", "Has right room #")
start = final_exam.start_date
end = final_exam.end_date
self.assertEquals(start.year, 2013)
self.assertEquals(start.month, 6)
self.assertEquals(start.day, 2)
self.assertEquals(start.hour, 13)
self.assertEquals(start.minute, 30)
self.assertEquals(end.year, 2013)
self.assertEquals(end.month, 6)
self.assertEquals(end.day, 2)
self.assertEquals(end.hour, 16)
self.assertEquals(end.minute, 20)
def test_section_by_label(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
#Valid data, shouldn't throw any exceptions
SectionSws.get_section_by_label('2013,summer,TRAIN,100/A')
#Invalid data, should throw exceptions
self.assertRaises(InvalidSectionID,
SectionSws.get_section_by_label,
'')
self.assertRaises(InvalidSectionID,
SectionSws.get_section_by_label,
' ')
self.assertRaises(InvalidSectionID,
SectionSws.get_section_by_label,
'2012')
self.assertRaises(InvalidSectionID,
SectionSws.get_section_by_label,
'2012,summer')
self.assertRaises(InvalidSectionID,
SectionSws.get_section_by_label,
'2012,summer,TRAIN')
self.assertRaises(InvalidSectionID,
SectionSws.get_section_by_label,
'2012, summer, TRAIN, 100')
self.assertRaises(InvalidSectionID,
SectionSws.get_section_by_label,
'summer, TRAIN, 100/A')
self.assertRaises(InvalidSectionID,
SectionSws.get_section_by_label,
'2012,fall,TRAIN,100/A')
self.assertRaises(InvalidSectionID,
SectionSws.get_section_by_label,
'-2012,summer,TRAIN,100/A')
self.assertRaises(DataFailureException,
SectionSws.get_section_by_label,
'9999,summer,TRAIN,100/A')
#Valid section labels, no files for them
self.assertRaises(DataFailureException,
SectionSws.get_section_by_label,
'2012,summer,TRAIN,110/A')
self.assertRaises(DataFailureException,
SectionSws.get_section_by_label,
'2012,summer,TRAIN,100/B')
self.assertRaises(DataFailureException,
SectionSws.get_section_by_label,
'2012,summer,PHYS,121/B')
self.assertRaises(DataFailureException,
SectionSws.get_section_by_label,
'2012,summer,PHYS,121/BB')
self.assertRaises(DataFailureException,
SectionSws.get_section_by_label,
'2010,autumn,G H,201/A')
self.assertRaises(DataFailureException,
SectionSws.get_section_by_label,
'2010,autumn,CS&SS,221/A')
self.assertRaises(DataFailureException,
SectionSws.get_section_by_label,
'2010,autumn,KOREAN,101/A')
self.assertRaises(DataFailureException,
SectionSws.get_section_by_label,
'2010,autumn,CM,101/A')
def test_instructors_in_section(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
section = SectionSws.get_section_by_label('2013,winter,ASIAN,203/A')
self.assertEquals(len(section.get_instructors()), 1, "Correct number of instructors")
person1 = Person(uwregid="FBB38FE46A7C11D5A4AE0004AC494FFE")
self.assertEquals(section.is_instructor(person1), False, "Person is not instructor")
person2 = Person(uwregid="6DF0A9206A7D11D5A4AE0004AC494FFE")
self.assertEquals(section.is_instructor(person2), True, "Person is instructor")
section2 = SectionSws.get_section_by_label('2013,summer,TRAIN,101/A')
self.assertEquals(len(section2.get_instructors()), 2, "Correct number of instructors")
section3 = SectionSws.get_section_by_label('2013,spring,PHYS,121/A')
self.assertEquals(len(section3.get_instructors()), 5,
"Correct number of all instructors")
section3 = SectionSws.get_section_by_label('2013,spring,PHYS,121/A', False)
self.assertEquals(len(section3.get_instructors()), 4,
"Correct number of TSPrinted instructors")
def test_delegates_in_section(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
section = SectionSws.get_section_by_label('2013,winter,ASIAN,203/A')
self.assertEquals(len(section.grade_submission_delegates), 3,
"Correct number of delegates")
person1 = Person(uwregid="6DF0A9206A7D11D5A4AE0004AC494FFE")
self.assertEquals(section.is_grade_submission_delegate(person1), False, "Person is not delegate")
person2 = Person(uwregid="FBB38FE46A7C11D5A4AE0004AC494FFE")
self.assertEquals(section.is_grade_submission_delegate(person2), True, "Person is delegate")
def test_joint_sections(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
section = SectionSws.get_section_by_label('2013,winter,ASIAN,203/A')
joint_sections = SectionSws.get_joint_sections(section)
self.assertEquals(len(joint_sections), 1)
section = SectionSws.get_section_by_label('2013,winter,EMBA,503/A')
joint_sections = SectionSws.get_joint_sections(section)
self.assertEquals(len(joint_sections), 0)
#Failing because linked section json files haven't been made (Train 100 AA/AB)
def test_linked_sections(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
#Valid data, shouldn't throw any exceptions
section = SectionSws.get_section_by_label('2013,summer,TRAIN,100/A')
SectionSws.get_linked_sections(section)
#Invalid data, should throw exceptions
section.linked_section_urls = ['']
self.assertRaises(InvalidSectionURL,
SectionSws.get_linked_sections, section)
section.linked_section_urls = [' ']
self.assertRaises(InvalidSectionURL,
SectionSws.get_linked_sections, section)
section.linked_section_urls = ['2012,summer,TRAIN,100/A']
self.assertRaises(InvalidSectionURL,
SectionSws.get_linked_sections, section)
if use_v5_resources():
section.linked_section_urls = ['/student/v5/course/2012,summer,PHYS,121/B.json']
else:
section.linked_section_urls = ['/student/v4/course/2012,summer,PHYS,121/B.json']
self.assertRaises(DataFailureException,
SectionSws.get_linked_sections, section)
if use_v5_resources():
section.linked_section_urls = ['/student/v5/course/2010,autumn,CS&SS,221/A.json']
else:
section.linked_section_urls = ['/student/v4/course/2010,autumn,CS&SS,221/A.json']
self.assertRaises(DataFailureException,
SectionSws.get_linked_sections, section)
if use_v5_resources():
section.linked_section_urls = ['/student/v5/course/2010,autumn,KOREAN,101/A.json']
else:
section.linked_section_urls = ['/student/v4/course/2010,autumn,KOREAN,101/A.json']
self.assertRaises(DataFailureException,
SectionSws.get_linked_sections, section)
if use_v5_resources():
section.linked_section_urls = ['/student/v5/course/2010,autumn,G H,201/A.json']
else:
section.linked_section_urls = ['/student/v4/course/2010,autumn,G H,201/A.json']
self.assertRaises(DataFailureException,
SectionSws.get_linked_sections, section)
if use_v5_resources():
section.linked_section_urls = ['/student/v5/course/2010,autumn,CM,101/A.json']
else:
section.linked_section_urls = ['/student/v4/course/2010,autumn,CM,101/A.json']
self.assertRaises(DataFailureException,
SectionSws.get_linked_sections, section)
if use_v5_resources():
section.linked_section_urls = ['/student/v5/course/2012,autumn,PHYS,121/A.json',
'/student/v5/course/2012,autumn,PHYS,121/AC.json',
'/student/v5/course/2012,autumn,PHYS,121/BT.json']
else:
section.linked_section_urls = ['/student/v4/course/2012,autumn,PHYS,121/A.json',
'/student/v4/course/2012,autumn,PHYS,121/AC.json',
'/student/v4/course/2012,autumn,PHYS,121/BT.json']
self.assertRaises(DataFailureException,
SectionSws.get_linked_sections, section)
if use_v5_resources():
section.linked_section_urls = ['/student/v5/course/2012,autumn,PHYS,121/A.json',
'/student/v5/course/2012,autumn,PHYS,121/AC.json',
'/student/v5/course/2012,autumn,PHYS,121/AAA.json']
else:
section.linked_section_urls = ['/student/v4/course/2012,autumn,PHYS,121/A.json',
'/student/v4/course/2012,autumn,PHYS,121/AC.json',
'/student/v4/course/2012,autumn,PHYS,121/AAA.json']
self.assertRaises(DataFailureException,
SectionSws.get_linked_sections, section)
def test_sections_by_instructor_and_term(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
term = Term(quarter="summer", year=2013)
instructor = Person(uwregid="FBB38FE46A7C11D5A4AE0004AC494FFE")
sections = SectionSws.get_sections_by_instructor_and_term(instructor, term)
self.assertEquals(len(sections), 1)
def test_sections_by_delegate_and_term(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
term = Term(quarter="summer", year=2013)
delegate = Person(uwregid="FBB38FE46A7C11D5A4AE0004AC494FFE")
sections = SectionSws.get_sections_by_delegate_and_term(delegate, term)
self.assertEquals(len(sections), 2)
def test_sections_by_curriculum_and_term(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
term = Term(quarter="winter", year=2013)
curriculum = Curriculum(label="ENDO")
sections = SectionSws.get_sections_by_curriculum_and_term(curriculum, term)
self.assertEquals(len(sections), 2)
# Valid curriculum, with no file
self.assertRaises(DataFailureException,
SectionSws.get_sections_by_curriculum_and_term,
Curriculum(label="FINN"),
term)
def test_changed_sections_by_term(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
changed_date = datetime(2013, 12, 12).date()
term = Term(quarter="winter", year=2013)
sections = SectionSws.get_changed_sections_by_term(changed_date, term)
self.assertEquals(len(sections), 2)
def test_instructor_published(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
# Published Instructors
pi_section = SectionSws.get_section_by_label('2013,summer,B BIO,180/A')
self.assertEquals(pi_section.meetings[0].instructors[0].TSPrint, True)
# Unpublished Instructors
upi_section = SectionSws.get_section_by_label('2013,summer,MATH,125/G')
self.assertEquals(upi_section.meetings[0].instructors[0].TSPrint, False)
def test_secondary_grading(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
section1 = SectionSws.get_section_by_label('2012,summer,PHYS,121/A')
self.assertEquals(section1.allows_secondary_grading, True,
"Allows secondary grading")
for linked in SectionSws.get_linked_sections(section1):
self.assertEquals(linked.allows_secondary_grading, True,
"Allows secondary grading")
section2 = SectionSws.get_section_by_label('2013,winter,EMBA,503/A')
self.assertEquals(section2.allows_secondary_grading, False,
"Does not allow secondary grading")
def test_grading_period_open(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
section = SectionSws.get_section_by_label('2012,summer,PHYS,121/A')
self.assertEquals(section.is_grading_period_open(), False, "Grading window is not open")
# Spring 2013 is 'current' term
section = SectionSws.get_section_by_label('2013,spring,MATH,125/G')
self.assertEquals(section.is_grading_period_open(), True, "Grading window is open")
def test_canvas_sis_ids(self):
with self.settings(
RESTCLIENTS_SWS_DAO_CLASS='restclients.dao_implementation.sws.File',
RESTCLIENTS_PWS_DAO_CLASS='restclients.dao_implementation.pws.File'):
# Primary section containing linked secondary sections
section = SectionSws.get_section_by_label('2012,summer,PHYS,121/A')
self.assertEquals(section.canvas_course_sis_id(),
'2012-summer-PHYS-121-A', 'Canvas course SIS ID')
self.assertRaises(InvalidCanvasSection,
section.canvas_section_sis_id)
# Primary section with no linked sections
section = SectionSws.get_section_by_label('2013,autumn,REHAB,585/A')
self.assertEquals(section.canvas_course_sis_id(),
'2013-autumn-REHAB-585-A', 'Canvas course SIS ID')
self.assertEquals(section.canvas_section_sis_id(),
'2013-autumn-REHAB-585-A--', 'Canvas section SIS ID')
# Secondary (linked) section
section = SectionSws.get_section_by_label('2013,autumn,PHYS,121/AB')
self.assertEquals(section.canvas_course_sis_id(),
'2013-autumn-PHYS-121-A', 'Canvas course SIS ID')
self.assertEquals(section.canvas_section_sis_id(),
'2013-autumn-PHYS-121-AB', 'Canvas section SIS ID')
# Independent study section
section = SectionSws.get_section_by_label('2013,summer,PHIL,600/A')
# ..missing instructor regid
self.assertRaises(InvalidCanvasIndependentStudyCourse,
section.canvas_course_sis_id)
section.independent_study_instructor_regid = 'A9D2DDFA6A7D11D5A4AE0004AC494FFE'
self.assertEquals(section.canvas_course_sis_id(),
'2013-summer-PHIL-600-A-A9D2DDFA6A7D11D5A4AE0004AC494FFE',
'Canvas course SIS ID')
self.assertEquals(section.canvas_section_sis_id(),
'2013-summer-PHIL-600-A-A9D2DDFA6A7D11D5A4AE0004AC494FFE--',
'Canvas section SIS ID')
|
jeffFranklin/uw-restclients
|
restclients/test/sws/section.py
|
Python
|
apache-2.0
| 19,649
|
#!/usr/bin/python3
class ElbConfig(object):
def __init__(self, elb_listeners_config, elb_health_check,
public_unit, elb_log_bucket, ssl_certificate_id, healthy_threshold, unhealthy_threshold,
interval, timeout, owner):
"""
Simple config class to contain elb related parameters
:param elb_listeners_config: List of ELB listener configs
:param elb_health_check: path for ELB healthcheck
:param public_unit: Boolean to determine if the elb scheme will be internet-facing or private
:param elb_log_bucket: S3 bucket to log access log to
        :param ssl_certificate_id: SSL certificate identifier (e.g. an ARN) for secure listeners, if any
        :param healthy_threshold: Number of consecutive health check successes before marking as Healthy
        :param unhealthy_threshold: Number of consecutive health check failures before marking as Unhealthy
        :param interval: Interval between health checks
        :param timeout: Amount of time during which no response means a failed health check
        :param owner: Owner identifier recorded against this ELB configuration
        """
self.elb_health_check = elb_health_check
self.public_unit = public_unit
self.elb_log_bucket = elb_log_bucket
self.ssl_certificate_id = ssl_certificate_id
self.elb_listeners_config = elb_listeners_config
self.healthy_threshold = healthy_threshold
self.unhealthy_threshold = unhealthy_threshold
self.interval = interval
self.timeout = timeout
self.owner = owner
class ElbListenersConfig(object):
def __init__(self, instance_protocol, loadbalancer_protocol, instance_port, loadbalancer_port, sticky_app_cookie):
"""
Simple ELB listener config class to contain elb listener related parameters
:param instance_protocol: instance_protocol for ELB to communicate with webserver
:param loadbalancer_protocol: loadbalancer_protocol for world to communicate with ELB
:param instance_port: ports for ELB and webserver to communicate via
:param loadbalancer_port: ports for public and ELB to communicate via
:param sticky_app_cookie: name sticky app cookie
"""
self.instance_protocol = instance_protocol
self.loadbalancer_protocol = loadbalancer_protocol
self.instance_port = instance_port
self.loadbalancer_port = loadbalancer_port
self.sticky_app_cookie = sticky_app_cookie if sticky_app_cookie else None
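# A minimal construction sketch; the protocol, port, threshold and owner values
# below are illustrative assumptions rather than project defaults.
if __name__ == '__main__':
    listener = ElbListenersConfig(instance_protocol='HTTP',
                                  loadbalancer_protocol='HTTP',
                                  instance_port=80,
                                  loadbalancer_port=80,
                                  sticky_app_cookie=None)
    elb_config = ElbConfig(elb_listeners_config=[listener],
                           elb_health_check='HTTP:80/index.html',
                           public_unit=True,
                           elb_log_bucket='my-elb-log-bucket',
                           ssl_certificate_id=None,
                           healthy_threshold=10,
                           unhealthy_threshold=2,
                           interval=300,
                           timeout=30,
                           owner='ops-team')
    print(elb_config.public_unit, len(elb_config.elb_listeners_config))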
|
GeoscienceAustralia/amazonia
|
amazonia/classes/elb_config.py
|
Python
|
apache-2.0
| 2,381
|
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to get metrics.
This module computes F1, MSE, L1, L2 metrics for the image by transforming
the result to be comparable with the original image.
"""
import dataclasses
import numpy as np
from scipy import stats
from sklearn import metrics as mt
@dataclasses.dataclass
class Metrics:
"""Stores obtained metrics.
Attributes:
mse: mean squared error.
l1_distance: L1 distance.
l2_distance: L2 distance.
    wasserstein: Wasserstein distance (i.e. earth mover's distance).
hotspots_count: count of the current hotspots.
f1: f1 score on the discovered hot spots.
mutual_info: mutual information metric.
"""
mse: float
l1_distance: float
l2_distance: float
wasserstein: float
hotspots_count: int
f1: float
mutual_info: float
def rescale_image(image: np.ndarray, total_size: int):
"""Scale up the image to a certain size.
  Naive scaling method: given a square image of some size, scale it up so that
  the final image has total_size x total_size pixels. The method simply
  duplicates each original value into the corresponding block of larger-image
  pixels.
Args:
image: initial 'unscaled' square-size image (np.array)
total_size: desired dimension, power of 2, divisible by the image size.
Returns:
scaled image array of size total_size x total_size.
"""
if total_size % image.shape[0] != 0:
raise ValueError('Provided scale size has to be divisible by image size.')
if image.shape[0] != image.shape[1]:
raise ValueError('Provided image needs to have a squared size.')
scale = int(total_size / image.shape[0])
new_image = np.zeros([total_size, total_size])
for i in range(scale):
for j in range(scale):
new_image[i::scale, j::scale] = image
return new_image
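# Worked example for rescale_image (illustrative values):
#   rescale_image(np.array([[1., 2.], [3., 4.]]), 4) returns
#   [[1, 1, 2, 2],
#    [1, 1, 2, 2],
#    [3, 3, 4, 4],
#    [3, 3, 4, 4]], i.e. each original pixel is duplicated into a 2x2 block.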
def normalize(vector: np.ndarray):
"""Normalizes the np.array to sum up to one and clips negative values to 0."""
arr = np.copy(vector)
arr[arr < 0] = 0
arr = arr / np.sum(arr)
return arr
def largest_indices(array: np.ndarray, top_k: int):
"""Compute top-k coordinates of the provided array.
Takes an image as np.array, computes indices of the largest elements, and
returns the list of the coordinates and an image with the largest elements
having value 1 and the rest of the image is 0.
Args:
array: data array
top_k: number of elements to select
Returns:
list of top k coordinates, zero array except top-k coordinates set to 1.
"""
flat = array.flatten()
# find the top-k elements (unsorted) in the flattened array
indices = np.argpartition(flat, -top_k)[-top_k:]
# unravel the flattened indices into the image shape
unraveled = np.unravel_index(indices, array.shape)
# create a set of coordinates with top-k elements and create an image.
tuples = set()
top_k_arr = np.zeros_like(array)
for i in range(top_k):
x_coord = unraveled[0][i]
y_coord = unraveled[1][i]
tuples.add((x_coord, y_coord))
top_k_arr[x_coord, y_coord] = 1
return tuples, top_k_arr
def get_metrics(test_image, true_image, top_k, total_size):
"""Computes multiple different metrics between two images.
We compute a variety of metrics on the input image: we output L1 and L2
distances, Wasserstein (earth movers) distance, hotspot count and f1 score for
the provided TOP-K parameter, and an MSE error. For the correct comparison the
  images are scaled to the same size first, and then compared per coordinate.
Args:
test_image: obtained image to obtain the metrics
true_image: original image to compare against the test_image.
top_k: parameter to compute top-k hot spots.
total_size: the size to scale the images to.
Returns:
    A Metrics object holding the l1/l2 distances, Wasserstein (earth mover's)
    distance, hotspot count, f1-score, mutual information and MSE.
"""
# normalize the input images
test_image = normalize(rescale_image(test_image, total_size))
true_image = normalize(rescale_image(true_image, total_size))
top_k_test, top_k_test_arr = largest_indices(test_image, top_k)
top_k_true, top_k_true_arr = largest_indices(true_image, top_k)
l1_distance = np.linalg.norm(true_image - test_image, ord=1)
l2_distance = np.linalg.norm(true_image - test_image, ord=2)
mse = mt.mean_squared_error(test_image, true_image)
top_k_diff = len(top_k_true.intersection(top_k_test))
wasserstein = stats.wasserstein_distance(
test_image.reshape(-1), true_image.reshape(-1))
f1 = mt.f1_score(top_k_true_arr.reshape(-1), top_k_test_arr.reshape(-1))
mutual = mt.mutual_info_score(true_image.reshape(-1), test_image.reshape(-1))
metrics = Metrics(l1_distance=l1_distance, l2_distance=l2_distance,
mse=mse, f1=f1, wasserstein=wasserstein,
hotspots_count=top_k_diff, mutual_info=mutual)
return metrics
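# A small end-to-end sketch; the 4x4 random images and the top_k/total_size
# values are illustrative assumptions, not settings used in this project.
if __name__ == '__main__':
  rng = np.random.RandomState(0)
  true_image = rng.rand(4, 4)
  test_image = true_image + 0.1 * rng.rand(4, 4)
  m = get_metrics(test_image, true_image, top_k=3, total_size=8)
  print(m.f1, m.l2_distance, m.hotspots_count)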
|
google-research/federated
|
analytics/location_heatmaps/metrics.py
|
Python
|
apache-2.0
| 5,325
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-07 01:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('organisation', '0007_auto_20170323_1140'),
]
operations = [
migrations.AlterField(
model_name='departmentuser',
name='ad_guid',
field=models.CharField(editable=False, help_text='Locally stored GUID. This field must match GUID in the AD object for sync to be successful', max_length=48, unique=True),
),
]
|
rockychen-dpaw/oim-cms
|
organisation/migrations/0008_auto_20170407_0930.py
|
Python
|
apache-2.0
| 598
|
"""
Copyright (C) 2017 Open Source Robotics Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import numpy as np
import latlon
import enu
class Ecef(object):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
@classmethod
def equatorial_radius(cls):
# Semi-major axis of the WGS84 spheroid (meters)
return 6378137.0
@classmethod
def polar_radius(cls):
        # Semi-minor axis of the WGS84 spheroid (meters)
return 6356752.314245
@classmethod
def first_eccentricity_parameter(cls):
# https://en.wikipedia.org/wiki/Eccentricity_(mathematics)#Ellipses
# As a reference, this constant was computed as math.sqrt(1.0 - (Ecef.polar_radius() ** 2) / (Ecef.equatorial_radius() ** 2))
return 0.08181919084296556
@classmethod
def second_eccectricity_parameter(cls):
# https://en.wikipedia.org/wiki/Eccentricity_(mathematics)#Ellipses
# As a reference, this constant was computed as math.sqrt((Ecef.equatorial_radius() ** 2) / (Ecef.polar_radius() ** 2) - 1.0)
return 0.08209443795004348
def to_latlon(self):
lon_in_radians = math.atan2(self.y, self.x)
lon_in_degrees = math.degrees(lon_in_radians)
xy_norm = math.sqrt(self.x ** 2 + self.y ** 2)
a = Ecef.equatorial_radius()
b = Ecef.polar_radius()
p = Ecef.second_eccectricity_parameter()
e = Ecef.first_eccentricity_parameter()
angle = math.atan((self.z * a) / (xy_norm * b))
lat_in_radians = math.atan((self.z + ((p ** 2) * b * (math.sin(angle) ** 3))) /
(xy_norm - (e ** 2) * a * (math.cos(angle) ** 3)))
lat_in_degrees = math.degrees(lat_in_radians)
return latlon.LatLon(lat_in_degrees, lon_in_degrees)
def to_global(self, origin):
# this doesn't work at the poles because longitude is not uniquely defined there
sin_lon = origin._sin_lon()
sin_lat = origin._sin_lat()
cos_lon = origin._cos_lon()
cos_lat = origin._cos_lat()
local_vector_in_ecef = self - origin.to_ecef()
ecef_vector = np.array([[local_vector_in_ecef.x],
[local_vector_in_ecef.y],
[local_vector_in_ecef.z]])
ecef_to_global_matrix = np.array([[-sin_lon, cos_lon, 0.0],
[-cos_lon * sin_lat, -sin_lon * sin_lat, cos_lat],
[cos_lon * cos_lat, sin_lon * cos_lat, sin_lat]])
enu_vector = np.dot(ecef_to_global_matrix, ecef_vector)
return enu.Enu(enu_vector[0][0], enu_vector[1][0], enu_vector[2][0])
def __eq__(self, other):
return self.x == other.x and self.y == other.y and self.z == other.z
def __hash__(self):
return hash((self.x, self.y, self.z))
def __add__(self, other):
return Ecef(self.x + other.x, self.y + other.y, self.z + other.z)
def __sub__(self, other):
return Ecef(self.x - other.x, self.y - other.y, self.z - other.z)
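# A quick sanity-check sketch: a point on the equator at longitude 0 should map
# back to latitude/longitude (0, 0). Only the conversion and the arithmetic
# helpers defined above are exercised; LatLon's own accessors live elsewhere.
if __name__ == '__main__':
    equator_point = Ecef(Ecef.equatorial_radius(), 0.0, 0.0)
    print(equator_point.to_latlon())
    shifted = equator_point + Ecef(1.0, 2.0, 3.0)
    print(shifted.x, shifted.y, shifted.z)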
|
ekumenlabs/terminus
|
terminus/geometry/ecef.py
|
Python
|
apache-2.0
| 3,608
|
import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=8, path_list=[
[TestAction.create_vm, 'vm1', 'flag=sblk'],
[TestAction.create_volume, 'volume1', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'flag=sblk,scsi'],
[TestAction.attach_volume, 'vm1', 'volume2'],
[TestAction.create_volume, 'volume3', 'flag=ceph,scsi'],
[TestAction.attach_volume, 'vm1', 'volume3'],
[TestAction.create_volume_snapshot, 'volume3', 'volume3-snapshot1'],
[TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot2'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot3'],
[TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot7'],
[TestAction.create_volume_snapshot, 'vm1-root', 'vm1-root-snapshot8'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot9'],
[TestAction.create_volume_snapshot, 'volume3', 'volume3-snapshot13'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot14'],
[TestAction.create_vm_snapshot, 'vm1', 'vm1-snapshot18'],
[TestAction.stop_vm, 'vm1'],
[TestAction.use_vm_snapshot, 'vm1-snapshot3'],
[TestAction.start_vm, 'vm1'],
[TestAction.stop_vm, 'vm1'],
[TestAction.reinit_vm, 'vm1'],
[TestAction.start_vm, 'vm1'],
[TestAction.delete_volume_snapshot, 'vm1-root-snapshot8'],
[TestAction.delete_volume_snapshot, 'vm1-root-snapshot7'],
[TestAction.delete_volume_snapshot, 'volume2-snapshot3'],
])
'''
The final status:
Running:['vm1']
Stopped:[]
Enabled:['volume3-snapshot1', 'vm1-root-snapshot2', 'vm1-snapshot3', 'volume1-snapshot3', 'volume3-snapshot3', 'vm1-snapshot9', 'volume1-snapshot9', 'volume2-snapshot9', 'volume3-snapshot9', 'volume3-snapshot13', 'vm1-snapshot14', 'volume1-snapshot14', 'volume2-snapshot14', 'volume3-snapshot14', 'vm1-snapshot18', 'volume1-snapshot18', 'volume2-snapshot18', 'volume3-snapshot18']
attached:['volume1', 'volume2', 'volume3']
Detached:[]
Deleted:['vm1-root-snapshot8', 'vm1-root-snapshot7', 'volume2-snapshot3']
Expunged:[]
Ha:[]
Group:
vm_snap2:['vm1-snapshot9', 'volume1-snapshot9', 'volume2-snapshot9', 'volume3-snapshot9']---vm1volume1_volume2_volume3
vm_snap3:['vm1-snapshot14', 'volume1-snapshot14', 'volume2-snapshot14', 'volume3-snapshot14']---vm1volume1_volume2_volume3
vm_snap1:['vm1-snapshot3', 'volume1-snapshot3', 'volume2-snapshot3', 'volume3-snapshot3']---vm1volume1_volume2_volume3
vm_snap4:['vm1-snapshot18', 'volume1-snapshot18', 'volume2-snapshot18', 'volume3-snapshot18']---vm1volume1_volume2_volume3
'''
|
zstackio/zstack-woodpecker
|
integrationtest/vm/multihosts/vm_snapshots/paths/sc_path33.py
|
Python
|
apache-2.0
| 2,622
|
# coding: utf-8
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from wavefront_api_client.configuration import Configuration
class ResponseContainerUserApiToken(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'response': 'UserApiToken',
'status': 'ResponseStatus'
}
attribute_map = {
'response': 'response',
'status': 'status'
}
def __init__(self, response=None, status=None, _configuration=None): # noqa: E501
"""ResponseContainerUserApiToken - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._response = None
self._status = None
self.discriminator = None
if response is not None:
self.response = response
self.status = status
@property
def response(self):
"""Gets the response of this ResponseContainerUserApiToken. # noqa: E501
:return: The response of this ResponseContainerUserApiToken. # noqa: E501
:rtype: UserApiToken
"""
return self._response
@response.setter
def response(self, response):
"""Sets the response of this ResponseContainerUserApiToken.
:param response: The response of this ResponseContainerUserApiToken. # noqa: E501
:type: UserApiToken
"""
self._response = response
@property
def status(self):
"""Gets the status of this ResponseContainerUserApiToken. # noqa: E501
:return: The status of this ResponseContainerUserApiToken. # noqa: E501
:rtype: ResponseStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ResponseContainerUserApiToken.
:param status: The status of this ResponseContainerUserApiToken. # noqa: E501
:type: ResponseStatus
"""
if self._configuration.client_side_validation and status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ResponseContainerUserApiToken, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ResponseContainerUserApiToken):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ResponseContainerUserApiToken):
return True
return self.to_dict() != other.to_dict()
|
wavefrontHQ/python-client
|
wavefront_api_client/models/response_container_user_api_token.py
|
Python
|
apache-2.0
| 4,769
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Finite difference approximations of the Fisher-Rao norm regularizer.
The provided utility routines are used to create Fisher-Rao norm regularizers.
The implementations use finite difference perturbations of the parameters in the
original loss to approximate the necessary gradient-vector products.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class VariableCollector(object):
"""Helper class with custom getter to collect `Variable` objects in a scope.
When called for the first time the custom getter stores the corresponding
`Variable` object in dictionary.
When called subsequently for the same `Variable` name the getter will return
the object from the dictionary instead of calling the original getter.
"""
def __init__(self):
self.variables = {}
def collector_getter(self, getter, name, *args, **kwargs):
"""Custom getter for `VariableScope` that stores `Variable` in dictionary.
Args:
getter: Function, original getter function
name: String, name of `Variable` in the scope
*args: Additional arguments, currently only passed forward on first
call of the original getter
**kwargs: Additional arguments, currently only passed forward on first
call of the original getter
Returns:
A `Tensor` object that contains the named `Variable` either from calling
the original getter or if available from the dictionary.
"""
if name not in self.variables:
self.variables[name] = getter(name, *args, **kwargs)
# TODO(jonathanjh): Add consistency check for args and kwargs.
return self.variables[name]
def make_perturbation_getter(should_regularize, collector, perturbation):
"""Creates custom getter to replace variables in scope by their perturbations.
Args:
should_regularize: Function, takes a variable name as String and returns
Boolean that decides whether the variable should be regularized.
collector: `VariableCollector` object that provides the dictionary to use
for subsequent use of the same `Variable` object for the same name.
perturbation: Float, perturbation value added on top of 1 to use for
variable replacement value.
Returns:
A custom getter function that can be used with `VariableScope`.
"""
def plus_getter(getter, name, *args, **kwargs):
var = collector.collector_getter(getter, name, *args, **kwargs)
if should_regularize(name) and kwargs.get("trainable"):
var = (1. + perturbation) * var
return var
return plus_getter
def make_empirical_fisher_regularizer(make_logits, labels, scope,
should_regularize, perturbation):
"""Creates per-example logits and the per-example empirical Fisher-Rao norm.
This function assumes the model of a categorical distribution generated by a
softmax function.
The empirical Fisher-Rao norm uses the empirical training distribution for
both the input values and the labels to estimate the Fisher information
matrix.
Args:
make_logits: Function, returns `Tensor` representing the per-example logits.
The expected shape of the tensor is such that the number of categories
is the last dimension.
labels: Tensor, encoding of the class labels compatible in dimension with
the return of the make_logits function.
scope: String, name of `VariableScope` to use for the `Variable` objects
that represent the regularized parameter.
should_regularize: Function, takes a variable name as String and returns
Boolean that decides whether the variable should be regularized.
The passed variable name includes the name of the scope.
perturbation: Float, finite difference perturbation constant.
The choice of perturbation constant represents a tradeoff between rounding
and approximation error and should depend on floating point precision and
parameter norm.
Returns:
A tuple of `Tensor` objects representing the per-example logits and the
scalar empirical Fisher-Rao norm regularization loss.
Raises:
ValueError: if the last dimension of the logits shape is not statically
inferrable.
"""
collector = VariableCollector()
with tf.variable_scope(scope, custom_getter=collector.collector_getter):
logits = make_logits()
if logits.shape[-1].value is None:
raise ValueError("The size of the last dimension of the logits vector must"
" be statically inferrable.")
with tf.variable_scope(
scope,
custom_getter=
make_perturbation_getter(should_regularize, collector, perturbation)):
perturbed_logits = make_logits()
loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
perturbed_loss = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=perturbed_logits)
regularizer = tf.square(
tf.divide(tf.subtract(perturbed_loss, loss), perturbation))
regularizer = tf.reduce_mean(regularizer)
return (logits, regularizer)
def make_standard_fisher_regularizer(make_logits, scope, should_regularize,
perturbation, differentiate_probability):
"""Creates per-example logits and the per-example standard Fisher-Rao norm.
This function assumes the model of a categorical distribution generated by a
softmax function.
The standard Fisher-Rao norm uses the model distribution computed from the
logits by the softmax function to estimate the Fisher information matrix.
The empirical training distribution is used for the input values.
Args:
make_logits: Function, returns `Tensor` representing the per-example logits.
The expected shape of the tensor is such that the number of categories
is the last dimension.
scope: String, name of `VariableScope` to use for the `Variable` objects
that represent the regularized parameter.
should_regularize: Function, takes a variable name as String and returns
Boolean that decides whether the variable should be regularized.
The passed variable name includes the name of the scope.
perturbation: Float, finite difference perturbation constant.
The choice of perturbation constant represents a tradeoff between rounding
and approximation error and should depend on floating point precision and
parameter norm.
differentiate_probability: Boolean, determines whether the label probability
distribution should be differentiated.
Returns:
A tuple of `Tensor` objects representing the per-example logits and the
scalar standard Fisher-Rao norm regularization loss.
Raises:
ValueError: if the last dimension of the logits shape is not statically
inferrable.
"""
collector = VariableCollector()
with tf.variable_scope(scope, custom_getter=collector.collector_getter):
logits = make_logits()
if logits.shape[-1].value is None:
raise ValueError("The size of the last dimension of the logits vector must"
" be statically inferrable.")
with tf.variable_scope(
scope,
custom_getter=
make_perturbation_getter(should_regularize, collector, perturbation)):
perturbed_logits = make_logits()
log_probs = tf.nn.log_softmax(logits, axis=-1)
perturbed_log_probs = tf.nn.log_softmax(perturbed_logits, axis=-1)
stop_probs = tf.stop_gradient(tf.exp(log_probs))
log_prob_derivative = (tf.square((perturbed_log_probs - log_probs) /
perturbation))
if differentiate_probability:
prob_regularizer_loss = (log_prob_derivative * stop_probs +
tf.stop_gradient(log_prob_derivative) * log_probs *
stop_probs -
tf.stop_gradient(log_prob_derivative * log_probs *
stop_probs))
else:
prob_regularizer_loss = log_prob_derivative * stop_probs
regularizer = logits.shape[-1].value * tf.reduce_mean(prob_regularizer_loss)
return (logits, regularizer)
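# A minimal TF1-style wiring sketch. The input/label shapes, scope name,
# perturbation constant and regularization weight below are illustrative
# assumptions, not values prescribed by this module.
if __name__ == "__main__":
  inputs = tf.placeholder(tf.float32, [None, 784])
  labels = tf.placeholder(tf.float32, [None, 10])
  def make_logits():
    weights = tf.get_variable("weights", shape=[784, 10])
    biases = tf.get_variable("biases", shape=[10],
                             initializer=tf.zeros_initializer())
    return tf.matmul(inputs, weights) + biases
  logits, fisher_rao_norm = make_empirical_fisher_regularizer(
      make_logits, labels, scope="model",
      should_regularize=lambda name: "weights" in name, perturbation=1e-3)
  base_loss = tf.reduce_mean(
      tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
  total_loss = base_loss + 0.01 * fisher_rao_norm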
|
brain-research/fisher-rao-regularization
|
direct_fisher_rao.py
|
Python
|
apache-2.0
| 8,711
|
import settings
import logging
import struct
from ingest import Ingest
from appAux import loadFile
import pyregf
from AmCacheParser import _processAmCacheFile_StringIO
import settings
import ntpath
logger = logging.getLogger(__name__)
# Module to ingest AmCache data
# File name and format is what you get from a Mir or HX FileAcquisition audit
# Note: Exactly the same as amcache_raw_hive with a different file_name_filter
class Amcache_miracquisition(Ingest):
ingest_type = "amcache_miracquisition"
file_name_filter = "(?:.*)(?:\/|\\\)(.*)(?:-[A-Za-z0-9]{64}-\d{1,10}-\d{1,10}_octet-stream\.xml|_[a-zA-Z0-9]{22}\.xml)$"
def __init__(self):
super(Amcache_miracquisition, self).__init__()
def getHostName(self, file_name_fullpath):
if not settings.__PYREGF__:
logger.warning("AmCache processing disabled (missing pyregf) skipping file: %s" % file_name_fullpath)
else: return super(Amcache_miracquisition, self).getHostName(file_name_fullpath)
def checkMagic(self, file_name_fullpath):
magic_ok = False
# Check magic
magic_id = self.id_filename(file_name_fullpath)
if 'registry file' in magic_id:
# Perform a deeper check using pyregf
file_object = loadFile(file_name_fullpath)
regf_file = pyregf.file()
regf_file.open_file_object(file_object, "r")
magic_key = regf_file.get_key_by_path(r'Root\File')
if magic_key is None:
# Check if it's a Windows 10 AmCache hive
magic_key = regf_file.get_key_by_path(r'Root\InventoryApplicationFile')
regf_file.close()
del regf_file
if magic_key is not None:
magic_ok = True
# Need to close these or the memory will never get freed:
file_object.close()
del file_object
return magic_ok
def calculateID(self, file_name_fullpath):
instanceID = None
file_object = loadFile(file_name_fullpath)
regf_file = pyregf.file()
regf_file.open_file_object(file_object, "r")
tmp = regf_file.get_key_by_path(r'Root\File')
if regf_file.get_key_by_path(r'Root\File') is None and regf_file.get_key_by_path(r'Root\InventoryApplicationFile') is None:
logger.warning("Not an AmCache hive! [%s]" % file_name_fullpath)
else:
instanceID = regf_file.root_key.last_written_time
# Need to close these or the memory will never get freed:
regf_file.close()
del regf_file
file_object.close()
del file_object
return instanceID
def processFile(self, file_fullpath, hostID, instanceID, rowsData):
rowNumber = 0
file_object = loadFile(file_fullpath)
rows = _processAmCacheFile_StringIO(file_object)
file_object.close()
for r in rows:
namedrow = settings.EntriesFields(HostID = hostID, EntryType = settings.__AMCACHE__,
RowNumber = rowNumber,
FilePath = (None if r.path == None else ntpath.dirname(r.path)),
FileName = (None if r.path == None else ntpath.basename(r.path)),
Size = r.size, ExecFlag = 'True',
SHA1 = (None if r.sha1 == None else r.sha1[4:]),
FileDescription = r.file_description,
FirstRun = r.first_run,
Created = r.created_timestamp,
Modified1 = r.modified_timestamp,
Modified2 = r.modified_timestamp2,
LinkerTS = r.linker_timestamp,
Product = r.product,
Company = r.company,
PE_sizeofimage = r.pe_sizeofimage,
Version_number = r.version_number,
Version = r.version,
Language = r.language,
Header_hash = r.header_hash,
PE_checksum = r.pe_checksum,
SwitchBackContext = r.switchbackcontext,
InstanceID = instanceID)
rowsData.append(namedrow)
rowNumber += 1
|
mbevilacqua/appcompatprocessor
|
Ingest/amcache_miracquisition.py
|
Python
|
apache-2.0
| 4,034
|
import urllib
from datetime import datetime
from threading import Thread
from Queue import Queue
base_url="http://ichart.finance.yahoo.com/table.csv?"
def get_historical(symbols,start=None,end=None,threads=0):
if isinstance(symbols,str):
return get_historical_single(symbols,start,end)
quotes={}
if threads:
def quoter():
while True:
data = q.get()
quotes[data[0]]=get_historical_single(data[0],data[1],data[2])
q.task_done()
q = Queue()
for i in range(threads):
t = Thread(target=quoter)
t.setDaemon(True)
t.start()
for sym in symbols: q.put((sym,start,end))
q.join()
else:
for sym in symbols:
quotes[sym]=get_historical_single(sym,start,end)
return quotes
def get_historical_single(symbol,start=None,end=None):
full_url=base_url+"&s="+symbol
if start:
full_url+="&a=%i&b=%i&c=%i"%(start.month-1,start.day,start.year)
if end:
full_url+="&d=%i&e=%i&f=%i"%(end.month-1,end.day,end.year)
full_url+="&g=d"
quotes={}
quotes['raw']=[]
quotes['by_date']={}
quotes['dates']=[]
quotes['opens']=[]
quotes['highs']=[]
quotes['lows']=[]
quotes['closes']=[]
quotes['volumes']=[]
quotes['adjusted_closes']=[]
quotes_lines=urllib.urlopen(full_url).read().split('\n')[1:-1]
for quote_line in quotes_lines:
#quote_line structure: Date,Open,High,Low,Close,Volume,Adj Close
splt_q=quote_line.split(',')
date=datetime(*(map(int,splt_q[0].split('-'))))
op=float(splt_q[1])
hi=float(splt_q[2])
lo=float(splt_q[3])
close=float(splt_q[4])
volume=int(splt_q[5])
adj_close=float(splt_q[6])
quote=dict(date=date,open=op,high=hi,low=lo,close=close,volume=volume,adj_close=adj_close)
quotes['raw'].append(quote)
quotes['by_date'][date]=quote
quotes['dates'].append(date)
quotes['opens'].append(op)
quotes['highs'].append(hi)
quotes['lows'].append(lo)
quotes['closes'].append(close)
quotes['volumes'].append(volume)
quotes['adjusted_closes'].append(adj_close)
return quotes
if __name__ == '__main__':
start_date=datetime(2005,1,1)
symbols=['F.MI','AAPL','IBM','GOOG']
quotes=get_historical(symbols,start=start_date,threads=4)
for k in symbols:
print '%s: %i quotes'%(k,len(quotes[k]['closes']))
|
iamaris/pystock
|
acstate.py
|
Python
|
apache-2.0
| 2,596
|
# Author: Alan
from PIL import Image
import numpy as np
def test1():
input_path = "./laska.png"
maxsize = (32, 32)
# box = (0, 0, 28, 28)
lena = Image.open(input_path)
# print(type(lena))
lena.thumbnail(maxsize, Image.ANTIALIAS)
lena.save("temp.jpg", "JPEG")
# lena = Image.open("temp.jpg").crop(box)
minsize = (224, 224)
input_path = "./temp.jpg"
lena = Image.open(input_path)
# print(type(lena))
# lena.thumbnail(minsize, Image.ANTIALIAS)
lena.resize(minsize, Image.ANTIALIAS).save("temp.png")
# lena.save("temp.png", "PNG")
def test2():
# input_path = "./temp.jpg"
# lena = Image.open(input_path)
# print((np.array(lena)).shape)
# print(np.array(lena))
# batch = np.reshape(lena, [-1])
# print(batch[0:10])
#
# ans = []
# for j in range(3):
# for k in range(7):
# # print(j, k, len(ans))
# for i in range(32):
# t = batch[i * 32 + j * 1024:(i + 1) * 32 + j * 1024]
# t = list(t)
    #             ans.extend(t * 7)  # repeat 7 times, which fills the width exactly
new_img = Image.new('RGB', (224, 224), 255)
x = 0
for i in range(7):
y = 0
for j in range(7):
img = Image.open("./temp.jpg")
width, height = img.size
new_img.paste(img, (x, y))
y += height
x += 32
# lena = Image.fromarray(np.reshape(ans, [224,224,3]))
new_img.save("test2.png")
if __name__ == "__main__":
test2()
|
songjs1993/DeepLearning
|
3CNN/vgg/analysis_classification.py
|
Python
|
apache-2.0
| 1,519
|
from flask import Blueprint, session, render_template
from flask import Flask, render_template, request, redirect, abort, flash, session, url_for, send_from_directory
from sqlalchemy import case, func, exc as sql_exceptions
from werkzeug import exceptions as http_exceptions
from database import SessionLoader, DummySession, Domain, DomainAttrs, dSIPDomainMapping, dSIPMultiDomainMapping, Dispatcher, Gateways, Address
from modules.api.api_routes import addEndpointGroups
from shared import *
import settings
import globals
import re
domains = Blueprint('domains', __name__)
# Gateway to IP - A gateway can be a carrier or PBX
def gatewayIdToIP(pbx_id, db):
gw = db.query(Gateways).filter(Gateways.gwid == pbx_id).first()
if gw is not None:
return gw.address
def addDomain(domain, authtype, pbxs, notes, db):
# Create the domain because we need the domain id
PBXDomain = Domain(domain=domain, did=domain)
db.add(PBXDomain)
db.flush()
# Check if list of PBX's
if pbxs:
pbx_list = re.split(' |,',pbxs)
else:
pbx_list = []
# If list is found
if len(pbx_list) > 1 and authtype == "passthru":
pbx_id=pbx_list[0]
else:
        # Else a single value was submitted
pbx_id = pbxs
    # Implement Passthru authentication against the first PBX on the list,
    # because Passthru registration only works against one PBX
if authtype == "passthru":
PBXDomainAttr1 = DomainAttrs(did=domain, name='pbx_list', value=pbx_id)
PBXDomainAttr2 = DomainAttrs(did=domain, name='pbx_type', value="0")
PBXDomainAttr3 = DomainAttrs(did=domain, name='created_by', value="0")
PBXDomainAttr4 = DomainAttrs(did=domain, name='domain_auth', value=authtype)
PBXDomainAttr5 = DomainAttrs(did=domain, name='description', value="notes:{}".format(notes))
PBXDomainAttr6 = DomainAttrs(did=domain, name='pbx_ip', value=gatewayIdToIP(pbx_id, db))
db.add(PBXDomainAttr1)
db.add(PBXDomainAttr2)
db.add(PBXDomainAttr3)
db.add(PBXDomainAttr4)
db.add(PBXDomainAttr5)
db.add(PBXDomainAttr6)
# MSTeams Support
elif authtype == "msteams":
# Set of MS Teams Proxies
msteams_dns_endpoints=settings.MSTEAMS_DNS_ENDPOINTS
msteams_ip_endpoints=settings.MSTEAMS_IP_ENDPOINTS
# Attributes to specify that the domain was created manually
PBXDomainAttr1 = DomainAttrs(did=domain, name='pbx_list', value="{}".format(",".join(msteams_dns_endpoints)))
PBXDomainAttr2 = DomainAttrs(did=domain, name='pbx_type', value="0")
PBXDomainAttr3 = DomainAttrs(did=domain, name='created_by', value="0")
PBXDomainAttr4 = DomainAttrs(did=domain, name='domain_auth', value=authtype)
PBXDomainAttr5 = DomainAttrs(did=domain, name='description', value="notes:{}".format(notes))
        # Serial forking will be used to forward registration info to multiple PBXs
PBXDomainAttr6 = DomainAttrs(did=domain, name='dispatcher_alg_reg', value="4")
PBXDomainAttr7 = DomainAttrs(did=domain, name='dispatcher_alg_in', value="4")
# Create entry in dispatcher and set dispatcher_set_id in domain_attrs
PBXDomainAttr8 = DomainAttrs(did=domain, name='dispatcher_set_id', value=PBXDomain.id)
# Use the default MS Teams SIP Proxy List if one isn't defined
print("pbx list {}".format(pbx_list))
if len(pbx_list) == 0 or pbx_list[0] == '':
for endpoint in msteams_dns_endpoints:
# Logic to handle when dSIPRouter is behind NAT (aka servernat is enabled)
if settings.EXTERNAL_IP_ADDR != settings.INTERNAL_IP_ADDR:
socket_addr = settings.INTERNAL_IP_ADDR
else:
socket_addr = settings.EXTERNAL_IP_ADDR
dispatcher = Dispatcher(setid=PBXDomain.id, destination=endpoint, attrs="socket=tls:{}:5061;ping_from=sip:{}".format(socket_addr,domain),description='msteam_endpoint:{}'.format(endpoint))
db.add(dispatcher)
db.add(PBXDomainAttr1)
db.add(PBXDomainAttr2)
db.add(PBXDomainAttr3)
db.add(PBXDomainAttr4)
db.add(PBXDomainAttr5)
db.add(PBXDomainAttr6)
db.add(PBXDomainAttr7)
db.add(PBXDomainAttr8)
        # Check if the MSTeams IP(s) that send us OPTIONS messages are in the address table
for endpoint_ip in msteams_ip_endpoints:
address_query = db.query(Address).filter(Address.ip_addr == endpoint_ip).first()
if address_query is None:
Addr = Address("msteams-sbc", endpoint_ip, 32, settings.FLT_MSTEAMS, gwgroup=0)
db.add(Addr)
# Add Endpoint group to enable Inbound Mapping
endpointGroup = {"name":domain,"endpoints":None}
endpoints = []
for hostname in msteams_dns_endpoints:
endpoints.append({"hostname":hostname,"description":"msteams_endpoint","maintmode":False});
endpointGroup['endpoints'] = endpoints
addEndpointGroups(endpointGroup,"msteams",domain)
    # Implement external authentication to either Realtime DB or Local Subscriber table
else:
# Attributes to specify that the domain was created manually
PBXDomainAttr1 = DomainAttrs(did=domain, name='pbx_list', value=str(pbx_list))
PBXDomainAttr2 = DomainAttrs(did=domain, name='pbx_type', value="0")
PBXDomainAttr3 = DomainAttrs(did=domain, name='created_by', value="0")
PBXDomainAttr4 = DomainAttrs(did=domain, name='domain_auth', value=authtype)
PBXDomainAttr5 = DomainAttrs(did=domain, name='description', value="notes:{}".format(notes))
        # Serial forking will be used to forward registration info to multiple PBXs
PBXDomainAttr6 = DomainAttrs(did=domain, name='dispatcher_alg_reg', value="8")
PBXDomainAttr7 = DomainAttrs(did=domain, name='dispatcher_alg_in', value="4")
# Create entry in dispatcher and set dispatcher_set_id in domain_attrs
PBXDomainAttr8 = DomainAttrs(did=domain, name='dispatcher_set_id', value=PBXDomain.id)
for pbx_id in pbx_list:
dispatcher = Dispatcher(setid=PBXDomain.id, destination=gatewayIdToIP(pbx_id, db), description='pbx_id:{}'.format(pbx_id))
db.add(dispatcher)
db.add(PBXDomainAttr1)
db.add(PBXDomainAttr2)
db.add(PBXDomainAttr3)
db.add(PBXDomainAttr4)
db.add(PBXDomainAttr5)
db.add(PBXDomainAttr6)
db.add(PBXDomainAttr7)
db.add(PBXDomainAttr8)
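def addDomains(domainlist, authtype, pbxs, notes, db):
    """Hedged helper sketch (not part of the original module): add a comma
    separated list of domains by calling addDomain() once per entry,
    mirroring the loop in addUpdateDomain() further down."""
    for domain in domainlist.split(","):
        addDomain(domain.strip(), authtype, pbxs, notes, db)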
@domains.route("/domains/msteams/<int:id>", methods=['GET'])
def configureMSTeams(id):
db = DummySession()
try:
if (settings.DEBUG):
debugEndpoint()
db = SessionLoader()
if not session.get('logged_in'):
return render_template('index.html')
domain_query = db.query(Domain).filter(Domain.id == id)
domain = domain_query.first()
return render_template('msteams.html', domainid=id, domain=domain)
except sql_exceptions.SQLAlchemyError as ex:
debugException(ex)
error = "db"
db.rollback()
db.flush()
return showError(type=error)
except http_exceptions.HTTPException as ex:
debugException(ex)
error = "http"
db.rollback()
db.flush()
return showError(type=error)
except Exception as ex:
debugException(ex, log_ex=True, print_ex=True, showstack=True)
error = "server"
db.rollback()
db.flush()
return showError(type=error)
finally:
db.close()
@domains.route("/domains", methods=['GET'])
def displayDomains():
db = DummySession()
try:
if (settings.DEBUG):
debugEndpoint()
db = SessionLoader()
if not session.get('logged_in'):
return render_template('index.html', version=settings.VERSION)
# sql1 = "select domain.id,domain.domain,dsip_domain_mapping.type,dsip_domain_mapping.pbx_id,dr_gateways.description from domain left join dsip_domain_mapping on domain.id = dsip_domain_mapping.domain_id left join dr_gateways on dsip_domain_mapping.pbx_id = dr_gateways.gwid;"
sql1 = "select distinct domain.did as domain, domain.id, value as type from domain_attrs join domain on domain.did = domain_attrs.did where name='pbx_type';"
res = db.execute(sql1)
sql2 = """select distinct domain_attrs.did, pbx_list, domain_auth, creator, description from domain_attrs join
( select did,value as pbx_list from domain_attrs where name='pbx_list' ) t1
on t1.did=domain_attrs.did join
( select did,value as description from domain_attrs where name='description' ) t2
on t2.did=domain_attrs.did join
( select did,value as domain_auth from domain_attrs where name='domain_auth' ) t3
on t3.did=domain_attrs.did join
( select did,description as creator from domain_attrs left join dr_gw_lists on domain_attrs.value = dr_gw_lists.id where name='created_by') t4
on t4.did=domain_attrs.did;"""
res2 = db.execute(sql2)
pbx_lookup = {}
for row in res2:
notes = strFieldsToDict(row['description'])['notes'] or ''
if row['creator'] is not None:
name = strFieldsToDict(row['creator'])['name'] or ''
else:
name = "Manually Created"
pbx_lookup[row['did']] = {
'pbx_list': str(row['pbx_list']).strip('[]').replace("'","").replace(",",", "),
'domain_auth': row['domain_auth'],
'name': name,
'notes': notes
}
return render_template('domains.html', rows=res, pbxlookup=pbx_lookup, hc=healthCheck())
except sql_exceptions.SQLAlchemyError as ex:
debugException(ex)
error = "db"
db.rollback()
db.flush()
return showError(type=error)
except http_exceptions.HTTPException as ex:
debugException(ex)
error = "http"
db.rollback()
db.flush()
return showError(type=error)
except Exception as ex:
debugException(ex, log_ex=True, print_ex=True, showstack=True)
error = "server"
db.rollback()
db.flush()
return showError(type=error)
finally:
db.close()
@domains.route("/domains", methods=['POST'])
def addUpdateDomain():
db = DummySession()
try:
if (settings.DEBUG):
debugEndpoint()
db = SessionLoader()
if not session.get('logged_in'):
return render_template('index.html')
form = stripDictVals(request.form.to_dict())
domain_id = form['domain_id'] if len(form['domain_id']) > 0 else ''
domainlist = form['domainlist'] if len(form['domainlist']) > 0 else ''
authtype = form['authtype'] if len(form['authtype']) > 0 else ''
pbxs = request.form['pbx_list'] if len(form['pbx_list']) > 0 else ''
notes = request.form['notes'] if len(form['notes']) > 0 else ''
# Adding
if len(domain_id) <= 0:
domainlist = domainlist.split(",")
for domain in domainlist:
addDomain(domain.strip(), authtype, pbxs, notes, db)
# Updating
else:
# remove old entries and add new ones
domain_query = db.query(Domain).filter(Domain.id == domain_id)
domain = domain_query.first()
if domain is not None:
db.query(DomainAttrs).filter(DomainAttrs.did == domain.did).delete(
synchronize_session=False
)
db.query(Dispatcher).filter(Dispatcher.setid == domain.id).delete(
synchronize_session=False
)
domain_query.delete(synchronize_session=False)
db.flush()
addDomain(domainlist.split(",")[0].strip(), authtype, pbxs, notes, db)
db.commit()
globals.reload_required = True
return displayDomains()
except sql_exceptions.SQLAlchemyError as ex:
debugException(ex, log_ex=True, print_ex=True, showstack=False)
error = "db"
db.rollback()
db.flush()
return showError(type=error)
except http_exceptions.HTTPException as ex:
debugException(ex)
error = "http"
db.rollback()
db.flush()
return showError(type=error)
except Exception as ex:
debugException(ex, log_ex=True, print_ex=True, showstack=False)
error = "server"
db.rollback()
db.flush()
return showError(type=error)
finally:
db.close()
@domains.route("/domainsdelete", methods=['POST'])
def deleteDomain():
db = DummySession()
try:
if (settings.DEBUG):
debugEndpoint()
db = SessionLoader()
if not session.get('logged_in'):
return render_template('index.html', version=settings.VERSION)
form = stripDictVals(request.form.to_dict())
domainid = form['domain_id'] if 'domain_id' in form else ''
domainname = form['domain_name'] if 'domain_name' in form else ''
dispatcherEntry = db.query(Dispatcher).filter(Dispatcher.setid == domainid)
domainAttrs = db.query(DomainAttrs).filter(DomainAttrs.did == domainname)
domainEntry = db.query(Domain).filter(Domain.did == domainname)
dispatcherEntry.delete(synchronize_session=False)
domainAttrs.delete(synchronize_session=False)
domainEntry.delete(synchronize_session=False)
db.commit()
globals.reload_required = True
return displayDomains()
except sql_exceptions.SQLAlchemyError as ex:
debugException(ex)
error = "db"
db.rollback()
db.flush()
return showError(type=error)
except http_exceptions.HTTPException as ex:
debugException(ex)
error = "http"
db.rollback()
db.flush()
return showError(type=error)
except Exception as ex:
debugException(ex)
error = "server"
db.rollback()
db.flush()
return showError(type=error)
finally:
db.close()
|
dOpensource/dsiprouter
|
gui/modules/domain/domain_routes.py
|
Python
|
apache-2.0
| 14,342
|
from __future__ import unicode_literals, with_statement
import logging
from compressor.cache import cache_set
from compressor.conf import settings
from django import template
from django.utils import six
try:
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
try:
from urllib.request import url2pathname
except ImportError:
from urllib import url2pathname
LOG = logging.getLogger(__name__)
# Some constants for nicer handling.
SOURCE_HUNK, SOURCE_FILE = 'inline', 'file'
METHOD_INPUT, METHOD_OUTPUT = 'input', 'output'
register = template.Library()
OUTPUT_FILE = 'file'
OUTPUT_INLINE = 'inline'
OUTPUT_MODES = (OUTPUT_FILE, OUTPUT_INLINE)
def compress_monkey_patch():
"""patch all compress
we need access to variables from widget scss
for example we have::
/themes/bootswatch/cyborg/_variables
but only if is cyborg active for this reasone we need
dynamically append import to every scss file
"""
from compressor.templatetags import compress as compress_tags
from compressor import base as compress_base
compress_base.Compressor.filter_input = filter_input
compress_base.Compressor.output = output
compress_base.Compressor.hunks = hunks
compress_tags.CompressorMixin.render_compressed = render_compressed
compress_tags.CompressorMixin.render_output = render_output
from django_pyscss import compressor as pyscss_compressor
pyscss_compressor.DjangoScssFilter.input = input
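# Hedged usage sketch (not part of the original module): the patch is meant to
# be applied once at startup, before any {% compress %} tag renders; the flag
# below simply guards against applying it twice.
_COMPRESS_PATCH_APPLIED = False
def apply_compress_patch_once():
    global _COMPRESS_PATCH_APPLIED
    if not _COMPRESS_PATCH_APPLIED:
        compress_monkey_patch()
        _COMPRESS_PATCH_APPLIED = True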
def render_compressed(self, context, kind, mode, forced=False):
# See if it has been rendered offline
cached_offline = self.render_offline(context, forced=forced)
if cached_offline:
return cached_offline
# Take a shortcut if we really don't have anything to do
if ((not settings.COMPRESS_ENABLED and
not settings.COMPRESS_PRECOMPILERS) and not forced):
return self.get_original_content(context)
context['compressed'] = {'name': getattr(self, 'name', None)}
compressor = self.get_compressor(context, kind)
# Prepare the actual compressor and check cache
cache_key, cache_content = self.render_cached(
compressor, kind, mode, forced=forced)
if cache_content is not None:
return cache_content
# call compressor output method and handle exceptions
try:
rendered_output = self.render_output(
compressor, mode, forced=forced, request=context.get('request'))
if cache_key:
cache_set(cache_key, rendered_output)
assert isinstance(rendered_output, six.string_types)
return rendered_output
except Exception:
if settings.DEBUG or forced:
raise
# Or don't do anything in production
return self.get_original_content(context)
def render_output(self, compressor, mode, forced=False, request=None):
return compressor.output(mode, forced=forced, request=request)
def input(self, **kwargs):
"""main override which append variables import to all scss content
"""
with_variables = None
try:
page = kwargs['request'].leonardo_page
except Exception as e:
LOG.exception(str(e))
else:
with_variables = """
@import "/themes/{}/{}/_variables";
{}
""".format(
page.theme.name.lower(),
page.color_scheme.name.lower(),
self.content)
return self.compiler.compile_string(
with_variables or self.content,
filename=self.filename)
def hunks(self, forced=False, request=None):
"""
The heart of content parsing, iterates over the
list of split contents and looks at its kind
to decide what to do with it. Should yield a
bunch of precompiled and/or rendered hunks.
"""
enabled = settings.COMPRESS_ENABLED or forced
for kind, value, basename, elem in self.split_contents():
precompiled = False
attribs = self.parser.elem_attribs(elem)
charset = attribs.get("charset", self.charset)
options = {
'method': METHOD_INPUT,
'elem': elem,
'kind': kind,
'basename': basename,
'charset': charset,
'request': request,
}
if kind == SOURCE_FILE:
options = dict(options, filename=value)
value = self.get_filecontent(value, charset)
if self.all_mimetypes:
precompiled, value = self.precompile(value, **options)
if enabled:
yield self.filter(value, **options)
else:
if precompiled:
yield self.handle_output(kind, value, forced=True,
basename=basename)
else:
yield self.parser.elem_str(elem)
def output(self, mode='file', forced=False, request=None):
"""
The general output method, override in subclass if you need to do
any custom modification. Calls other mode specific methods or simply
returns the content directly.
"""
output = '\n'.join(self.filter_input(forced, request=request))
if not output:
return ''
if settings.COMPRESS_ENABLED or forced:
filtered_output = self.filter_output(output)
return self.handle_output(mode, filtered_output, forced)
return output
def filter_input(self, forced=False, request=None):
"""
Passes each hunk (file or code) to the 'input' methods
of the compressor filters.
"""
content = []
for hunk in self.hunks(forced, request=request):
content.append(hunk)
return content
|
amboycharlie/Child-Friendly-LCMS
|
leonardo/utils/compress_patch.py
|
Python
|
apache-2.0
| 5,626
|
#
# Copyright 2013 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test listing raw events.
"""
import copy
import datetime
import mock
from oslo_utils import timeutils
from oslotest import mockpatch
from ceilometer.tests.api import v2
from ceilometer.tests import db as tests_db
class TestPostSamples(v2.FunctionalTest,
tests_db.MixinTestsWithBackendScenarios):
def fake_notifier_sample(self, ctxt, event_type, payload):
for m in payload:
del m['message_signature']
self.published.append(payload)
def setUp(self):
self.published = []
notifier = mock.Mock()
notifier.sample.side_effect = self.fake_notifier_sample
self.useFixture(mockpatch.Patch('oslo.messaging.Notifier',
return_value=notifier))
super(TestPostSamples, self).setUp()
def test_one(self):
s1 = [{'counter_name': 'apples',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
data = self.post_json('/meters/apples/', s1)
# timestamp not given so it is generated.
s1[0]['timestamp'] = data.json[0]['timestamp']
# Ignore message id that is randomly generated
s1[0]['message_id'] = data.json[0]['message_id']
# source is generated if not provided.
s1[0]['source'] = '%s:openstack' % s1[0]['project_id']
self.assertEqual(s1, data.json)
self.assertEqual(s1[0], self.published[0][0])
def test_nested_metadata(self):
s1 = [{'counter_name': 'apples',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'nest.name1': 'value1',
'name2': 'value2',
'nest.name2': 'value3'}}]
data = self.post_json('/meters/apples/', s1)
# timestamp not given so it is generated.
s1[0]['timestamp'] = data.json[0]['timestamp']
# Ignore message id that is randomly generated
s1[0]['message_id'] = data.json[0]['message_id']
# source is generated if not provided.
s1[0]['source'] = '%s:openstack' % s1[0]['project_id']
unwound = copy.copy(s1[0])
unwound['resource_metadata'] = {'nest': {'name1': 'value1',
'name2': 'value3'},
'name2': 'value2'}
# only the published sample should be unwound, not the representation
# in the API response
self.assertEqual(s1[0], data.json[0])
self.assertEqual(unwound, self.published[0][0])
def test_invalid_counter_type(self):
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'INVALID_TYPE',
'counter_unit': 'instance',
'counter_volume': 1,
'source': 'closedstack',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
data = self.post_json('/meters/my_counter_name/', s1,
expect_errors=True)
self.assertEqual(400, data.status_int)
self.assertEqual(0, len(self.published))
def test_messsage_id_provided(self):
"""Do not accept sample with message_id."""
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'message_id': 'evil',
'source': 'closedstack',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
data = self.post_json('/meters/my_counter_name/', s1,
expect_errors=True)
self.assertEqual(400, data.status_int)
self.assertEqual(0, len(self.published))
def test_wrong_project_id(self):
"""Do not accept cross posting samples to different projects."""
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'source': 'closedstack',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
data = self.post_json('/meters/my_counter_name/', s1,
expect_errors=True,
headers={
"X-Roles": "Member",
"X-Tenant-Name": "lu-tenant",
"X-Project-Id":
"bc23a9d531064583ace8f67dad60f6bb",
})
self.assertEqual(400, data.status_int)
self.assertEqual(0, len(self.published))
def test_multiple_samples(self):
"""Send multiple samples.
        The use case here is to reduce the chatter and send the counters
at a slower cadence.
"""
samples = []
for x in range(6):
dt = datetime.datetime(2012, 8, 27, x, 0, tzinfo=None)
s = {'counter_name': 'apples',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': float(x * 3),
'source': 'evil',
'timestamp': dt.isoformat(),
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'name1': str(x),
'name2': str(x + 4)}}
samples.append(s)
data = self.post_json('/meters/apples/', samples)
for x, s in enumerate(samples):
# source is modified to include the project_id.
s['source'] = '%s:%s' % (s['project_id'],
s['source'])
# Ignore message id that is randomly generated
s['message_id'] = data.json[x]['message_id']
# remove tzinfo to compare generated timestamp
# with the provided one
c = data.json[x]
timestamp = timeutils.parse_isotime(c['timestamp'])
c['timestamp'] = timestamp.replace(tzinfo=None).isoformat()
# do the same on the pipeline
msg = self.published[0][x]
timestamp = timeutils.parse_isotime(msg['timestamp'])
msg['timestamp'] = timestamp.replace(tzinfo=None).isoformat()
self.assertEqual(s, c)
self.assertEqual(s, self.published[0][x])
def test_missing_mandatory_fields(self):
"""Do not accept posting samples with missing mandatory fields."""
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'source': 'closedstack',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
# one by one try posting without a mandatory field.
for m in ['counter_volume', 'counter_unit', 'counter_type',
'resource_id', 'counter_name']:
s_broke = copy.copy(s1)
del s_broke[0][m]
print('posting without %s' % m)
data = self.post_json('/meters/my_counter_name', s_broke,
expect_errors=True)
self.assertEqual(400, data.status_int)
def test_multiple_project_id_and_admin(self):
"""Allow admin is allowed to set multiple project_id."""
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'source': 'closedstack',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
},
{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 2,
'source': 'closedstack',
'project_id': '4af38dca-f6fc-11e2-94f5-14dae9283f29',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
data = self.post_json('/meters/my_counter_name/', s1,
headers={"X-Roles": "admin"})
self.assertEqual(200, data.status_int)
for x, s in enumerate(s1):
# source is modified to include the project_id.
s['source'] = '%s:%s' % (s['project_id'],
'closedstack')
# Ignore message id that is randomly generated
s['message_id'] = data.json[x]['message_id']
# timestamp not given so it is generated.
s['timestamp'] = data.json[x]['timestamp']
s.setdefault('resource_metadata', dict())
self.assertEqual(s, data.json[x])
self.assertEqual(s, self.published[0][x])
def test_multiple_samples_multiple_sources(self):
"""Test posting with special conditions.
        Accept a single post containing multiple sources, some of them
        null.
"""
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'source': 'paperstack',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
},
{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 5,
'source': 'waterstack',
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
},
{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 2,
'project_id': '35b17138-b364-4e6a-a131-8f3099c5be68',
'user_id': 'efd87807-12d2-4b38-9c70-5f5c2ac427ff',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
data = self.post_json('/meters/my_counter_name/', s1,
expect_errors=True)
self.assertEqual(200, data.status_int)
for x, s in enumerate(s1):
# source is modified to include the project_id.
s['source'] = '%s:%s' % (
s['project_id'],
s.get('source', self.CONF.sample_source)
)
# Ignore message id that is randomly generated
s['message_id'] = data.json[x]['message_id']
# timestamp not given so it is generated.
s['timestamp'] = data.json[x]['timestamp']
s.setdefault('resource_metadata', dict())
self.assertEqual(s, data.json[x])
self.assertEqual(s, self.published[0][x])
def test_missing_project_user_id(self):
"""Ensure missing project & user IDs are defaulted appropriately."""
s1 = [{'counter_name': 'my_counter_name',
'counter_type': 'gauge',
'counter_unit': 'instance',
'counter_volume': 1,
'source': 'closedstack',
'resource_id': 'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
'resource_metadata': {'name1': 'value1',
'name2': 'value2'}}]
project_id = 'bc23a9d531064583ace8f67dad60f6bb'
user_id = 'fd87807-12d2-4b38-9c70-5f5c2ac427ff'
data = self.post_json('/meters/my_counter_name/', s1,
expect_errors=True,
headers={
'X-Roles': 'chief-bottle-washer',
'X-Project-Id': project_id,
'X-User-Id': user_id,
})
self.assertEqual(200, data.status_int)
for x, s in enumerate(s1):
# source is modified to include the project_id.
s['source'] = '%s:%s' % (project_id,
s['source'])
# Ignore message id that is randomly generated
s['message_id'] = data.json[x]['message_id']
# timestamp not given so it is generated.
s['timestamp'] = data.json[x]['timestamp']
s['user_id'] = user_id
s['project_id'] = project_id
self.assertEqual(s, data.json[x])
self.assertEqual(s, self.published[0][x])
|
Juniper/ceilometer
|
ceilometer/tests/api/v2/test_post_samples_scenarios.py
|
Python
|
apache-2.0
| 15,384
|
# RUN: %PYTHON %s 2>&1 | FileCheck %s
# This file contains simple test cases that combine various codegen options.
from ..core.experts import *
from ..core.harness import *
from ..core.transforms import *
from ..contraction.definitions import *
################################################################################
### Compilation strategies.
################################################################################
# No tiling.
expert_no_tiling = Bufferize().then(LoweringOnlyExpert('', ''))
# 1 level of tiling.
expert_tile_1 = \
Tile('matvec', 'linalg.generic', tile_sizes=[8, 24]) \
.then(Vectorize('matvec', '')) \
.then(Bufferize()) \
.then(LoweringOnlyExpert('', ''))
# 1 level of tile and interchange.
expert_tile_and_interchange_1 = \
Tile('matvec',
'linalg.generic',
tile_sizes=[8, 24],
tile_interchange=[1, 0]) \
.then(Vectorize('matvec', '')) \
.then(Bufferize()) \
.then(LoweringOnlyExpert('', ''))
# 1 level of tiling and then generalize and interchange.
expert_tile_1_and_generalize_interchange = \
Tile('matvec',
'linalg.generic',
tile_sizes=[8, 24],
tile_interchange=[1, 0]) \
.then(Generalize('matvec', 'linalg.generic')) \
.then(Interchange('matvec', iterator_interchange=[0, 1])) \
.then(Vectorize('matvec', '')) \
.then(Bufferize()) \
.then(LoweringOnlyExpert('', ''))
# 1 level of tiling, peel, scalarize the remaining dynamic dims.
expert_tile_1_peel_scalarize = \
Tile('matvec', 'linalg.generic', tile_sizes=[8], peel=[0]) \
.then(Tile('matvec', 'linalg.generic', scalarize_dyn_dims=True)) \
.then(Vectorize('matvec', '')) \
.then(Bufferize()) \
.then(LoweringOnlyExpert('', ''))
# 1 level of tiling, with padding.
expert_tile_1_pad = \
Tile('matvec',
'linalg.generic',
tile_sizes=[8, 24],
pad=True,
pack_paddings=[1, 1, 1]) \
.then(Vectorize('matvec', '')) \
.then(Bufferize()) \
.then(LoweringOnlyExpert('', ''))
# 1 level of tiling, with padding, hoisted.
expert_tile_1_pad_hoist = \
Tile('matvec',
'linalg.generic',
tile_sizes=[8, 24],
pad=True,
pack_paddings=[1, 1, 1],
hoist_paddings=[3, 3, 3]) \
.then(Vectorize('matvec', '')) \
.then(Bufferize()) \
.then(LoweringOnlyExpert('', ''))
# 2 levels of tiling, with padding, hoisted.
expert_tile_2_pad_hoist = \
Tile('matvec', 'linalg.generic', tile_sizes=[8, 24]) \
.then(Tile('matvec',
'linalg.generic',
tile_sizes=[4, 12],
pad=True,
pack_paddings=[1, 1, 1],
hoist_paddings=[6, 6, 6]))\
.then(Vectorize('matvec', '')) \
.then(Bufferize()) \
.then(LoweringOnlyExpert('', ''))
# 3 levels of tiling, with padding, hoisted. Peeling on the 3rd level.
expert_tile_3_pad_hoist_peel = \
Tile('matvec', 'linalg.generic', tile_sizes=[8, 24], pad=False) \
.then(Tile('matvec',
'linalg.generic',
tile_sizes=[4, 12],
pad=True,
pack_paddings=[1, 1, 1],
hoist_paddings=[6, 6, 6]))\
.then(Tile('matvec', 'linalg.generic', tile_sizes=[2, 7], peel=[0, 1]))\
.then(Vectorize('matvec', '')) \
.then(Bufferize()) \
.then(LoweringOnlyExpert('', ''))
# 3 levels of tiling, with padding, hoisted. Peeling on the 3rd level.
# Scalarize remaining dynamic dims.
expert_tile_3_pad_hoist_peel_scalarize = \
Tile('matvec', 'linalg.generic', tile_sizes=[8, 24])\
.then(Tile('matvec',
'linalg.generic',
tile_sizes=[4, 12],
pad=True,
pack_paddings=[1, 1, 1],
hoist_paddings=[6, 6, 6]))\
.then(Tile('matvec', 'linalg.generic', tile_sizes=[2, 7], peel=[0, 1])) \
.then(Tile('matvec', 'linalg.generic', scalarize_dyn_dims=True))\
.then(Vectorize('matvec', '')) \
.then(Bufferize()) \
.then(LoweringOnlyExpert('', ''))
# Fuse, then tile.
expert_fuse_2_tile_1 = \
Fuse('matvec', 'linalg.generic', tile_sizes=[8, 16]) \
.then(Fuse('matvec', 'linalg.generic', tile_sizes=[4, 4])) \
.then(Tile('matvec', 'linalg.generic', tile_sizes=[2, 3])) \
.then(Vectorize('matvec', '')) \
.then(Bufferize()) \
.then(LoweringOnlyExpert('', ''))
expert_fuse_and_pad = \
Fuse('matvec', 'linalg.generic', tile_sizes=[16, 16]) \
.then(Tile('matvec',
'linalg.generic',
tile_sizes=[8, 12],
pad=True,
pack_paddings=[1, 1, 1],
hoist_paddings=[3, 3, 3])) \
.then(Vectorize('matvec', 'linalg.generic')) \
.then(Tile('matvec', 'linalg.fill', tile_sizes=[8, 8]))\
.then(Vectorize('matvec', '')) \
.then(Bufferize()) \
.then(LoweringOnlyExpert('', ''))
all_experts = [ \
expert_no_tiling,
expert_tile_1,
expert_tile_and_interchange_1,
expert_tile_1_and_generalize_interchange,
expert_tile_1_peel_scalarize,
expert_tile_1_pad,
expert_tile_1_pad_hoist,
expert_tile_2_pad_hoist,
expert_tile_3_pad_hoist_peel,
expert_tile_3_pad_hoist_peel_scalarize,
expert_fuse_2_tile_1,
expert_fuse_and_pad
]
################################################################################
### Problem instantiations.
################################################################################
keys = ['m', 'n']
# CHECK-NOT: FAILURE
def main():
n_iters = 1
problem_size_list = [[24, 32], [27, 37]]
test_harness(lambda s, t: EinsumProblem('mn,n', 'mn', 2), [[np.float32] * 3],
test_sizes(keys, problem_size_list),
all_experts,
n_iters=n_iters,
function_name='matvec')
if __name__ == '__main__':
main()
|
google/iree-llvm-sandbox
|
python/examples/matvec/test.py
|
Python
|
apache-2.0
| 6,178
|
"""The tests for the Modbus light component."""
from pymodbus.exceptions import ModbusException
import pytest
from homeassistant.components.light import DOMAIN as LIGHT_DOMAIN
from homeassistant.components.modbus.const import (
CALL_TYPE_COIL,
CALL_TYPE_DISCRETE,
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CONF_INPUT_TYPE,
CONF_STATE_OFF,
CONF_STATE_ON,
CONF_VERIFY,
CONF_WRITE_TYPE,
MODBUS_DOMAIN,
)
from homeassistant.const import (
CONF_ADDRESS,
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_HOST,
CONF_LIGHTS,
CONF_NAME,
CONF_PORT,
CONF_SLAVE,
CONF_TYPE,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import State
from homeassistant.setup import async_setup_component
from .conftest import ReadResult, base_config_test, base_test, prepare_service_update
from tests.common import mock_restore_cache
@pytest.mark.parametrize(
"do_config",
[
{
CONF_ADDRESS: 1234,
},
{
CONF_ADDRESS: 1234,
CONF_WRITE_TYPE: CALL_TYPE_COIL,
},
{
CONF_ADDRESS: 1234,
CONF_SLAVE: 1,
CONF_COMMAND_OFF: 0x00,
CONF_COMMAND_ON: 0x01,
CONF_VERIFY: {
CONF_INPUT_TYPE: CALL_TYPE_REGISTER_HOLDING,
CONF_ADDRESS: 1235,
CONF_STATE_OFF: 0,
CONF_STATE_ON: 1,
},
},
{
CONF_ADDRESS: 1234,
CONF_SLAVE: 1,
CONF_COMMAND_OFF: 0x00,
CONF_COMMAND_ON: 0x01,
CONF_VERIFY: {
CONF_INPUT_TYPE: CALL_TYPE_REGISTER_INPUT,
CONF_ADDRESS: 1235,
CONF_STATE_OFF: 0,
CONF_STATE_ON: 1,
},
},
{
CONF_ADDRESS: 1234,
CONF_SLAVE: 1,
CONF_COMMAND_OFF: 0x00,
CONF_COMMAND_ON: 0x01,
CONF_VERIFY: {
CONF_INPUT_TYPE: CALL_TYPE_DISCRETE,
CONF_ADDRESS: 1235,
CONF_STATE_OFF: 0,
CONF_STATE_ON: 1,
},
},
{
CONF_ADDRESS: 1234,
CONF_SLAVE: 1,
CONF_COMMAND_OFF: 0x00,
CONF_COMMAND_ON: 0x01,
CONF_VERIFY: None,
},
],
)
async def test_config_light(hass, do_config):
"""Run test for light."""
device_name = "test_light"
device_config = {
CONF_NAME: device_name,
**do_config,
}
await base_config_test(
hass,
device_config,
device_name,
LIGHT_DOMAIN,
CONF_LIGHTS,
None,
method_discovery=True,
)
@pytest.mark.parametrize("call_type", [CALL_TYPE_COIL, CALL_TYPE_REGISTER_HOLDING])
@pytest.mark.parametrize(
"regs,verify,expected",
[
(
[0x00],
{CONF_VERIFY: {}},
STATE_OFF,
),
(
[0x01],
{CONF_VERIFY: {}},
STATE_ON,
),
(
[0xFE],
{CONF_VERIFY: {}},
STATE_OFF,
),
(
None,
{CONF_VERIFY: {}},
STATE_UNAVAILABLE,
),
(
None,
{},
STATE_OFF,
),
],
)
async def test_all_light(hass, call_type, regs, verify, expected):
"""Run test for given config."""
light_name = "modbus_test_light"
state = await base_test(
hass,
{
CONF_NAME: light_name,
CONF_ADDRESS: 1234,
CONF_SLAVE: 1,
CONF_WRITE_TYPE: call_type,
**verify,
},
light_name,
LIGHT_DOMAIN,
CONF_LIGHTS,
None,
regs,
expected,
method_discovery=True,
scan_interval=5,
)
assert state == expected
async def test_restore_state_light(hass):
"""Run test for sensor restore state."""
light_name = "test_light"
entity_id = f"{LIGHT_DOMAIN}.{light_name}"
test_value = STATE_ON
config_light = {CONF_NAME: light_name, CONF_ADDRESS: 17}
mock_restore_cache(
hass,
(State(f"{entity_id}", test_value),),
)
await base_config_test(
hass,
config_light,
light_name,
LIGHT_DOMAIN,
CONF_LIGHTS,
None,
method_discovery=True,
)
assert hass.states.get(entity_id).state == test_value
async def test_light_service_turn(hass, caplog, mock_pymodbus):
"""Run test for service turn_on/turn_off."""
entity_id1 = f"{LIGHT_DOMAIN}.light1"
entity_id2 = f"{LIGHT_DOMAIN}.light2"
config = {
MODBUS_DOMAIN: {
CONF_TYPE: "tcp",
CONF_HOST: "modbusTestHost",
CONF_PORT: 5501,
CONF_LIGHTS: [
{
CONF_NAME: "light1",
CONF_ADDRESS: 17,
CONF_WRITE_TYPE: CALL_TYPE_REGISTER_HOLDING,
},
{
CONF_NAME: "light2",
CONF_ADDRESS: 17,
CONF_WRITE_TYPE: CALL_TYPE_REGISTER_HOLDING,
CONF_VERIFY: {},
},
],
},
}
assert await async_setup_component(hass, MODBUS_DOMAIN, config) is True
await hass.async_block_till_done()
assert MODBUS_DOMAIN in hass.config.components
assert hass.states.get(entity_id1).state == STATE_OFF
await hass.services.async_call(
"light", "turn_on", service_data={"entity_id": entity_id1}
)
await hass.async_block_till_done()
assert hass.states.get(entity_id1).state == STATE_ON
await hass.services.async_call(
"light", "turn_off", service_data={"entity_id": entity_id1}
)
await hass.async_block_till_done()
assert hass.states.get(entity_id1).state == STATE_OFF
mock_pymodbus.read_holding_registers.return_value = ReadResult([0x01])
assert hass.states.get(entity_id2).state == STATE_OFF
await hass.services.async_call(
"light", "turn_on", service_data={"entity_id": entity_id2}
)
await hass.async_block_till_done()
assert hass.states.get(entity_id2).state == STATE_ON
mock_pymodbus.read_holding_registers.return_value = ReadResult([0x00])
await hass.services.async_call(
"light", "turn_off", service_data={"entity_id": entity_id2}
)
await hass.async_block_till_done()
assert hass.states.get(entity_id2).state == STATE_OFF
mock_pymodbus.write_register.side_effect = ModbusException("fail write_")
await hass.services.async_call(
"light", "turn_on", service_data={"entity_id": entity_id2}
)
await hass.async_block_till_done()
assert hass.states.get(entity_id2).state == STATE_UNAVAILABLE
mock_pymodbus.write_coil.side_effect = ModbusException("fail write_")
await hass.services.async_call(
"light", "turn_off", service_data={"entity_id": entity_id1}
)
await hass.async_block_till_done()
assert hass.states.get(entity_id1).state == STATE_UNAVAILABLE
async def test_service_light_update(hass, mock_pymodbus):
"""Run test for service homeassistant.update_entity."""
entity_id = "light.test"
config = {
CONF_LIGHTS: [
{
CONF_NAME: "test",
CONF_ADDRESS: 1234,
CONF_WRITE_TYPE: CALL_TYPE_COIL,
CONF_VERIFY: {},
}
]
}
mock_pymodbus.read_discrete_inputs.return_value = ReadResult([0x01])
await prepare_service_update(
hass,
config,
)
await hass.services.async_call(
"homeassistant", "update_entity", {"entity_id": entity_id}, blocking=True
)
assert hass.states.get(entity_id).state == STATE_ON
mock_pymodbus.read_coils.return_value = ReadResult([0x00])
await hass.services.async_call(
"homeassistant", "update_entity", {"entity_id": entity_id}, blocking=True
)
assert hass.states.get(entity_id).state == STATE_OFF
|
kennedyshead/home-assistant
|
tests/components/modbus/test_light.py
|
Python
|
apache-2.0
| 8,098
|
# -*- coding: utf-8 -*-
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
import time, unittest
def is_alert_present(wd):
try:
wd.switch_to_alert().text
return True
except:
return False
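def login(wd, username, password):
    # Hedged helper sketch (not part of the original test): the login steps in
    # test_() below are repeated field by field; a helper like this could
    # factor them out using only the locators already present in the test.
    wd.find_element_by_name("user").click()
    wd.find_element_by_name("user").clear()
    wd.find_element_by_name("user").send_keys(username)
    wd.find_element_by_name("pass").click()
    wd.find_element_by_name("pass").clear()
    wd.find_element_by_name("pass").send_keys(password)
    wd.find_element_by_css_selector("input[type=\"submit\"]").click()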
class TestAddGroup(unittest.TestCase):  # class name was missing; "TestAddGroup" assumed from test_add_group.py
def setUp(self):
self.wd = WebDriver()
self.wd.implicitly_wait(60)
def test_(self):
success = True
wd = self.wd
wd.get("http://localhost/addressbook/group.php")
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").clear()
wd.find_element_by_name("pass").send_keys("secret")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").clear()
wd.find_element_by_name("user").send_keys("admin")
wd.find_element_by_css_selector("input[type=\"submit\"]").click()
wd.find_element_by_name("new").click()
wd.find_element_by_name("group_name").click()
wd.find_element_by_name("group_name").clear()
wd.find_element_by_name("group_name").send_keys("ngrogup")
wd.find_element_by_name("group_header").click()
wd.find_element_by_name("group_header").clear()
wd.find_element_by_name("group_header").send_keys("jkfjkd")
wd.find_element_by_name("group_footer").click()
wd.find_element_by_name("group_footer").clear()
wd.find_element_by_name("group_footer").send_keys("jfkldfj")
wd.find_element_by_name("submit").click()
wd.find_element_by_link_text("group page").click()
wd.find_element_by_link_text("Logout").click()
wd.find_element_by_name("pass").click()
wd.find_element_by_name("pass").send_keys("\\undefined")
wd.find_element_by_name("user").click()
wd.find_element_by_name("user").send_keys("\\undefined")
self.assertTrue(success)
def tearDown(self):
self.wd.quit()
if __name__ == '__main__':
unittest.main()
|
VLovets/phyton_training_01
|
test_add_group.py
|
Python
|
apache-2.0
| 2,026
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The VMware API VM utility module to build SOAP object specs.
"""
import collections
import copy
import functools
from oslo.config import cfg
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
from nova.openstack.common import units
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim_util
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DSRecord = collections.namedtuple(
'DSRecord', ['datastore', 'name', 'capacity', 'freespace'])
# A cache for VM references. The key will be the VM name
# and the value is the VM reference. The VM name is unique. This
# is either the UUID of the instance or UUID-rescue in the case
# that this is a rescue VM. This is in order to prevent
# unnecessary communication with the backend.
_VM_REFS_CACHE = {}
def vm_refs_cache_reset():
global _VM_REFS_CACHE
_VM_REFS_CACHE = {}
def vm_ref_cache_delete(id):
_VM_REFS_CACHE.pop(id, None)
def vm_ref_cache_update(id, vm_ref):
_VM_REFS_CACHE[id] = vm_ref
def vm_ref_cache_get(id):
return _VM_REFS_CACHE.get(id)
def _vm_ref_cache(id, func, session, data):
vm_ref = vm_ref_cache_get(id)
if not vm_ref:
vm_ref = func(session, data)
vm_ref_cache_update(id, vm_ref)
return vm_ref
def vm_ref_cache_from_instance(func):
@functools.wraps(func)
def wrapper(session, instance):
id = instance['uuid']
return _vm_ref_cache(id, func, session, instance)
return wrapper
def vm_ref_cache_from_name(func):
@functools.wraps(func)
def wrapper(session, name):
id = name
return _vm_ref_cache(id, func, session, name)
return wrapper
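def _vm_ref_cache_roundtrip_example():
    """Hedged illustration (not part of the original module): a second lookup
    for the same key is served from _VM_REFS_CACHE without invoking the
    lookup callable again."""
    calls = []
    def fake_lookup(session, name):
        calls.append(name)
        return 'vm-ref-for-%s' % name
    first = _vm_ref_cache('demo-vm', fake_lookup, None, 'demo-vm')
    second = _vm_ref_cache('demo-vm', fake_lookup, None, 'demo-vm')
    vm_ref_cache_delete('demo-vm')  # leave the real cache untouched
    return first == second and len(calls) == 1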
# the config key which stores the VNC port
VNC_CONFIG_KEY = 'config.extraConfig["RemoteDisplay.vnc.port"]'
def get_vm_create_spec(client_factory, instance, name, data_store_name,
vif_infos, os_type="otherGuest"):
"""Builds the VM Create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = name
config_spec.guestId = os_type
# The name is the unique identifier for the VM. This will either be the
# instance UUID or the instance UUID with suffix '-rescue' for VM's that
# are in rescue mode
config_spec.instanceUuid = name
# Allow nested ESX instances to host 64 bit VMs.
if os_type == "vmkernel5Guest":
config_spec.nestedHVEnabled = "True"
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = "[" + data_store_name + "]"
config_spec.files = vm_file_info
tools_info = client_factory.create('ns0:ToolsConfigInfo')
tools_info.afterPowerOn = True
tools_info.afterResume = True
tools_info.beforeGuestStandby = True
tools_info.beforeGuestShutdown = True
tools_info.beforeGuestReboot = True
config_spec.tools = tools_info
config_spec.numCPUs = int(instance['vcpus'])
config_spec.memoryMB = int(instance['memory_mb'])
vif_spec_list = []
for vif_info in vif_infos:
vif_spec = create_network_spec(client_factory, vif_info)
vif_spec_list.append(vif_spec)
device_config_spec = vif_spec_list
config_spec.deviceChange = device_config_spec
# add vm-uuid and iface-id.x values for Neutron
extra_config = []
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.vm-uuid"
opt.value = instance['uuid']
extra_config.append(opt)
i = 0
for vif_info in vif_infos:
if vif_info['iface_id']:
opt = client_factory.create('ns0:OptionValue')
opt.key = "nvp.iface-id.%d" % i
opt.value = vif_info['iface_id']
extra_config.append(opt)
i += 1
config_spec.extraConfig = extra_config
return config_spec
def get_vm_resize_spec(client_factory, instance):
"""Provides updates for a VM spec."""
resize_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
resize_spec.numCPUs = int(instance['vcpus'])
resize_spec.memoryMB = int(instance['memory_mb'])
return resize_spec
def create_controller_spec(client_factory, key, adapter_type="lsiLogic"):
"""Builds a Config Spec for the LSI or Bus Logic Controller's addition
which acts as the controller for the virtual hard disk to be attached
to the VM.
"""
# Create a controller for the Virtual Hard Disk
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if adapter_type == "busLogic":
virtual_controller = client_factory.create(
'ns0:VirtualBusLogicController')
elif adapter_type == "lsiLogicsas":
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicSASController')
else:
virtual_controller = client_factory.create(
'ns0:VirtualLsiLogicController')
virtual_controller.key = key
virtual_controller.busNumber = 0
virtual_controller.sharedBus = "noSharing"
virtual_device_config.device = virtual_controller
return virtual_device_config
def create_network_spec(client_factory, vif_info):
"""Builds a config spec for the addition of a new network
adapter to the VM.
"""
network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec')
network_spec.operation = "add"
# Keep compatible with other Hyper vif model parameter.
if vif_info['vif_model'] == "e1000":
vif_info['vif_model'] = "VirtualE1000"
vif = 'ns0:' + vif_info['vif_model']
net_device = client_factory.create(vif)
# NOTE(asomya): Only works on ESXi if the portgroup binding is set to
# ephemeral. Invalid configuration if set to static and the NIC does
# not come up on boot if set to dynamic.
network_ref = vif_info['network_ref']
network_name = vif_info['network_name']
mac_address = vif_info['mac_address']
backing = None
if network_ref and network_ref['type'] == 'OpaqueNetwork':
backing_name = ''.join(['ns0:VirtualEthernetCard',
'OpaqueNetworkBackingInfo'])
backing = client_factory.create(backing_name)
backing.opaqueNetworkId = network_ref['network-id']
backing.opaqueNetworkType = network_ref['network-type']
elif (network_ref and
network_ref['type'] == "DistributedVirtualPortgroup"):
backing_name = ''.join(['ns0:VirtualEthernetCardDistributed',
'VirtualPortBackingInfo'])
backing = client_factory.create(backing_name)
portgroup = client_factory.create(
'ns0:DistributedVirtualSwitchPortConnection')
portgroup.switchUuid = network_ref['dvsw']
portgroup.portgroupKey = network_ref['dvpg']
backing.port = portgroup
else:
backing = client_factory.create(
'ns0:VirtualEthernetCardNetworkBackingInfo')
backing.deviceName = network_name
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = True
connectable_spec.connected = True
net_device.connectable = connectable_spec
net_device.backing = backing
# The Server assigns a Key to the device. Here we pass a -ve temporary key.
# -ve because actual keys are +ve numbers and we don't
# want a clash with the key that server might associate with the device
net_device.key = -47
net_device.addressType = "manual"
net_device.macAddress = mac_address
net_device.wakeOnLanEnabled = True
network_spec.device = net_device
return network_spec
def get_vmdk_attach_config_spec(client_factory,
disk_type="preallocated",
file_path=None,
disk_size=None,
linked_clone=False,
controller_key=None,
unit_number=None,
device_name=None):
"""Builds the vmdk attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = create_virtual_disk_spec(client_factory,
controller_key, disk_type, file_path,
disk_size, linked_clone,
unit_number, device_name)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_cdrom_attach_config_spec(client_factory,
datastore,
file_path,
controller_key,
cdrom_unit_number):
"""Builds and returns the cdrom attach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vmdk_detach_config_spec(client_factory, device,
destroy_disk=False):
"""Builds the vmdk detach config spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
device_config_spec = []
virtual_device_config_spec = detach_virtual_disk_spec(client_factory,
device,
destroy_disk)
device_config_spec.append(virtual_device_config_spec)
config_spec.deviceChange = device_config_spec
return config_spec
def get_vm_extra_config_spec(client_factory, extra_opts):
"""Builds extra spec fields from a dictionary."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
# add the key value pairs
extra_config = []
for key, value in extra_opts.iteritems():
opt = client_factory.create('ns0:OptionValue')
opt.key = key
opt.value = value
extra_config.append(opt)
config_spec.extraConfig = extra_config
return config_spec
def get_vmdk_path_and_adapter_type(hardware_devices, uuid=None):
"""Gets the vmdk file path and the storage adapter type."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
vmdk_file_path = None
vmdk_controller_key = None
disk_type = None
adapter_type_dict = {}
for device in hardware_devices:
if device.__class__.__name__ == "VirtualDisk":
if device.backing.__class__.__name__ == \
"VirtualDiskFlatVer2BackingInfo":
if uuid:
if uuid in device.backing.fileName:
vmdk_file_path = device.backing.fileName
else:
vmdk_file_path = device.backing.fileName
vmdk_controller_key = device.controllerKey
if getattr(device.backing, 'thinProvisioned', False):
disk_type = "thin"
else:
if getattr(device.backing, 'eagerlyScrub', False):
disk_type = "eagerZeroedThick"
else:
disk_type = "preallocated"
elif device.__class__.__name__ == "VirtualLsiLogicController":
adapter_type_dict[device.key] = "lsiLogic"
elif device.__class__.__name__ == "VirtualBusLogicController":
adapter_type_dict[device.key] = "busLogic"
elif device.__class__.__name__ == "VirtualIDEController":
adapter_type_dict[device.key] = "ide"
elif device.__class__.__name__ == "VirtualLsiLogicSASController":
adapter_type_dict[device.key] = "lsiLogicsas"
adapter_type = adapter_type_dict.get(vmdk_controller_key, "")
return (vmdk_file_path, adapter_type, disk_type)
def _find_controller_slot(controller_keys, taken, max_unit_number):
for controller_key in controller_keys:
for unit_number in range(max_unit_number):
if not unit_number in taken.get(controller_key, []):
return controller_key, unit_number
def _is_ide_controller(device):
return device.__class__.__name__ == 'VirtualIDEController'
def _is_scsi_controller(device):
return device.__class__.__name__ in ['VirtualLsiLogicController',
'VirtualLsiLogicSASController',
'VirtualBusLogicController']
def _find_allocated_slots(devices):
"""Return dictionary which maps controller_key to list of allocated unit
numbers for that controller_key.
"""
taken = {}
for device in devices:
if hasattr(device, 'controllerKey') and hasattr(device, 'unitNumber'):
unit_numbers = taken.setdefault(device.controllerKey, [])
unit_numbers.append(device.unitNumber)
if _is_scsi_controller(device):
# the SCSI controller sits on its own bus
unit_numbers = taken.setdefault(device.key, [])
unit_numbers.append(device.scsiCtlrUnitNumber)
return taken
def allocate_controller_key_and_unit_number(client_factory, devices,
adapter_type):
"""This function inspects the current set of hardware devices and returns
controller_key and unit_number that can be used for attaching a new virtual
disk to adapter with the given adapter_type.
"""
if devices.__class__.__name__ == "ArrayOfVirtualDevice":
devices = devices.VirtualDevice
taken = _find_allocated_slots(devices)
ret = None
if adapter_type == 'ide':
ide_keys = [dev.key for dev in devices if _is_ide_controller(dev)]
ret = _find_controller_slot(ide_keys, taken, 2)
elif adapter_type in ['lsiLogic', 'lsiLogicsas', 'busLogic']:
scsi_keys = [dev.key for dev in devices if _is_scsi_controller(dev)]
ret = _find_controller_slot(scsi_keys, taken, 16)
if ret:
return ret[0], ret[1], None
# create new controller with the specified type and return its spec
controller_key = -101
controller_spec = create_controller_spec(client_factory, controller_key,
adapter_type)
return controller_key, 0, controller_spec
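def _allocation_smoke_example():
    """Hedged illustration (not part of the original module): exercise the
    slot-finding helpers above with stand-in classes whose names mimic the
    vSphere device types they dispatch on."""
    class VirtualIDEController(object):
        key = 200
    class VirtualDisk(object):
        controllerKey = 200
        unitNumber = 0
    taken = _find_allocated_slots([VirtualIDEController(), VirtualDisk()])
    # unit 0 on controller 200 is taken, so the next free IDE slot is unit 1
    return _find_controller_slot([200], taken, 2)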
def get_rdm_disk(hardware_devices, uuid):
"""Gets the RDM disk key."""
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskRawDiskMappingVer1BackingInfo" and
device.backing.lunUuid == uuid):
return device
def get_copy_virtual_disk_spec(client_factory, adapter_type="lsiLogic",
disk_type="preallocated"):
"""Builds the Virtual Disk copy spec."""
dest_spec = client_factory.create('ns0:VirtualDiskSpec')
dest_spec.adapterType = get_vmdk_adapter_type(adapter_type)
dest_spec.diskType = disk_type
return dest_spec
def get_vmdk_create_spec(client_factory, size_in_kb, adapter_type="lsiLogic",
disk_type="preallocated"):
"""Builds the virtual disk create spec."""
create_vmdk_spec = client_factory.create('ns0:FileBackedVirtualDiskSpec')
create_vmdk_spec.adapterType = get_vmdk_adapter_type(adapter_type)
create_vmdk_spec.diskType = disk_type
create_vmdk_spec.capacityKb = size_in_kb
return create_vmdk_spec
def get_rdm_create_spec(client_factory, device, adapter_type="lsiLogic",
disk_type="rdmp"):
"""Builds the RDM virtual disk create spec."""
create_vmdk_spec = client_factory.create('ns0:DeviceBackedVirtualDiskSpec')
create_vmdk_spec.adapterType = get_vmdk_adapter_type(adapter_type)
create_vmdk_spec.diskType = disk_type
create_vmdk_spec.device = device
return create_vmdk_spec
def create_virtual_cdrom_spec(client_factory,
datastore,
controller_key,
file_path,
cdrom_unit_number):
"""Builds spec for the creation of a new Virtual CDROM to the VM."""
config_spec = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
config_spec.operation = "add"
cdrom = client_factory.create('ns0:VirtualCdrom')
cdrom_device_backing = client_factory.create(
'ns0:VirtualCdromIsoBackingInfo')
cdrom_device_backing.datastore = datastore
cdrom_device_backing.fileName = file_path
cdrom.backing = cdrom_device_backing
cdrom.controllerKey = controller_key
cdrom.unitNumber = cdrom_unit_number
cdrom.key = -1
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
cdrom.connectable = connectable_spec
config_spec.device = cdrom
return config_spec
def create_virtual_disk_spec(client_factory, controller_key,
disk_type="preallocated",
file_path=None,
disk_size=None,
linked_clone=False,
unit_number=None,
device_name=None):
"""Builds spec for the creation of a new/ attaching of an already existing
Virtual Disk to the VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "add"
if (file_path is None) or linked_clone:
virtual_device_config.fileOperation = "create"
virtual_disk = client_factory.create('ns0:VirtualDisk')
if disk_type == "rdm" or disk_type == "rdmp":
disk_file_backing = client_factory.create(
'ns0:VirtualDiskRawDiskMappingVer1BackingInfo')
disk_file_backing.compatibilityMode = "virtualMode" \
if disk_type == "rdm" else "physicalMode"
disk_file_backing.diskMode = "independent_persistent"
disk_file_backing.deviceName = device_name or ""
else:
disk_file_backing = client_factory.create(
'ns0:VirtualDiskFlatVer2BackingInfo')
disk_file_backing.diskMode = "persistent"
if disk_type == "thin":
disk_file_backing.thinProvisioned = True
else:
if disk_type == "eagerZeroedThick":
disk_file_backing.eagerlyScrub = True
disk_file_backing.fileName = file_path or ""
connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
connectable_spec.startConnected = True
connectable_spec.allowGuestControl = False
connectable_spec.connected = True
if not linked_clone:
virtual_disk.backing = disk_file_backing
else:
virtual_disk.backing = copy.copy(disk_file_backing)
virtual_disk.backing.fileName = ""
virtual_disk.backing.parent = disk_file_backing
virtual_disk.connectable = connectable_spec
    # The server assigns a key to the device. Here we pass a negative random
    # key, because actual keys are positive numbers and we don't want a clash
    # with the key that the server might associate with the device.
virtual_disk.key = -100
virtual_disk.controllerKey = controller_key
virtual_disk.unitNumber = unit_number or 0
virtual_disk.capacityInKB = disk_size or 0
virtual_device_config.device = virtual_disk
return virtual_device_config
def detach_virtual_disk_spec(client_factory, device, destroy_disk=False):
"""Builds spec for the detach of an already existing Virtual Disk from VM.
"""
virtual_device_config = client_factory.create(
'ns0:VirtualDeviceConfigSpec')
virtual_device_config.operation = "remove"
if destroy_disk:
virtual_device_config.fileOperation = "destroy"
virtual_device_config.device = device
return virtual_device_config
def clone_vm_spec(client_factory, location,
power_on=False, snapshot=None, template=False, config=None):
"""Builds the VM clone spec."""
clone_spec = client_factory.create('ns0:VirtualMachineCloneSpec')
clone_spec.location = location
clone_spec.powerOn = power_on
if snapshot:
clone_spec.snapshot = snapshot
if config is not None:
clone_spec.config = config
clone_spec.template = template
return clone_spec
def relocate_vm_spec(client_factory, datastore=None, host=None,
disk_move_type="moveAllDiskBackingsAndAllowSharing"):
"""Builds the VM relocation spec."""
rel_spec = client_factory.create('ns0:VirtualMachineRelocateSpec')
rel_spec.datastore = datastore
rel_spec.diskMoveType = disk_move_type
if host:
rel_spec.host = host
return rel_spec
def get_dummy_vm_create_spec(client_factory, name, data_store_name):
"""Builds the dummy VM create spec."""
config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
config_spec.name = name
config_spec.guestId = "otherGuest"
vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
vm_file_info.vmPathName = "[" + data_store_name + "]"
config_spec.files = vm_file_info
tools_info = client_factory.create('ns0:ToolsConfigInfo')
tools_info.afterPowerOn = True
tools_info.afterResume = True
tools_info.beforeGuestStandby = True
tools_info.beforeGuestShutdown = True
tools_info.beforeGuestReboot = True
config_spec.tools = tools_info
config_spec.numCPUs = 1
config_spec.memoryMB = 4
controller_key = -101
controller_spec = create_controller_spec(client_factory, controller_key)
    disk_spec = create_virtual_disk_spec(client_factory, controller_key,
                                         disk_size=1024)
device_config_spec = [controller_spec, disk_spec]
config_spec.deviceChange = device_config_spec
return config_spec
def get_machine_id_change_spec(client_factory, machine_id_str):
"""Builds the machine id change config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt = client_factory.create('ns0:OptionValue')
opt.key = "machine.id"
opt.value = machine_id_str
virtual_machine_config_spec.extraConfig = [opt]
return virtual_machine_config_spec
def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
port_group_name, vlan_id):
"""Builds the virtual switch port group add spec."""
vswitch_port_group_spec = client_factory.create('ns0:HostPortGroupSpec')
vswitch_port_group_spec.name = port_group_name
vswitch_port_group_spec.vswitchName = vswitch_name
# VLAN ID of 0 means that VLAN tagging is not to be done for the network.
vswitch_port_group_spec.vlanId = int(vlan_id)
policy = client_factory.create('ns0:HostNetworkPolicy')
nicteaming = client_factory.create('ns0:HostNicTeamingPolicy')
nicteaming.notifySwitches = True
policy.nicTeaming = nicteaming
vswitch_port_group_spec.policy = policy
return vswitch_port_group_spec
def get_vnc_config_spec(client_factory, port):
"""Builds the vnc config spec."""
virtual_machine_config_spec = client_factory.create(
'ns0:VirtualMachineConfigSpec')
opt_enabled = client_factory.create('ns0:OptionValue')
opt_enabled.key = "RemoteDisplay.vnc.enabled"
opt_enabled.value = "true"
opt_port = client_factory.create('ns0:OptionValue')
opt_port.key = "RemoteDisplay.vnc.port"
opt_port.value = port
extras = [opt_enabled, opt_port]
virtual_machine_config_spec.extraConfig = extras
return virtual_machine_config_spec
def get_vnc_port(session):
"""Return VNC port for an VM or None if there is no available port."""
min_port = CONF.vmware.vnc_port
port_total = CONF.vmware.vnc_port_total
allocated_ports = _get_allocated_vnc_ports(session)
max_port = min_port + port_total
for port in range(min_port, max_port):
if port not in allocated_ports:
return port
raise exception.ConsolePortRangeExhausted(min_port=min_port,
max_port=max_port)
def _get_allocated_vnc_ports(session):
"""Return an integer set of all allocated VNC ports."""
# TODO(rgerganov): bug #1256944
# The VNC port should be unique per host, not per vCenter
vnc_ports = set()
result = session._call_method(vim_util, "get_objects",
"VirtualMachine", [VNC_CONFIG_KEY])
while result:
for obj in result.objects:
if not hasattr(obj, 'propSet'):
continue
dynamic_prop = obj.propSet[0]
option_value = dynamic_prop.val
vnc_port = option_value.value
vnc_ports.add(int(vnc_port))
token = _get_token(result)
if token:
result = session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
break
return vnc_ports
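# Illustrative sketch (hypothetical helper): the port scan performed by
# get_vnc_port() above, shown in isolation with plain arguments instead of the
# CONF settings and a live session; the default values below are made up.
def _example_pick_free_vnc_port(allocated_ports, min_port=5900, port_total=100):
    """Return the first port in [min_port, min_port + port_total) not in use."""
    for port in range(min_port, min_port + port_total):
        if port not in allocated_ports:
            return port
    return None  # the real get_vnc_port() raises ConsolePortRangeExhausted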
def search_datastore_spec(client_factory, file_name):
"""Builds the datastore search spec."""
search_spec = client_factory.create('ns0:HostDatastoreBrowserSearchSpec')
search_spec.matchPattern = [file_name]
return search_spec
def _get_token(results):
"""Get the token from the property results."""
return getattr(results, 'token', None)
def _get_reference_for_value(results, value):
for object in results.objects:
if object.obj.value == value:
return object
def _get_object_for_value(results, value):
for object in results.objects:
if object.propSet[0].val == value:
return object.obj
def _get_object_for_optionvalue(results, value):
for object in results.objects:
if hasattr(object, "propSet") and object.propSet:
if object.propSet[0].val.value == value:
return object.obj
def _get_object_from_results(session, results, value, func):
while results:
token = _get_token(results)
object = func(results, value)
if object:
if token:
session._call_method(vim_util,
"cancel_retrieve",
token)
return object
if token:
results = session._call_method(vim_util,
"continue_to_get_objects",
token)
else:
return None
def _cancel_retrieve_if_necessary(session, results):
token = _get_token(results)
if token:
results = session._call_method(vim_util,
"cancel_retrieve",
token)
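# Note (illustrative summary, not authoritative): the helpers above follow the
# usual vSphere paging pattern -- fetch one page of results, scan it for the
# wanted object, then either cancel the retrieval early once it is found or
# continue to the next page with the returned token until no token is left.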
def _get_vm_ref_from_name(session, vm_name):
"""Get reference to the VM with the name specified."""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
return _get_object_from_results(session, vms, vm_name,
_get_object_for_value)
@vm_ref_cache_from_name
def get_vm_ref_from_name(session, vm_name):
return (_get_vm_ref_from_vm_uuid(session, vm_name) or
_get_vm_ref_from_name(session, vm_name))
def _get_vm_ref_from_uuid(session, instance_uuid):
"""Get reference to the VM with the uuid specified.
    This method reads the names of all VMs running on the backend and then
    filters locally for the matching instance_uuid. It is far more efficient
    to use _get_vm_ref_from_vm_uuid.
"""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ["name"])
return _get_object_from_results(session, vms, instance_uuid,
_get_object_for_value)
def _get_vm_ref_from_vm_uuid(session, instance_uuid):
"""Get reference to the VM.
The method will make use of FindAllByUuid to get the VM reference.
    This method finds all VMs on the backend that match the
    instance_uuid, more specifically all VMs on the backend that have
    'config_spec.instanceUuid' set to 'instance_uuid'.
"""
vm_refs = session._call_method(
session._get_vim(),
"FindAllByUuid",
session._get_vim().get_service_content().searchIndex,
uuid=instance_uuid,
vmSearch=True,
instanceUuid=True)
if vm_refs:
return vm_refs[0]
def _get_vm_ref_from_extraconfig(session, instance_uuid):
"""Get reference to the VM with the uuid specified."""
vms = session._call_method(vim_util, "get_objects",
"VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
return _get_object_from_results(session, vms, instance_uuid,
_get_object_for_optionvalue)
@vm_ref_cache_from_instance
def get_vm_ref(session, instance):
"""Get reference to the VM through uuid or vm name."""
uuid = instance['uuid']
vm_ref = (search_vm_ref_by_identifier(session, uuid) or
_get_vm_ref_from_name(session, instance['name']))
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=uuid)
return vm_ref
def search_vm_ref_by_identifier(session, identifier):
"""Searches VM reference using the identifier.
    This method is primarily meant to separate out the part of the vm_ref
    search logic that can be used directly in the special case of migrating
    the instance. For querying the VM linked to an instance, always use
    get_vm_ref instead.
"""
vm_ref = (_get_vm_ref_from_vm_uuid(session, identifier) or
_get_vm_ref_from_extraconfig(session, identifier) or
_get_vm_ref_from_uuid(session, identifier))
return vm_ref
def get_host_ref_from_id(session, host_id, property_list=None):
"""Get a host reference object for a host_id string."""
if property_list is None:
property_list = ['name']
host_refs = session._call_method(
vim_util, "get_objects",
"HostSystem", property_list)
return _get_object_from_results(session, host_refs, host_id,
_get_reference_for_value)
def get_host_id_from_vm_ref(session, vm_ref):
"""This method allows you to find the managed object
ID of the host running a VM. Since vMotion can
change the value, you should not presume that this
is a value that you can cache for very long and
should be prepared to allow for it to change.
:param session: a vSphere API connection
:param vm_ref: a reference object to the running VM
:return: the host_id running the virtual machine
"""
# to prevent typographical errors below
property_name = 'runtime.host'
# a property collector in VMware vSphere Management API
# is a set of local representations of remote values.
# property_set here, is a local representation of the
# properties we are querying for.
property_set = session._call_method(
vim_util, "get_object_properties",
None, vm_ref, vm_ref._type, [property_name])
prop = property_from_property_set(
property_name, property_set)
if prop is not None:
prop = prop.val.value
else:
# reaching here represents an impossible state
raise RuntimeError(
"Virtual Machine %s exists without a runtime.host!"
% (vm_ref))
return prop
def property_from_property_set(property_name, property_set):
'''Use this method to filter property collector results.
Because network traffic is expensive, multiple
    VMwareAPI calls will sometimes pile up properties
to be collected. That means results may contain
many different values for multiple purposes.
This helper will filter a list for a single result
and filter the properties of that result to find
the single value of whatever type resides in that
result. This could be a ManagedObjectReference ID
or a complex value.
:param property_name: name of property you want
:param property_set: all results from query
:return: the value of the property.
'''
for prop in property_set.objects:
p = _property_from_propSet(prop.propSet, property_name)
if p is not None:
return p
def _property_from_propSet(propSet, name='name'):
for p in propSet:
if p.name == name:
return p
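# Illustrative sketch (hypothetical stand-in classes, not real suds objects):
# shows how property_from_property_set() and _property_from_propSet() pick a
# single named property out of a property-collector style result.
def _example_filter_property_set():
    class _FakeProp(object):
        def __init__(self, name, val):
            self.name = name
            self.val = val

    class _FakeObjectContent(object):
        def __init__(self, propSet):
            self.propSet = propSet

    class _FakeResult(object):
        def __init__(self, objects):
            self.objects = objects

    result = _FakeResult([_FakeObjectContent([_FakeProp('runtime.host',
                                                        'host-42')])])
    prop = property_from_property_set('runtime.host', result)
    return prop.val if prop else None  # -> 'host-42'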
def get_host_ref_for_vm(session, instance, props):
"""Get the ESXi host running a VM by its name."""
vm_ref = get_vm_ref(session, instance)
host_id = get_host_id_from_vm_ref(session, vm_ref)
return get_host_ref_from_id(session, host_id, props)
def get_host_name_for_vm(session, instance):
"""Get the ESXi host running a VM by its name."""
host_ref = get_host_ref_for_vm(session, instance, ['name'])
return get_host_name_from_host_ref(host_ref)
def get_host_name_from_host_ref(host_ref):
p = _property_from_propSet(host_ref.propSet)
if p is not None:
return p.val
def get_vm_state_from_name(session, vm_name):
vm_ref = get_vm_ref_from_name(session, vm_name)
vm_state = session._call_method(vim_util, "get_dynamic_property",
vm_ref, "VirtualMachine", "runtime.powerState")
return vm_state
def get_stats_from_cluster(session, cluster):
"""Get the aggregate resource stats of a cluster."""
cpu_info = {'vcpus': 0, 'cores': 0, 'vendor': [], 'model': []}
mem_info = {'total': 0, 'free': 0}
# Get the Host and Resource Pool Managed Object Refs
prop_dict = session._call_method(vim_util, "get_dynamic_properties",
cluster, "ClusterComputeResource",
["host", "resourcePool"])
if prop_dict:
host_ret = prop_dict.get('host')
if host_ret:
host_mors = host_ret.ManagedObjectReference
result = session._call_method(vim_util,
"get_properties_for_a_collection_of_objects",
"HostSystem", host_mors,
["summary.hardware", "summary.runtime"])
for obj in result.objects:
hardware_summary = obj.propSet[0].val
runtime_summary = obj.propSet[1].val
                if (not runtime_summary.inMaintenanceMode and
                        runtime_summary.connectionState == "connected"):
# Total vcpus is the sum of all pCPUs of individual hosts
# The overcommitment ratio is factored in by the scheduler
cpu_info['vcpus'] += hardware_summary.numCpuThreads
cpu_info['cores'] += hardware_summary.numCpuCores
cpu_info['vendor'].append(hardware_summary.vendor)
cpu_info['model'].append(hardware_summary.cpuModel)
res_mor = prop_dict.get('resourcePool')
if res_mor:
res_usage = session._call_method(vim_util, "get_dynamic_property",
res_mor, "ResourcePool", "summary.runtime.memory")
if res_usage:
# maxUsage is the memory limit of the cluster available to VM's
mem_info['total'] = int(res_usage.maxUsage / units.Mi)
# overallUsage is the hypervisor's view of memory usage by VM's
consumed = int(res_usage.overallUsage / units.Mi)
mem_info['free'] = mem_info['total'] - consumed
stats = {'cpu': cpu_info, 'mem': mem_info}
return stats
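# Illustrative sketch (made-up numbers): the shape of the dictionary returned
# by get_stats_from_cluster() above; memory values are in MB.
def _example_cluster_stats():
    return {
        'cpu': {'vcpus': 32,              # sum of pCPU threads of usable hosts
                'cores': 16,              # sum of physical cores
                'vendor': ['GenuineIntel'],
                'model': ['Intel(R) Xeon(R) CPU']},
        'mem': {'total': 65536,           # resource pool memory limit
                'free': 40960},           # total minus hypervisor-reported use
    }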
def get_cluster_ref_from_name(session, cluster_name):
"""Get reference to the cluster with the name specified."""
cls = session._call_method(vim_util, "get_objects",
"ClusterComputeResource", ["name"])
return _get_object_from_results(session, cls, cluster_name,
_get_object_for_value)
def get_host_ref(session, cluster=None):
"""Get reference to a host within the cluster specified."""
if cluster is None:
results = session._call_method(vim_util, "get_objects",
"HostSystem")
_cancel_retrieve_if_necessary(session, results)
host_mor = results.objects[0].obj
else:
host_ret = session._call_method(vim_util, "get_dynamic_property",
cluster, "ClusterComputeResource",
"host")
if not host_ret or not host_ret.ManagedObjectReference:
msg = _('No host available on cluster')
raise exception.NoValidHost(reason=msg)
host_mor = host_ret.ManagedObjectReference[0]
return host_mor
def propset_dict(propset):
"""Turn a propset list into a dictionary
PropSet is an optional attribute on ObjectContent objects
that are returned by the VMware API.
You can read more about these at:
http://pubs.vmware.com/vsphere-51/index.jsp
#com.vmware.wssdk.apiref.doc/
vmodl.query.PropertyCollector.ObjectContent.html
:param propset: a property "set" from ObjectContent
:return: dictionary representing property set
"""
if propset is None:
return {}
    # TODO(hartsocks): once support for Python 2.6 is dropped,
    # change to {prop.name: prop.val for prop in propset}
return dict([(prop.name, prop.val) for prop in propset])
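# Illustrative usage sketch for propset_dict(); _FakeProp is a hypothetical
# stand-in for the suds objects returned by the VMware API.
def _example_propset_dict():
    class _FakeProp(object):
        def __init__(self, name, val):
            self.name = name
            self.val = val

    props = [_FakeProp('summary.name', 'datastore1'),
             _FakeProp('summary.type', 'VMFS')]
    return propset_dict(props)  # -> {'summary.name': 'datastore1',
                                #     'summary.type': 'VMFS'}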
def _select_datastore(data_stores, best_match, datastore_regex=None):
"""Find the most preferable datastore in a given RetrieveResult object.
:param data_stores: a RetrieveResult object from vSphere API call
:param best_match: the current best match for datastore
:param datastore_regex: an optional regular expression to match names
:return: datastore_ref, datastore_name, capacity, freespace
"""
# data_stores is actually a RetrieveResult object from vSphere API call
for obj_content in data_stores.objects:
# the propset attribute "need not be set" by returning API
if not hasattr(obj_content, 'propSet'):
continue
propdict = propset_dict(obj_content.propSet)
        # vSphere doesn't support CIFS or vfat for datastores,
        # therefore those datastore types are filtered out here.
ds_type = propdict['summary.type']
ds_name = propdict['summary.name']
if ((ds_type == 'VMFS' or ds_type == 'NFS') and
propdict.get('summary.accessible')):
if datastore_regex is None or datastore_regex.match(ds_name):
new_ds = DSRecord(
datastore=obj_content.obj,
name=ds_name,
capacity=propdict['summary.capacity'],
freespace=propdict['summary.freeSpace'])
# favor datastores with more free space
if new_ds.freespace > best_match.freespace:
best_match = new_ds
return best_match
def get_datastore_ref_and_name(session, cluster=None, host=None,
datastore_regex=None):
"""Get the datastore list and choose the most preferable one."""
if cluster is None and host is None:
data_stores = session._call_method(vim_util, "get_objects",
"Datastore", ["summary.type", "summary.name",
"summary.capacity", "summary.freeSpace",
"summary.accessible"])
else:
if cluster is not None:
datastore_ret = session._call_method(
vim_util,
"get_dynamic_property", cluster,
"ClusterComputeResource", "datastore")
else:
datastore_ret = session._call_method(
vim_util,
"get_dynamic_property", host,
"HostSystem", "datastore")
if not datastore_ret:
raise exception.DatastoreNotFound()
data_store_mors = datastore_ret.ManagedObjectReference
data_stores = session._call_method(vim_util,
"get_properties_for_a_collection_of_objects",
"Datastore", data_store_mors,
["summary.type", "summary.name",
"summary.capacity", "summary.freeSpace",
"summary.accessible"])
best_match = DSRecord(datastore=None, name=None,
capacity=None, freespace=0)
while data_stores:
best_match = _select_datastore(data_stores, best_match,
datastore_regex)
token = _get_token(data_stores)
if not token:
break
data_stores = session._call_method(vim_util,
"continue_to_get_objects",
token)
if best_match.datastore:
return best_match
if datastore_regex:
raise exception.DatastoreNotFound(
_("Datastore regex %s did not match any datastores")
% datastore_regex.pattern)
else:
raise exception.DatastoreNotFound()
def _get_allowed_datastores(data_stores, datastore_regex, allowed_types):
allowed = []
for obj_content in data_stores.objects:
# the propset attribute "need not be set" by returning API
if not hasattr(obj_content, 'propSet'):
continue
propdict = propset_dict(obj_content.propSet)
        # vSphere doesn't support CIFS or vfat for datastores,
        # therefore those datastore types are filtered out here.
ds_type = propdict['summary.type']
ds_name = propdict['summary.name']
if (propdict['summary.accessible'] and ds_type in allowed_types):
if datastore_regex is None or datastore_regex.match(ds_name):
allowed.append({'ref': obj_content.obj, 'name': ds_name})
return allowed
def get_available_datastores(session, cluster=None, datastore_regex=None):
"""Get the datastore list and choose the first local storage."""
if cluster:
mobj = cluster
type = "ClusterComputeResource"
else:
mobj = get_host_ref(session)
type = "HostSystem"
ds = session._call_method(vim_util, "get_dynamic_property", mobj,
type, "datastore")
if not ds:
return []
data_store_mors = ds.ManagedObjectReference
# NOTE(garyk): use utility method to retrieve remote objects
data_stores = session._call_method(vim_util,
"get_properties_for_a_collection_of_objects",
"Datastore", data_store_mors,
["summary.type", "summary.name", "summary.accessible"])
allowed = []
while data_stores:
allowed.extend(_get_allowed_datastores(data_stores, datastore_regex,
['VMFS', 'NFS']))
token = _get_token(data_stores)
if not token:
break
data_stores = session._call_method(vim_util,
"continue_to_get_objects",
token)
return allowed
def get_vmdk_backed_disk_uuid(hardware_devices, volume_uuid):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
volume_uuid in device.backing.fileName):
return device.backing.uuid
def get_vmdk_backed_disk_device(hardware_devices, uuid):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk" and
device.backing.__class__.__name__ ==
"VirtualDiskFlatVer2BackingInfo" and
device.backing.uuid == uuid):
return device
def get_vmdk_volume_disk(hardware_devices, path=None):
if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
hardware_devices = hardware_devices.VirtualDevice
for device in hardware_devices:
if (device.__class__.__name__ == "VirtualDisk"):
if not path or path == device.backing.fileName:
return device
def get_res_pool_ref(session, cluster, node_mo_id):
"""Get the resource pool."""
if cluster is None:
# With no cluster named, use the root resource pool.
results = session._call_method(vim_util, "get_objects",
"ResourcePool")
_cancel_retrieve_if_necessary(session, results)
# The 0th resource pool is always the root resource pool on both ESX
# and vCenter.
res_pool_ref = results.objects[0].obj
else:
if cluster.value == node_mo_id:
# Get the root resource pool of the cluster
res_pool_ref = session._call_method(vim_util,
"get_dynamic_property",
cluster,
"ClusterComputeResource",
"resourcePool")
return res_pool_ref
def get_all_cluster_mors(session):
"""Get all the clusters in the vCenter."""
try:
results = session._call_method(vim_util, "get_objects",
"ClusterComputeResource", ["name"])
_cancel_retrieve_if_necessary(session, results)
return results.objects
except Exception as excep:
LOG.warn(_("Failed to get cluster references %s") % excep)
def get_all_res_pool_mors(session):
"""Get all the resource pools in the vCenter."""
try:
results = session._call_method(vim_util, "get_objects",
"ResourcePool")
_cancel_retrieve_if_necessary(session, results)
return results.objects
except Exception as excep:
LOG.warn(_("Failed to get resource pool references " "%s") % excep)
def get_dynamic_property_mor(session, mor_ref, attribute):
"""Get the value of an attribute for a given managed object."""
return session._call_method(vim_util, "get_dynamic_property",
mor_ref, mor_ref._type, attribute)
def find_entity_mor(entity_list, entity_name):
"""Returns managed object ref for given cluster or resource pool name."""
return [mor for mor in entity_list if (hasattr(mor, 'propSet') and
mor.propSet[0].val == entity_name)]
def get_all_cluster_refs_by_name(session, path_list):
"""Get reference to the Cluster, ResourcePool with the path specified.
The path is the display name. This can be the full path as well.
The input will have the list of clusters and resource pool names
"""
cls = get_all_cluster_mors(session)
if not cls:
return
res = get_all_res_pool_mors(session)
if not res:
return
path_list = [path.strip() for path in path_list]
list_obj = []
for entity_path in path_list:
# entity_path could be unique cluster and/or resource-pool name
res_mor = find_entity_mor(res, entity_path)
cls_mor = find_entity_mor(cls, entity_path)
cls_mor.extend(res_mor)
for mor in cls_mor:
list_obj.append((mor.obj, mor.propSet[0].val))
return get_dict_mor(session, list_obj)
def get_dict_mor(session, list_obj):
"""The input is a list of objects in the form
    (managed_object, display_name)
The managed object will be in the form
{ value = "domain-1002", _type = "ClusterComputeResource" }
Output data format:
dict_mors = {
'respool-1001': { 'cluster_mor': clusterMor,
'res_pool_mor': resourcePoolMor,
'name': display_name },
'domain-1002': { 'cluster_mor': clusterMor,
'res_pool_mor': resourcePoolMor,
'name': display_name },
}
"""
dict_mors = {}
for obj_ref, path in list_obj:
if obj_ref._type == "ResourcePool":
# Get owner cluster-ref mor
cluster_ref = get_dynamic_property_mor(session, obj_ref, "owner")
dict_mors[obj_ref.value] = {'cluster_mor': cluster_ref,
'res_pool_mor': obj_ref,
'name': path,
}
else:
# Get default resource pool of the cluster
res_pool_ref = get_dynamic_property_mor(session,
obj_ref, "resourcePool")
dict_mors[obj_ref.value] = {'cluster_mor': obj_ref,
'res_pool_mor': res_pool_ref,
'name': path,
}
return dict_mors
def get_mo_id_from_instance(instance):
"""Return the managed object ID from the instance.
The instance['node'] will have the hypervisor_hostname field of the
compute node on which the instance exists or will be provisioned.
This will be of the form
'respool-1001(MyResPoolName)'
'domain-1001(MyClusterName)'
"""
return instance['node'].partition('(')[0]
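# Illustrative example (hypothetical node strings): the parsing done by
# get_mo_id_from_instance() above just takes everything before the first '('.
def _example_mo_id_from_node():
    assert 'domain-1001(MyClusterName)'.partition('(')[0] == 'domain-1001'
    assert 'respool-1001(MyResPoolName)'.partition('(')[0] == 'respool-1001'
    return get_mo_id_from_instance({'node': 'domain-1001(MyClusterName)'})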
def get_vmdk_adapter_type(adapter_type):
"""Return the adapter type to be used in vmdk descriptor.
    The adapter type in the vmdk descriptor is the same for LSI-SAS and
    LSILogic because the Virtual Disk Manager API does not recognize the newer
    controller types.
"""
if adapter_type == "lsiLogicsas":
vmdk_adapter_type = "lsiLogic"
else:
vmdk_adapter_type = adapter_type
return vmdk_adapter_type
def clone_vmref_for_instance(session, instance, vm_ref, host_ref, ds_ref,
vmfolder_ref):
"""Clone VM and link the cloned VM to the instance.
Clones the passed vm_ref into a new VM and links the cloned vm to
the passed instance.
"""
if vm_ref is None:
LOG.warn(_("vmwareapi:vm_util:clone_vmref_for_instance, called "
"with vm_ref=None"))
raise error_util.MissingParameter(param="vm_ref")
# Get the clone vm spec
client_factory = session._get_vim().client.factory
rel_spec = relocate_vm_spec(client_factory, ds_ref, host_ref)
extra_opts = {'nvp.vm-uuid': instance['uuid']}
config_spec = get_vm_extra_config_spec(client_factory, extra_opts)
config_spec.instanceUuid = instance['uuid']
clone_spec = clone_vm_spec(client_factory, rel_spec, config=config_spec)
# Clone VM on ESX host
LOG.debug(_("Cloning VM for instance %s"), instance['uuid'],
instance=instance)
vm_clone_task = session._call_method(session._get_vim(), "CloneVM_Task",
vm_ref, folder=vmfolder_ref,
name=instance['uuid'],
spec=clone_spec)
session._wait_for_task(vm_clone_task)
LOG.debug(_("Cloned VM for instance %s"), instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
def disassociate_vmref_from_instance(session, instance, vm_ref=None,
suffix='-orig'):
"""Disassociates the VM linked to the instance.
    Disassociates the VM linked to the instance by performing the following:
1. Update the extraConfig property for nvp.vm-uuid to be replaced with
instance[uuid]+suffix
2. Rename the VM to be instance[uuid]+suffix instead
3. Reset the instanceUUID of the VM to a new generated value
"""
if vm_ref is None:
vm_ref = get_vm_ref(session, instance)
extra_opts = {'nvp.vm-uuid': instance['uuid'] + suffix}
client_factory = session._get_vim().client.factory
reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts)
reconfig_spec.name = instance['uuid'] + suffix
reconfig_spec.instanceUuid = ''
LOG.debug(_("Disassociating VM from instance %s"), instance['uuid'],
instance=instance)
reconfig_task = session._call_method(session._get_vim(), "ReconfigVM_Task",
vm_ref, spec=reconfig_spec)
session._wait_for_task(reconfig_task)
LOG.debug(_("Disassociated VM from instance %s"), instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
def associate_vmref_for_instance(session, instance, vm_ref=None,
suffix='-orig'):
"""Associates the VM to the instance.
Associates the VM to the instance by performing the following
1. Update the extraConfig property for nvp.vm-uuid to be replaced with
instance[uuid]
2. Rename the VM to be instance[uuid]
3. Reset the instanceUUID of the VM to be instance[uuid]
"""
if vm_ref is None:
vm_ref = search_vm_ref_by_identifier(session,
instance['uuid'] + suffix)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance['uuid']
+ suffix)
extra_opts = {'nvp.vm-uuid': instance['uuid']}
client_factory = session._get_vim().client.factory
reconfig_spec = get_vm_extra_config_spec(client_factory, extra_opts)
reconfig_spec.name = instance['uuid']
reconfig_spec.instanceUuid = instance['uuid']
LOG.debug(_("Associating VM to instance %s"), instance['uuid'],
instance=instance)
reconfig_task = session._call_method(session._get_vim(), "ReconfigVM_Task",
vm_ref, spec=reconfig_spec)
session._wait_for_task(reconfig_task)
LOG.debug(_("Associated VM to instance %s"), instance['uuid'],
instance=instance)
# Invalidate the cache, so that it is refetched the next time
vm_ref_cache_delete(instance['uuid'])
|
luogangyi/bcec-nova
|
nova/virt/vmwareapi/vm_util.py
|
Python
|
apache-2.0
| 56,473
|
"""
Testing can be done on the command line using httpie:
.. code:: bash
http --pretty=all --verbose http://localhost:8000/ \
service==CSW \
version==2.0.2 \
request==GetRecordById \
Id==c1fdc10a-9170-11e4-ba66-0019995d2a58 \
ElementSetName==full \
Outputschema==http://www.isotc211.org/2005/gmd \
| less -R
Or with the browser and GET requests:
http://localhost:8000/csw/server/?
SERVICE=CSW&version=2.0.2&
REQUEST=GetRecords&
resultType=results&
constraintLanguage=CQL_TEXT&
constraint_language_version=1.1.0&
constraint=TempExtent_begin%20%3E=%20%272014-10-12T00:00:00Z%27&
elementSetName=full&
outputSchema=http://www.isotc211.org/2005/gmd&
typenames=gmd:MD_Metadata
"""
import math
import hashlib
import base64
import os
import re
import json
import pyproj
from django.db import models, connection
from django.dispatch import receiver
from django.db.models.signals import post_save, pre_save, post_delete, pre_delete
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.utils import timezone
from django.core.files.storage import FileSystemStorage
slug_re = re.compile(r'^[a-z0-9_]+$')
validate_slug = RegexValidator(slug_re, "Slug can only contain lowercase letters, numbers and underscores", "invalid")
#load extra epsg
epsg_extra = {}
try:
epsgs = None
with open(settings.EPSG_EXTRA_FILE, 'rb') as f:
epsgs = f.read()
epsg_re = re.compile("^<([0-9]+)>\s+(.+)\s+<>$")
epsgs = [l.strip() for l in epsgs.splitlines()]
#remove empty lines, comment lines and incorrect lines
epsgs = [l for l in epsgs if l and l[0] != "#"]
#parse each line
for l in epsgs:
try:
m = epsg_re.match(l)
if m:
epsg_extra["EPSG:{}".format(m.group(1))] = m.group(2)
except:
pass
except:
pass
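# Illustrative example (the proj4 string is made up): each non-comment line of
# EPSG_EXTRA_FILE is expected to look like "<4283> +proj=longlat ... <>", which
# the loader above turns into epsg_extra["EPSG:4283"] = "+proj=longlat ...".
def _example_epsg_extra_line():
    example_line = "<4283> +proj=longlat +ellps=GRS80 +no_defs <>"
    m = re.match("^<([0-9]+)>\s+(.+)\s+<>$", example_line)
    # -> ("EPSG:4283", "+proj=longlat +ellps=GRS80 +no_defs")
    return "EPSG:{}".format(m.group(1)), m.group(2)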
class PreviewTile(object):
@staticmethod
def _preview_tile(srs_bbox, bbox, default_tilebox):
#compute the tile which can cover the whole bbox
min_distance = min([srs_bbox[2] - srs_bbox[0], srs_bbox[3] - srs_bbox[1]])
tile_size = max([bbox[2] - bbox[0], bbox[3] - bbox[1]])
max_tiles = int(min_distance / tile_size)
max_level = -1
tile_bbox = None
while (max_tiles > 0):
max_tiles /= 2
max_level += 1
while(max_level >= 0):
distancePerTile = float(min_distance) / math.pow(2, max_level)
xtile = int((bbox[0] - srs_bbox[0]) / distancePerTile)
ytile = int((bbox[1] - srs_bbox[1]) / distancePerTile)
tile_bbox = [xtile * distancePerTile + srs_bbox[0], ytile * distancePerTile + srs_bbox[1], (xtile + 1) * distancePerTile + srs_bbox[0], (ytile + 1) * distancePerTile + srs_bbox[1]]
if tile_bbox[0] <= bbox[0] and tile_bbox[1] <= bbox[1] and tile_bbox[2] >= bbox[2] and tile_bbox[3] >= bbox[3]:
break
else:
max_level -= 1
tile_bbox = None
if not tile_bbox:
tile_bbox = default_tilebox
return tile_bbox
@staticmethod
def EPSG_4326(bbox):
#compute the tile which can cover the whole bbox
#gridset bound [-180, -90, 180, 90]
return PreviewTile._preview_tile([-180, -90, 180, 90], bbox, [0, -90, 180, 90])
@staticmethod
def EPSG_3857(bbox):
#compute the tile which can cover the whole bbox
        #gridset bound [-20037508.34, -20037508.34, 20037508.34, 20037508.34]
return PreviewTile._preview_tile([-20037508.34, -20037508.34, 20037508.34, 20037508.34], bbox, [-20037508.34, -20037508.34, 20037508.34, 20037508.34])
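# Note (illustrative, not authoritative): PreviewTile picks, from a power-of-two
# tiling of the gridset bounds, a tile that still covers the layer's bbox, e.g.
# PreviewTile.EPSG_4326([110.0, -40.0, 120.0, -30.0]) returns a
# [minx, miny, maxx, maxy] tile aligned to that grid which contains the given
# bbox, or the default tile when no aligned tile fits.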
class PycswConfig(models.Model):
language = models.CharField(max_length=10, default="en-US")
max_records = models.IntegerField(default=10)
#log_level # can use django's config
#log_file # can use django's config
#ogc_schemas_base
#federated_catalogues
#pretty_print
#gzip_compress_level
#domain_query_type
#domain_counts
#spatial_ranking
transactions = models.BooleanField(default=False,
help_text="Enable transactions")
allowed_ips = models.CharField(
max_length=255, blank=True, default="127.0.0.1",
help_text="IP addresses that are allowed to make transaction requests"
)
harvest_page_size = models.IntegerField(default=10)
title = models.CharField(max_length=50)
abstract = models.TextField()
keywords = models.CharField(max_length=255)
keywords_type = models.CharField(max_length=255)
fees = models.CharField(max_length=100)
access_constraints = models.CharField(max_length=255)
point_of_contact = models.ForeignKey("Collaborator")
repository_filter = models.CharField(max_length=255, blank=True)
inspire_enabled = models.BooleanField(default=False)
inspire_languages = models.CharField(max_length=255, blank=True)
inspire_default_language = models.CharField(max_length=30, blank=True)
inspire_date = models.DateTimeField(null=True, blank=True)
gemet_keywords = models.CharField(max_length=255, blank=True)
conformity_service = models.CharField(max_length=255, blank=True)
temporal_extent_start = models.DateTimeField(null=True, blank=True)
temporal_extent_end = models.DateTimeField(null=True, blank=True)
service_type_version = models.CharField(max_length=10, blank=True)
class Meta:
verbose_name = "PyCSW Configuration"
verbose_name_plural = "PyCSW Configuration"
class Organization(models.Model):
name = models.CharField(max_length=255)
short_name = models.CharField(max_length=30)
url = models.URLField()
address = models.CharField(max_length=255, blank=True)
city = models.CharField(max_length=50, blank=True)
state_or_province = models.CharField(max_length=50, blank=True)
postal_code = models.CharField(max_length=30, blank=True)
country = models.CharField(max_length=50, blank=True)
def __unicode__(self):
return self.short_name
class Collaborator(models.Model):
name = models.CharField(max_length=255)
position = models.CharField(max_length=255)
email = models.EmailField()
organization = models.ForeignKey(Organization,
related_name="collaborators")
url = models.URLField(blank=True)
phone = models.CharField(max_length=50, blank=True)
fax = models.CharField(max_length=50, blank=True)
hours_of_service = models.CharField(max_length=50, blank=True)
contact_instructions = models.CharField(max_length=255, blank=True)
def __unicode__(self):
return "{}({})".format(self.name, self.organization.short_name)
class Tag(models.Model):
name = models.SlugField(max_length=255, unique=True)
description = models.TextField()
def __str__(self):
return self.name
class Record(models.Model):
identifier = models.CharField(
max_length=255, db_index=True, help_text="Maps to pycsw:Identifier")
title = models.CharField(max_length=255, null=True, blank=True,
help_text='Maps to pycsw:Title')
typename = models.CharField(
max_length=100, default="", db_index=True, blank=True,
help_text="Maps to pycsw:Typename", editable=False
)
schema = models.CharField(
max_length=100, default="",
help_text="Maps to pycsw:Schema", db_index=True, blank=True, editable=False
)
insert_date = models.DateTimeField(
auto_now_add=True, help_text='Maps to pycsw:InsertDate')
xml = models.TextField(
default='',
editable=False,
help_text=' Maps to pycsw:XML'
)
any_text = models.TextField(help_text='Maps to pycsw:AnyText', null=True, blank=True)
modified = models.DateTimeField(
null=True, blank=True,
help_text='Maps to pycsw:Modified'
)
bounding_box = models.TextField(null=True, blank=True,
                                    help_text='Maps to pycsw:BoundingBox. It\'s a WKT geometry')
abstract = models.TextField(blank=True, null=True,
help_text='Maps to pycsw:Abstract')
keywords = models.CharField(max_length=255, blank=True, null=True,
help_text='Maps to pycsw:Keywords')
tags = models.ManyToManyField(Tag, blank=True)
publication_date = models.DateTimeField(
null=True, blank=True,
help_text='Maps to pycsw:PublicationDate'
)
service_type = models.CharField(max_length=30, null=True, blank=True,
help_text='Maps to pycsw:ServiceType')
service_type_version = models.CharField(
max_length=30, null=True, blank=True, editable=False,
help_text='Maps to pycsw:ServiceTypeVersion'
)
links = models.TextField(null=True, blank=True, editable=False,
help_text='Maps to pycsw:Links')
crs = models.CharField(max_length=255, null=True, blank=True, help_text='Maps to pycsw:CRS')
# Custom fields
active = models.BooleanField(default=True, editable=False)
    bbox_re = re.compile('POLYGON\s*\(\(([\+\-0-9\.]+)\s+([\+\-0-9\.]+)\s*\,\s*[\+\-0-9\.]+\s+[\+\-0-9\.]+\s*\,\s*([\+\-0-9\.]+)\s+([\+\-0-9\.]+)\s*\,\s*[\+\-0-9\.]+\s+[\+\-0-9\.]+\s*\,\s*[\+\-0-9\.]+\s+[\+\-0-9\.]+\s*\)\)')
legend = models.FileField(upload_to='catalogue/legends', null=True, blank=True)
source_legend = models.FileField(upload_to='catalogue/legends/source', null=True, blank=True,editable=False)
@property
def bbox(self):
if self.bounding_box:
try:
return [float(v) for v in self.bbox_re.match(self.bounding_box).groups()]
except:
return None
else:
return None
def __unicode__(self):
return self.identifier
def default_style(self, format):
try:
return self.styles.get(format=format, default=True)
except Style.DoesNotExist:
return None
@property
def metadata_link(self ):
return {
'endpoint': '{0}/catalogue/'.format(settings.BASE_URL),
'version': '2.0.2',
'type': 'CSW',
'link':'{0}/catalogue/?version=2.0.2&service=CSW&request=GetRecordById&elementSetName=full&typenames=csw:Record&resultType=results&id={1}'.format(settings.BASE_URL, self.identifier)
}
@property
def ows_resource(self ):
links = self.ows_links
resources = []
for link in links:
r = re.split("\t", link)
sample_link = r[3]
r = json.loads(r[2])
if 'WMS' in r['protocol']:
_type = 'WMS'
elif 'WFS' in r['protocol']:
_type = 'WFS'
resource = {
'type': _type,
'version': r['version'],
'endpoint': r['linkage'],
'link': sample_link
}
resource.update(r)
resources.append(resource)
return resources
@property
def style_links(self):
return self.get_resource_links('style')
@property
def ows_links(self):
return self.get_resource_links('ows')
def get_resource_links(self, _type):
if self.links:
links = self.links.split('^')
else:
links = []
        if _type == 'style':
style_links = []
for link in links:
r = re.split("\t", link)
r_json = json.loads(r[2])
if 'application' in r_json['protocol']:
style_links.append(link)
links = style_links
elif _type == 'ows':
ows_links = []
for link in links:
r = re.split("\t", link)
r_json = json.loads(r[2])
if 'OGC' in r_json['protocol']:
ows_links.append(link)
links = ows_links
return links
def _calculate_from_bbox(self, side):
bbox = []
if self.bounding_box:
try:
bbox = json.loads(self.bounding_box)
if not bbox or not isinstance(bbox, list) or len(bbox) != 4:
if side == 'width':
return 400
elif side == 'height':
return 500
except:
if side == 'width':
return 400
elif side == 'height':
return 500
if side == 'width':
return int(bbox[2]) - int(bbox[0])
elif side == 'height':
return int(bbox[3]) - int(bbox[1])
def generate_ows_link(self, endpoint, service_type, service_version):
if service_version in ("1.1.0", "1.1"):
service_version = "1.1.0"
elif service_version in ("2.0.0", "2", "2.0"):
service_version = "2.0.0"
elif service_version in ("1", "1.0", "1.0.0"):
service_version = "1.0.0"
endpoint = endpoint.strip()
original_endpoint = endpoint
#parse endpoint's parameters
endpoint = endpoint.split("?", 1)
endpoint, endpoint_parameters = (endpoint[0], endpoint[1]) if len(endpoint) == 2 else (endpoint[0], None)
endpoint_parameters = endpoint_parameters.split("&") if endpoint_parameters else None
endpoint_parameters = dict([(p.split("=", 1)[0].upper(), p.split("=", 1)) for p in endpoint_parameters] if endpoint_parameters else [])
#get target_crs
target_crs = None
if service_type == "WFS":
target_crs = [endpoint_parameters.get(k)[1] for k in ["SRSNAME"] if k in endpoint_parameters]
elif service_type in ["WMS", "GWC"]:
target_crs = [endpoint_parameters.get(k)[1] for k in ["SRS","CRS"] if k in endpoint_parameters]
if target_crs:
target_crs = target_crs[0].upper()
else:
target_crs = self.crs.upper() if self.crs else None
#transform the bbox between coordinate systems, if required
bbox = self.bbox or []
if bbox:
if target_crs != self.crs:
try:
if self.crs.upper() in epsg_extra:
p1 = pyproj.Proj(epsg_extra[self.crs.upper()])
else:
p1 = pyproj.Proj(init=self.crs)
if target_crs in epsg_extra:
p2 = pyproj.Proj(epsg_extra[target_crs])
else:
p2 = pyproj.Proj(init=target_crs)
bbox[0], bbox[1] = pyproj.transform(p1, p2, bbox[0], bbox[1])
bbox[2], bbox[3] = pyproj.transform(p1, p2, bbox[2], bbox[3])
except Exception as e:
raise ValidationError("Transform the bbox of layer({0}) from crs({1}) to crs({2}) failed.{3}".format(self.identifier, self.crs, target_crs, str(e)))
if service_type == "WFS":
#to limit the returned features, shrink the original bbox to 10 percent
percent = 0.1
shrinked_min = lambda min, max :(max - min) / 2 - (max - min) * percent / 2
shrinked_max = lambda min, max :(max - min) / 2 + (max - min) * percent / 2
shrinked_bbox = [shrinked_min(bbox[0], bbox[2]), shrinked_min(bbox[1], bbox[3]), shrinked_max(bbox[0], bbox[2]), shrinked_max(bbox[1], bbox[3])]
else:
shrinked_bbox = None
bbox2str = lambda bbox, service, version: ', '.join(str(c) for c in bbox) if service != "WFS" or version == "1.0.0" else ", ".join([str(c) for c in [bbox[1], bbox[0], bbox[3], bbox[2]]])
if service_type == "WFS":
kvp = {
"SERVICE":"WFS",
"REQUEST":"GetFeature",
"VERSION":service_version,
}
parameters = {}
if self.crs:
kvp["SRSNAME"] = self.crs.upper()
if target_crs:
parameters["crs"] = target_crs
is_geoserver = endpoint.find("geoserver") >= 0
if service_version == "1.1.0":
if is_geoserver:
kvp["maxFeatures"] = 20
elif shrinked_bbox:
kvp["BBOX"] = bbox2str(shrinked_bbox, service_type, service_version)
kvp["TYPENAME"] = self.identifier
elif service_version == "2.0.0":
if is_geoserver:
kvp["count"] = 20
elif shrinked_bbox:
kvp["BBOX"] = bbox2str(shrinked_bbox, service_type, service_version)
kvp["TYPENAMES"] = self.identifier
else:
if shrinked_bbox:
kvp["BBOX"] = bbox2str(shrinked_bbox, service_type, service_version)
kvp["TYPENAME"] = self.identifier
elif service_type == "WMS":
kvp = {
"SERVICE":"WMS",
"REQUEST":"GetMap",
"VERSION":service_version,
"LAYERS":self.identifier,
("SRS", "CRS"):self.crs.upper(),
"WIDTH":self.width,
"HEIGHT":self.height,
"FORMAT":"image/png"
}
parameters = {
"crs":target_crs,
"format":endpoint_parameters["FORMAT"][1] if "FORMAT" in endpoint_parameters else kvp["FORMAT"],
}
if bbox:
kvp["BBOX"] = bbox2str(bbox, service_type, service_version)
elif service_type == "GWC":
service_type = "WMS"
kvp = {
"SERVICE":"WMS",
"REQUEST":"GetMap",
"VERSION":service_version,
"LAYERS":self.identifier,
("SRS", "CRS"):self.crs.upper(),
"WIDTH":1024,
"HEIGHT":1024,
"FORMAT":"image/png"
}
parameters = {
"crs": target_crs,
"format":endpoint_parameters["FORMAT"][1] if "FORMAT" in endpoint_parameters else kvp["FORMAT"],
"width":endpoint_parameters["WIDTH"][1] if "WIDTH" in endpoint_parameters else kvp["WIDTH"],
"height":endpoint_parameters["HEIGHT"][1] if "HEIGHT" in endpoint_parameters else kvp["HEIGHT"],
}
if not bbox:
#bbox is null, use australian bbox
bbox = [108.0000, -45.0000, 155.0000, -10.0000]
p1 = pyproj.Proj(init="EPSG:4326")
p2 = pyproj.Proj(init=target_crs)
bbox[0], bbox[1] = pyproj.transform(p1, p2, bbox[0], bbox[1])
bbox[2], bbox[3] = pyproj.transform(p1, p2, bbox[2], bbox[3])
if not hasattr(PreviewTile, target_crs.replace(":", "_")):
raise Exception("GWC service don't support crs({}) ".format(target_crs))
tile_bbox = getattr(PreviewTile, target_crs.replace(":", "_"))(bbox)
kvp["BBOX"] = bbox2str(tile_bbox, service_type, service_version)
else:
raise Exception("Unknown service type({})".format(service_type))
is_exist = lambda k: any([n.upper() in endpoint_parameters for n in (k if isinstance(k, tuple) or isinstance(k, list) else [k])])
querystring = "&".join(["{}={}".format(k[0] if isinstance(k, tuple) or isinstance(k, list) else k, v) for k, v in kvp.items() if not is_exist(k) ])
if querystring:
if original_endpoint[-1] in ("?", "&"):
link = "{}{}".format(original_endpoint, querystring)
elif '?' in original_endpoint:
link = "{}&{}".format(original_endpoint, querystring)
else:
link = "{}?{}".format(original_endpoint, querystring)
else:
link = original_endpoint
#get the endpoint after removing ows related parameters
if endpoint_parameters:
is_exist = lambda k: any([ any([k == key.upper() for key in item_key]) if isinstance(item_key, tuple) or isinstance(item_key, list) else k == item_key.upper() for item_key in kvp ])
endpoint_querystring = "&".join(["{}={}".format(*v) for k, v in endpoint_parameters.items() if not is_exist(k) ])
if endpoint_querystring:
endpoint = "{}?{}".format(endpoint, endpoint_querystring)
#schema = '{{"protocol":"OGC:{0}", "linkage":"{1}", "version":"{2}"}}'.format(service_type.upper(), endpoint, service_version)
schema = {
"protocol":"OGC:{}".format(service_type.upper()),
"linkage":endpoint,
"version":service_version,
}
schema.update(parameters)
return 'None\tNone\t{0}\t{1}'.format(json.dumps(schema), link)
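    # Note (illustrative, not authoritative): generate_ows_link() and
    # generate_style_link() both produce a single tab-separated string of the
    # form 'None\tNone\t<json schema>\t<sample url>', and Record.links stores
    # several of these joined by '^' -- which is why get_resource_links()
    # splits on '^' and then on '\t'.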
@staticmethod
def generate_style_link(style):
#schema = '{{"protocol":"application/{0}", "name":"{1}", "default":"{2}", "linkage":"{3}/media/"}}'.format(style.format.lower(), style.name, style.default, settings.BASE_URL)
schema = {
"protocol" : "application/{}".format(style.format.lower()),
"name": style.name,
"default": style.default,
"linkage":"{}/media/a".format(settings.BASE_URL)
}
return 'None\tNone\t{0}\t{1}/media/{2}'.format(json.dumps(schema), settings.BASE_URL, style.content)
@staticmethod
def update_links(resources, record):
pos = 1
links = ''
for r in resources:
if pos == 1:
links += r
else:
links += '^{0}'.format(r)
pos += 1
record.links = links
record.save()
@property
def width(self):
return self._calculate_from_bbox('width')
@property
def height(self):
return self._calculate_from_bbox('height')
@property
def sld(self):
"""
        The default SLD style file.
        Returns None if it does not exist.
"""
return self.default_style("SLD")
@property
def lyr(self):
"""
        The default LYR style file.
        Returns None if it does not exist.
"""
return self.default_style("LYR")
@property
def qml(self):
"""
        The default QML style file.
        Returns None if it does not exist.
"""
return self.default_style("QML")
"""
Used to check the default style
for a particular format. If it does
not exist it sets the first style as
the default
Return the configured default style; otherwise return None
"""
def setup_default_styles(self, format):
default_style = self.default_style(format)
if default_style:
return default_style
else:
style = None
try:
#no default style is configured, try to get the builtin one as the default style
style = self.styles.get(format=format, name=Style.BUILTIN)
except:
                #no builtin style, try to get the first added one as the default style
style = self.styles.filter(format=format).order_by("name").first()
if style:
style.default = True
setattr(style,"triggered_default_style_setting",True)
style.save(update_fields=["default"])
return style
else:
return None
def delete(self, using=None):
if self.active:
raise ValidationError("Can not delete the active record ({}).".format(self.identifier))
else:
super(Record, self).delete(using)
class Meta:
ordering = ['identifier']
@receiver(pre_save, sender=Record)
def update_modify_date(sender, instance, **kwargs):
if instance.pk:
update_fields=kwargs.get("update_fields", None)
if not update_fields or any([f in ("title","abstract","keywords","links") for f in update_fields]):
db_instance = Record.objects.get(pk = instance.pk)
if any([getattr(db_instance,f) != getattr(instance,f) for f in ("title","abstract","keywords","links")]):
#geoserver related columns are changed, set the modified to now
instance.modified = timezone.now()
#add field "modified" into the update field list.
if update_fields and "modified" not in update_fields:
if not isinstance(update_fields,list):
update_fields = [f for f in update_fields]
kwargs["update_fields"] = update_fields
update_fields.append("modified")
def styleFilePath(instance,filename):
return "catalogue/styles/{}_{}.{}".format(instance.record.identifier.replace(':','_'),instance.name.split('.')[0],instance.format.lower())
class StyleStorage(FileSystemStorage):
def get_available_name(self, name, max_length=None):
"""
        If the name already exists, remove the existing file
"""
if self.exists(name):
self.delete(name)
return name
class Style(models.Model):
BUILTIN = "builtin"
FORMAT_CHOICES = (
('SLD', 'SLD'),
('QML', 'QML'),
('LYR', 'LAYER')
)
record = models.ForeignKey(Record, related_name='styles')
name = models.CharField(max_length=255)
format = models.CharField(max_length=3, choices=FORMAT_CHOICES)
default = models.BooleanField(default=False)
content = models.FileField(upload_to=styleFilePath,storage=StyleStorage())
@property
def identifier(self):
return "{}:{}".format(self.record.identifier, self.name)
def clean(self):
from django.core.exceptions import ValidationError
if not self.pk and self.name == Style.BUILTIN:
raise ValidationError("Can't add a builtin style.")
"""
simply reset the default style to the current style if the current style is configured as default style
if getattr(self, "record", None) and self.default:
try:
duplicate = Style.objects.exclude(pk=self.pk).get(record=self.record, format=self.format, default=True)
if duplicate and self.default:
raise ValidationError('There can only be one default format style for each record')
except Style.DoesNotExist:
pass
"""
@property
def can_delete(self):
if not self.pk or self.name == Style.BUILTIN:
return False
return True
def delete(self, using=None):
if self.name == Style.BUILTIN:
raise ValidationError("Can not delete builtin style.")
else:
super(Style, self).delete(using)
def __unicode__(self):
return self.name
@receiver(pre_save, sender=Style)
def update_links(sender, instance, **kwargs):
link = Record.generate_style_link(instance)
links_parts = re.split("\t", link)
json_link = json.loads(links_parts[2])
present = False
style_links = instance.record.style_links
ows_links = instance.record.ows_links
if not instance.record.links:
instance.record.links = ''
for style_link in style_links:
parts = re.split("\t", style_link)
r = json.loads(parts[2])
if r['name'] == json_link['name'] and r['protocol'] == json_link['protocol']:
present = True
if not present:
style_links.append(link)
links = ows_links + style_links
Record.update_links(links, instance.record)
@receiver(post_delete, sender=Style)
def remove_style_links(sender, instance, **kwargs):
style_links = instance.record.style_links
ows_links = instance.record.ows_links
    #remove the deleted style's link
for link in style_links:
parts = re.split("\t", link)
r = json.loads(parts[2])
if r['name'] == instance.name and instance.format.lower() in r['protocol']:
style_links.remove(link)
links = ows_links + style_links
Record.update_links(links, instance.record)
@receiver(pre_save, sender=Style)
def set_default_style (sender, instance, **kwargs):
if getattr(instance,"triggered_default_style_setting",False):
return
update_fields=kwargs.get("update_fields", None)
if not instance.pk or not update_fields or "default" in update_fields:
if instance.default:
#The style will be set as the default style
cur_default_style = instance.record.default_style(instance.format)
if cur_default_style and cur_default_style.pk != instance.pk:
#The current default style is not the saving style, reset the current default style's default to false
cur_default_style.default=False
setattr(cur_default_style,"triggered_default_style_setting",True)
cur_default_style.save(update_fields=["default"])
                #if the default style is changed, set the latest modified date
instance.record.modified = timezone.now()
instance.record.save(update_fields=["modified"])
else:
#The saving style is not the default style, try to set a default style if it does not exist
default_style = instance.record.setup_default_styles(instance.format)
if not default_style or default_style.pk == instance.pk:
#no default style is configured, set the current one as default style
instance.default = True
            #if the default style is changed, set the latest modified date
instance.record.modified = timezone.now()
instance.record.save(update_fields=["modified"])
@receiver(post_delete, sender=Style)
def auto_remove_style_from_disk_on_delete(sender, instance, **kwargs):
""" Deletes the style file from disk when the
object is deleted
"""
if instance.default:
#deleted style is the default style, reset the default style
instance.record.setup_default_styles(instance.format)
if instance.content:
if os.path.isfile(instance.content.path):
os.remove(instance.content.path)
class Application(models.Model):
"""
    Represent an application which can access WMS, WFS and WCS services from GeoServer
"""
name = models.CharField(max_length=255, validators=[validate_slug], unique=True, blank=False)
description = models.TextField(blank=True)
last_modify_time = models.DateTimeField(auto_now=True, null=False)
create_time = models.DateTimeField(auto_now_add=True, null=False)
records = models.ManyToManyField(Record)
@staticmethod
def get_view_name(app):
return "catalogue_record_{}".format(app)
@property
def records_view(self):
return Application.get_view_name(self.name)
class Meta:
ordering = ['name']
def __str__(self):
return self.name
class ApplicationEventListener(object):
@staticmethod
@receiver(pre_delete, sender=Application)
def _pre_delete(sender, instance, **args):
#remove the view for this application
try:
cursor = connection.cursor()
cursor.execute("DROP VIEW {} CASCADE".format(instance.records_view))
except:
#drop failed, maybe the view does not exist, ignore the exception
connection._rollback()
@staticmethod
@receiver(pre_save, sender=Application)
def _pre_save(sender, instance, **args):
#create a view for this application
try:
cursor = connection.cursor()
cursor.execute("CREATE OR REPLACE VIEW {} AS SELECT r.* FROM catalogue_application a join catalogue_applicationlayer l on a.id = l.application_id join catalogue_record r on l.layer_id = r.id WHERE a.name = '{}' and r.active order by l.order, r.identifier".format(instance.records_view, instance.name))
except Exception as e:
#create view failed
connection._rollback()
raise ValidationError(e)
class ApplicationLayer(models.Model):
"""
The relationship between application and layer
"""
application = models.ForeignKey(Application, blank=False, null=False)
layer = models.ForeignKey(Record, null=False, blank=False, limit_choices_to={"active":True})
order = models.PositiveIntegerField(blank=False, null=False)
def __str__(self):
return "{}:{}".format(self.application.name, self.layer.identifier)
class Meta:
unique_together = (('application', 'layer'))
ordering = ['application', 'order', 'layer']
|
rockychen-dpaw/oim-cms
|
catalogue/models.py
|
Python
|
apache-2.0
| 32,982
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import itertools
from enum import Enum
from typing import Set
from pants.backend.jvm.target_types import JarsField
from pants.backend.python.target_types import PythonRequirementsField
from pants.engine.addresses import Addresses
from pants.engine.console import Console
from pants.engine.goal import Goal, GoalSubsystem, LineOriented
from pants.engine.rules import goal_rule
from pants.engine.selectors import Get
from pants.engine.target import Dependencies as DependenciesField
from pants.engine.target import Targets, TransitiveTargets
from pants.util.ordered_set import FrozenOrderedSet
class DependencyType(Enum):
SOURCE = "source"
THIRD_PARTY = "3rdparty"
SOURCE_AND_THIRD_PARTY = "source-and-3rdparty"
# TODO(#8762) Get this rule to feature parity with the dependencies task.
class DependenciesOptions(LineOriented, GoalSubsystem):
"""List the dependencies of the input targets."""
name = "dependencies2"
@classmethod
def register_options(cls, register):
super().register_options(register)
register(
"--transitive",
default=False,
type=bool,
help=(
"List all transitive dependencies. If unspecified, list direct dependencies only."
),
)
register(
"--type",
type=DependencyType,
default=DependencyType.SOURCE,
help=(
"Which types of dependencies to list, where `source` means source code "
"dependencies and `3rdparty` means third-party requirements and JARs."
),
)
class Dependencies(Goal):
subsystem_cls = DependenciesOptions
@goal_rule
async def dependencies(
console: Console, addresses: Addresses, options: DependenciesOptions,
) -> Dependencies:
if options.values.transitive:
transitive_targets = await Get[TransitiveTargets](Addresses, addresses)
targets = Targets(transitive_targets.closure - FrozenOrderedSet(transitive_targets.roots))
else:
target_roots = await Get[Targets](Addresses, addresses)
targets = await Get[Targets](
Addresses(
itertools.chain.from_iterable(
tgt.get(DependenciesField).value or () for tgt in target_roots
)
)
)
include_3rdparty = options.values.type in [
DependencyType.THIRD_PARTY,
DependencyType.SOURCE_AND_THIRD_PARTY,
]
include_source = options.values.type in [
DependencyType.SOURCE,
DependencyType.SOURCE_AND_THIRD_PARTY,
]
address_strings = set()
third_party_requirements: Set[str] = set()
for tgt in targets:
if include_source:
address_strings.add(tgt.address.spec)
if include_3rdparty:
if tgt.has_field(PythonRequirementsField):
third_party_requirements.update(
str(python_req.requirement) for python_req in tgt[PythonRequirementsField].value
)
if tgt.has_field(JarsField):
third_party_requirements.update(
(
f"{jar.org}:{jar.name}:{jar.rev}"
if jar.rev is not None
else f"{jar.org}:{jar.name}"
)
for jar in tgt[JarsField].value
)
with options.line_oriented(console) as print_stdout:
for address in sorted(address_strings):
print_stdout(address)
for requirement_string in sorted(third_party_requirements):
print_stdout(requirement_string)
return Dependencies(exit_code=0)
def rules():
return [dependencies]
|
tdyas/pants
|
src/python/pants/backend/project_info/rules/dependencies.py
|
Python
|
apache-2.0
| 3,857
|
# -*- coding: utf-8 -*-
import datetime
import hashlib
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from application.extensions import db, bcrypt
from flask import current_app
from flask_login import UserMixin
from configs.enum import USER_GENDER, USER_ROLE, NOTI_TYPE
from configs import signals
__all__ = ['SocialOAuth', 'UserAccount', 'UserInformation', 'User', 'FavorAction']
class SocialOAuth(db.Document):
meta = {
'indexes': ['site', 'user', ('site_uid', 'site'), 'unionid']
}
app = db.StringField(choices=['IOS', 'MOBILEWEB'], default='MOBILEWEB')
site = db.StringField(max_length=255, required=True)
site_uid = db.StringField(max_length=255, required=True, unique_with='site')
unionid = db.StringField()
user = db.ReferenceField('User')
site_uname = db.StringField(max_length=255)
access_token = db.StringField(required=True)
expire_date = db.DateTimeField()
refresh_token = db.StringField()
    # whether we can get information of this oauth
can_refresh = db.BooleanField(default=True)
last_active_date = db.DateTimeField()
def to_json(self):
return dict(site=self.site, site_uname=self.site_uname)
@classmethod
def create(cls, site, site_uid, site_uname, access_token, expires_in=0,
refresh_token=None, email=None, mobile_number=None, gender=None,
password=None, unionid=None, app='MOBILEWEB',
is_email_verified=False):
""" create an oauth record and an user"""
oauth = cls(site=site, site_uid=site_uid, site_uname=site_uname,
access_token=access_token,
refresh_token=refresh_token, unionid=unionid, app=app)
if not email:
email = '{}-{}@maybi.cn'.format(
site, hashlib.md5((app+site+site_uid).encode('utf-8')).hexdigest())
        # create a user
user = User.create(email=email, mobile_number=mobile_number,
password=password or site_uname, name=site_uname)
user.account.is_email_verified = is_email_verified
user.information.gender = gender
if site == 'wechat':
user.subscribed_mp = True
user.save()
oauth.user = user
oauth.save()
oauth.update_token(access_token, expires_in)
return oauth
def update_token(self, access_token, expires_in=0):
expire_date = datetime.datetime.utcnow() + datetime.timedelta(
seconds=int(expires_in))
self.update(set__access_token=access_token,
set__expire_date=expire_date)
def re_auth(self, access_token, expires_in, refresh_token=None,
unionid=None):
self.update_token(access_token, expires_in)
self.update(set__refresh_token=refresh_token)
if unionid:
self.update(set__unionid=unionid)
def update_avatar(self, url):
self.user.update(set__avatar_url=url)
@classmethod
def get_user(cls, site, site_uid):
so = cls.objects(site=site, site_uid=site_uid).first()
return so.user if so else None
@classmethod
def refresh_active(cls, site, site_uid, dt):
# ignore if document does not exist
cls.objects(site=site, site_uid=site_uid).update_one(
set__last_active_date=dt)
class UserInformation(db.EmbeddedDocument):
gender = db.StringField(max_length=1, choices=USER_GENDER)
class UserAccount(db.EmbeddedDocument):
'''
    The UserAccount class contains the user's personal information
    and account settings
'''
created_at = db.DateTimeField(default=datetime.datetime.utcnow,
required=True)
# login related
email = db.EmailField(required=True, unique=True)
mobile_number = db.StringField()
is_email_verified = db.BooleanField(default=False)
_password = db.StringField(max_length=255)
activation_key = db.StringField(max_length=255)
activate_key_expire_date = db.DateTimeField()
# ===============================================
# password
@property
def password(self):
return self._password
@password.setter
def password(self, password):
self._password = bcrypt.generate_password_hash(password).decode('utf-8')
print (self._password)
def check_password(self, password):
if self.password is None:
return False
return bcrypt.check_password_hash(self.password, password)
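    # Illustrative usage sketch (not part of the original model; the literal
    # values below are made up): the setter stores only a bcrypt hash, so
    # verification must go through check_password:
    #
    #     acct = UserAccount(email='demo@example.com')
    #     acct.password = 's3cret'          # stored as a bcrypt hash
    #     acct.check_password('s3cret')     # -> True
    #     acct.check_password('wrong')      # -> False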
def to_json(self):
return dict(created_at=str(self.created_at),
email=self.email)
class FavorAction(object):
'''
expecting the document class has the following fields:
num_favors = db.IntField(default=0, min_value=0)
favor_items = db.ListField(db.StringField())
'''
def mark_favored(self, item):
if item.id not in self.favor_items:
item.update(inc__num_favors=1)
self.update(inc__num_favors=1, push__favor_items=item.id)
signals.mark_favor.send(self, item_id=str(item.id))
def unmark_favored(self, item):
if item.id in self.favor_items:
item.update(dec__num_favors=1)
self.update(dec__num_favors=1, pull__favor_items=item.id)
def mark_like(self, post):
post.update(inc__num_likes=1)
self.update(inc__num_post_likes=1)
def mark_unlike(self, post):
post.update(dec__num_likes=1)
self.update(dec__num_post_likes=1)
class User(db.Document, UserMixin, FavorAction):
'''
    The User class contains only basic and frequently used information.
    The UserMixin superclass provides the four authentication methods
    required by Flask-Login.
'''
meta = {
'indexes': ['name', 'account.created_at', 'roles', 'level',
'account.email',
'account.is_email_verified', 'is_deleted'],
'ordering': ['-account.created_at']
}
name = db.StringField(required=True)
account = db.EmbeddedDocumentField('UserAccount')
information = db.EmbeddedDocumentField('UserInformation')
avatar_url = db.URLField(default='http://assets.maybi.cn/logo/panda.jpg')
# level
# 0: normal user
# 1: normal member; 2: advance member
# 3: premium member; 4: VIP member
level = db.IntField(default=0)
roles = db.ListField(db.StringField())
addresses = db.ListField(db.ReferenceField('Address'))
default_address = db.ReferenceField('Address')
# followers
num_followers = db.IntField(default=0, min_value=0)
num_followings = db.IntField(default=0, min_value=0)
followers = db.ListField(db.ReferenceField('User'))
followings = db.ListField(db.ReferenceField('User'))
    # whether the user has subscribed to our wechat account
subscribed_mp = db.BooleanField(default=False)
# favor related (item_ids)
num_favors = db.IntField(default=0, min_value=0)
favor_items = db.ListField(db.IntField())
# favor related (post_ids)
num_post_likes = db.IntField(default=0, min_value=0)
like_posts = db.ListField(db.IntField())
# shopping cart
cart = db.ReferenceField('Cart')
# wallet
wallet = db.ReferenceField('CouponWallet')
is_deleted = db.BooleanField(default=False)
deleted_date = db.DateTimeField()
def __unicode__(self):
return '%s' % str(self.id)
#return u'{}'.format(self.name)
@property
def coin_wallet(self):
import application.models as Models
return Models.CoinWallet.get_or_create(user=self)
@property
def hongbao_wallet(self):
import application.models as Models
return Models.HongbaoWallet.by_user(user=self)
@property
def orders(self):
import application.models as Models
return Models.Order.objects(customer_id=self.id, is_paid=True)
@property
def avatar_thumb(self):
return self.avatar_url[:23] + 'avatar_thumbs/80x80/' + self.avatar_url[23:]
def used_coupon(self, code):
import application.models as Models
return bool(Models.Order.objects(customer_id=self.id, is_paid=True,
coupon__codes__contains=code))
@db.queryset_manager
def active(doc_cls, queryset):
return queryset.filter(is_deleted=False)
@property
def is_admin(self):
return USER_ROLE.ADMIN in self.roles
# Follow / Following
def follow(self, other):
if self not in other.followers:
other.followers.append(self)
other.num_followers += 1
if other not in self.followings:
self.followings.append(other)
self.num_followings += 1
self.save()
other.save()
signals.site_message.send(self,
dest=other.id,
source=self,
imgs=[self.avatar_url],
noti_type=NOTI_TYPE.FOLLOW,
title='')
def unfollow(self, other):
if self in other.followers:
other.followers.remove(self)
other.num_followers -= 1
if other in self.followings:
self.followings.remove(other)
self.num_followings -= 1
self.save()
other.save()
def is_following(self, other):
return other in self.followings
def to_json(self):
data = dict(name=self.name,
avatar_url=self.avatar_url,
avatar_thumb=self.avatar_thumb,
num_followers=self.num_followers,
num_followings=self.num_followings,
created_at=str(self.account.created_at),
id=str(self.id)
)
return data
@classmethod
def authenticate(cls, email=None, password=None):
if email:
user = cls.active(account__email=email.lower()).first()
else:
user = None
if user:
authenticated = user.account.check_password(password)
else:
authenticated = False
return user, authenticated
def generate_auth_token(self, expires_in=604800):
s = Serializer(current_app.config['SECRET_KEY'], expires_in=expires_in)
return s.dumps({'id': str(self.id)}).decode('utf-8')
@staticmethod
def verify_auth_token(token):
s = Serializer(current_app.config['SECRET_KEY'])
try:
data = s.loads(token)
        except Exception:
return None
return User.objects(id=data['id']).first()
@classmethod
def create(cls, email, password, name, mobile_number=None):
from application.models.coupon.wallet import CouponWallet
from application.models.cart import Cart
# init user account.
cart = Cart()
cart.save()
wallet = CouponWallet()
wallet.save()
# account
account = UserAccount(email=email.lower(),
mobile_number=mobile_number,
is_email_verified=True)
account.password = password
user = User(name=name,
roles=[USER_ROLE.MEMBER],
information=UserInformation(),
cart=cart,
wallet=wallet,
account=account)
user.save()
signals.user_signup.send('system', user=user)
return user
def mark_deleted(self):
if self.is_deleted:
return
# delete social oauth, otherwise user can still login via wechat
SocialOAuth.objects(user=self).delete()
self.is_deleted = True
self.deleted_date = datetime.datetime.utcnow()
self.save()
|
seasonstar/bibi
|
application/models/user/user.py
|
Python
|
apache-2.0
| 11,775
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import strutils
import webob.exc
import trove.common.apischema as apischema
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common import pagination
from trove.common.utils import correct_id_with_req
from trove.common import wsgi
from trove.extensions.mysql.common import populate_users
from trove.extensions.mysql.common import populate_validated_databases
from trove.extensions.mysql.common import unquote_user_host
from trove.extensions.mysql import models
from trove.extensions.mysql import views
from trove.guestagent.db import models as guest_models
LOG = logging.getLogger(__name__)
import_class = importutils.import_class
CONF = cfg.CONF
class UserController(wsgi.Controller):
    """Controller for database user functionality."""
schemas = apischema.user
@classmethod
def get_schema(cls, action, body):
action_schema = super(UserController, cls).get_schema(action, body)
if 'update_all' == action:
update_type = body.keys()[0]
action_schema = action_schema.get(update_type, {})
return action_schema
def index(self, req, tenant_id, instance_id):
"""Return all users."""
LOG.info(_("Listing users for instance '%s'") % instance_id)
LOG.info(_("req : '%s'\n\n") % req)
context = req.environ[wsgi.CONTEXT_KEY]
users, next_marker = models.Users.load(context, instance_id)
view = views.UsersView(users)
paged = pagination.SimplePaginatedDataView(req.url, 'users', view,
next_marker)
return wsgi.Result(paged.data(), 200)
def create(self, req, body, tenant_id, instance_id):
"""Creates a set of users."""
LOG.info(_("Creating users for instance '%s'") % instance_id)
LOG.info(_("req : '%s'\n\n") % strutils.mask_password(req))
LOG.info(_("body : '%s'\n\n") % strutils.mask_password(body))
context = req.environ[wsgi.CONTEXT_KEY]
users = body['users']
try:
model_users = populate_users(users)
models.User.create(context, instance_id, model_users)
except (ValueError, AttributeError) as e:
raise exception.BadRequest(msg=str(e))
return wsgi.Result(None, 202)
def delete(self, req, tenant_id, instance_id, id):
LOG.info(_("Deleting user for instance '%s'") % instance_id)
LOG.info(_("req : '%s'\n\n") % req)
context = req.environ[wsgi.CONTEXT_KEY]
id = correct_id_with_req(id, req)
username, host = unquote_user_host(id)
user = None
try:
user = guest_models.MySQLUser()
user.name = username
user.host = host
found_user = models.User.load(context, instance_id, username,
host)
if not found_user:
user = None
except (ValueError, AttributeError) as e:
raise exception.BadRequest(msg=str(e))
if not user:
raise exception.UserNotFound(uuid=id)
models.User.delete(context, instance_id, user.serialize())
return wsgi.Result(None, 202)
def show(self, req, tenant_id, instance_id, id):
"""Return a single user."""
LOG.info(_("Showing a user for instance '%s'") % instance_id)
LOG.info(_("req : '%s'\n\n") % req)
context = req.environ[wsgi.CONTEXT_KEY]
id = correct_id_with_req(id, req)
username, host = unquote_user_host(id)
user = None
try:
user = models.User.load(context, instance_id, username, host)
except (ValueError, AttributeError) as e:
raise exception.BadRequest(msg=str(e))
if not user:
raise exception.UserNotFound(uuid=id)
view = views.UserView(user)
return wsgi.Result(view.data(), 200)
def update(self, req, body, tenant_id, instance_id, id):
"""Change attributes for one user."""
LOG.info(_("Updating user attributes for instance '%s'") % instance_id)
LOG.info(_("req : '%s'\n\n") % strutils.mask_password(req))
context = req.environ[wsgi.CONTEXT_KEY]
id = correct_id_with_req(id, req)
username, hostname = unquote_user_host(id)
user = None
user_attrs = body['user']
try:
user = models.User.load(context, instance_id, username, hostname)
except (ValueError, AttributeError) as e:
raise exception.BadRequest(msg=str(e))
if not user:
raise exception.UserNotFound(uuid=id)
try:
models.User.update_attributes(context, instance_id, username,
hostname, user_attrs)
except (ValueError, AttributeError) as e:
raise exception.BadRequest(msg=str(e))
return wsgi.Result(None, 202)
def update_all(self, req, body, tenant_id, instance_id):
"""Change the password of one or more users."""
LOG.info(_("Updating user passwords for instance '%s'") % instance_id)
LOG.info(_("req : '%s'\n\n") % strutils.mask_password(req))
context = req.environ[wsgi.CONTEXT_KEY]
users = body['users']
model_users = []
for user in users:
try:
mu = guest_models.MySQLUser()
mu.name = user['name']
mu.host = user.get('host')
mu.password = user['password']
found_user = models.User.load(context, instance_id,
mu.name, mu.host)
if not found_user:
user_and_host = mu.name
if mu.host:
user_and_host += '@' + mu.host
raise exception.UserNotFound(uuid=user_and_host)
model_users.append(mu)
except (ValueError, AttributeError) as e:
raise exception.BadRequest(msg=str(e))
models.User.change_password(context, instance_id, model_users)
return wsgi.Result(None, 202)
class UserAccessController(wsgi.Controller):
"""Controller for adding and removing database access for a user."""
schemas = apischema.user
@classmethod
def get_schema(cls, action, body):
schema = {}
if 'update_all' == action:
schema = cls.schemas.get(action).get('databases')
return schema
def _get_user(self, context, instance_id, user_id):
username, hostname = unquote_user_host(user_id)
try:
user = models.User.load(context, instance_id, username, hostname)
except (ValueError, AttributeError) as e:
raise exception.BadRequest(msg=str(e))
if not user:
raise exception.UserNotFound(uuid=user_id)
return user
def index(self, req, tenant_id, instance_id, user_id):
"""Show permissions for the given user."""
LOG.info(_("Showing user access for instance '%s'") % instance_id)
LOG.info(_("req : '%s'\n\n") % req)
context = req.environ[wsgi.CONTEXT_KEY]
# Make sure this user exists.
user_id = correct_id_with_req(user_id, req)
user = self._get_user(context, instance_id, user_id)
if not user:
LOG.error(_("No such user: %(user)s ") % {'user': user})
raise exception.UserNotFound(uuid=user)
username, hostname = unquote_user_host(user_id)
access = models.User.access(context, instance_id, username, hostname)
view = views.UserAccessView(access.databases)
return wsgi.Result(view.data(), 200)
def update(self, req, body, tenant_id, instance_id, user_id):
"""Grant access for a user to one or more databases."""
LOG.info(_("Granting user access for instance '%s'") % instance_id)
LOG.info(_("req : '%s'\n\n") % req)
context = req.environ[wsgi.CONTEXT_KEY]
user_id = correct_id_with_req(user_id, req)
user = self._get_user(context, instance_id, user_id)
if not user:
LOG.error(_("No such user: %(user)s ") % {'user': user})
raise exception.UserNotFound(uuid=user)
username, hostname = unquote_user_host(user_id)
databases = [db['name'] for db in body['databases']]
models.User.grant(context, instance_id, username, hostname, databases)
return wsgi.Result(None, 202)
def delete(self, req, tenant_id, instance_id, user_id, id):
"""Revoke access for a user."""
LOG.info(_("Revoking user access for instance '%s'") % instance_id)
LOG.info(_("req : '%s'\n\n") % req)
context = req.environ[wsgi.CONTEXT_KEY]
user_id = correct_id_with_req(user_id, req)
user = self._get_user(context, instance_id, user_id)
if not user:
LOG.error(_("No such user: %(user)s ") % {'user': user})
raise exception.UserNotFound(uuid=user)
username, hostname = unquote_user_host(user_id)
access = models.User.access(context, instance_id, username, hostname)
databases = [db.name for db in access.databases]
if id not in databases:
raise exception.DatabaseNotFound(uuid=id)
models.User.revoke(context, instance_id, username, hostname, id)
return wsgi.Result(None, 202)
class SchemaController(wsgi.Controller):
    """Controller for database schema functionality."""
schemas = apischema.dbschema
def index(self, req, tenant_id, instance_id):
"""Return all schemas."""
LOG.info(_("Listing schemas for instance '%s'") % instance_id)
LOG.info(_("req : '%s'\n\n") % req)
context = req.environ[wsgi.CONTEXT_KEY]
schemas, next_marker = models.Schemas.load(context, instance_id)
view = views.SchemasView(schemas)
paged = pagination.SimplePaginatedDataView(req.url, 'databases', view,
next_marker)
return wsgi.Result(paged.data(), 200)
def create(self, req, body, tenant_id, instance_id):
"""Creates a set of schemas."""
LOG.info(_("Creating schema for instance '%s'") % instance_id)
LOG.info(_("req : '%s'\n\n") % req)
LOG.info(_("body : '%s'\n\n") % body)
context = req.environ[wsgi.CONTEXT_KEY]
schemas = body['databases']
model_schemas = populate_validated_databases(schemas)
models.Schema.create(context, instance_id, model_schemas)
return wsgi.Result(None, 202)
def delete(self, req, tenant_id, instance_id, id):
LOG.info(_("Deleting schema for instance '%s'") % instance_id)
LOG.info(_("req : '%s'\n\n") % req)
context = req.environ[wsgi.CONTEXT_KEY]
try:
schema = guest_models.ValidatedMySQLDatabase()
schema.name = id
models.Schema.delete(context, instance_id, schema.serialize())
except (ValueError, AttributeError) as e:
raise exception.BadRequest(msg=str(e))
return wsgi.Result(None, 202)
def show(self, req, tenant_id, instance_id, id):
raise webob.exc.HTTPNotImplemented()
|
fabian4/trove
|
trove/extensions/mysql/service.py
|
Python
|
apache-2.0
| 11,928
|
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pkg_resources
def test_requirements():
pkg_resources.require('fuel-ostf')
|
eayunstack/fuel-ostf
|
fuel_plugin/testing/tests/unit/test_requirements.py
|
Python
|
apache-2.0
| 697
|
from abc import ABCMeta, abstractmethod
from os import path
from utils import is_file_extension
class BaseStrategy(object):
__metaclass__ = ABCMeta
__extensions = []
__language = ""
# extensions: an array of all file extensions
    # language: language name
def __init__(self, extensions, language):
self.__extensions = extensions
self.__language = language
@abstractmethod
def execute(self):
pass
def is_language(self, filename):
return is_file_extension(filename, self.__extensions)
def get_name(self):
return self.__language
def get_extensions(self):
return self.__extensions
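# Illustrative sketch (not part of the original module; the class below is an
# assumption): a concrete strategy passes its extensions and language name to
# BaseStrategy.__init__ and implements execute(), e.g.
#
#     class PythonStrategy(BaseStrategy):
#         def __init__(self):
#             super(PythonStrategy, self).__init__(["py"], "Python")
#
#         def execute(self):
#             pass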
|
nghiattran/pm
|
languages/__init__.py
|
Python
|
apache-2.0
| 672
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from ReadWeibo.mainapp.models import Category, Weibo, Comment
from ReadWeibo.account.models import Account
from main import Config
import DataUtil as du
import networkx as nx
import numpy as np
import math, random, operator, csv
import logging, sys, os
import copy
class TopicalWordRank:
def __init__(self, graph, max_iter=20):
self.max_iter = max_iter
self.graph = graph
self.ntopics = 25
self.ranks = dict()
self.p = dict()
def gibbs_lda(self, fdir, ntopics=75, niters=1000, savestep=500, twords=20):
self.ntopics = ntopics
logging.info("lda topic modeling")
cmd = ["/home/plex/wksp/projects/GibbsLDA++-0.2/src/lda -est", " -ntopics %s " % ntopics,
" -niters %s " % niters, "-savestep %s " % savestep,
"-twords %s " % twords, "-dfile %s/lda.train" % fdir]
logging.info(u''.join(cmd))
os.system(u''.join(cmd))
logging.info("Loading lda result")
self.p = du.load_lda_model(fdir, ntopics)
self.ranks = copy.deepcopy(self.p)
def rank(self, lam=0.85):
ntopics = self.ntopics
graph = self.graph
q={}
for nod, info in graph.nodes(data=True):
weight = .0
if 'weight' in info:
weight = info['weight']
q[nod.decode('utf-8')] = np.array([weight]*ntopics)
for _iter in range(self.max_iter):
logging.info(u'TopicalWordRank iter : %s' % _iter)
for key, node in graph.nodes(data=True):
rank_sum = np.array([.0]*ntopics)
for nei in graph[key]:
weight = graph[key][nei]['weight']
weight_sum = .0
for out in graph[nei]:
weight_sum += graph[nei][out]['weight']
#TODO
if type(nei)==str:
nei = nei.decode('utf-8')
rank_sum += (weight / weight_sum) * self.ranks[nei]
#TODO
if type(key)==str:
key = key.decode('utf-8')
self.ranks[key] = lam*rank_sum + (1 - lam)*(self.p[key]+q[key])
def save(self, fpath, with_log=True):
with open(fpath, 'w') as resf:
for key in self.ranks:
result = u"%s\t%s\n" % (key, [val for val in self.ranks[key]])
if with_log:
logging.info(result)
resf.write(result.encode('utf-8'))
if __name__ == '__main__':
fdir = sys.argv[1]
#G = du.gen_data(graph_path=u'%s/graph.yaml' % fdir,
# lda_path=u'%s/lda.train' % fdir,
# user_lim=2, user_wb_lim=20)
G = du.gen_data(graph_path=u'%s/graph.yaml' % fdir,
lda_path=u'%s/lda.train' % fdir,
user_lim=200, user_wb_lim=200)
#G = du.load_graph(u'%s/graph.yaml' % fdir)
job = TopicalWordRank(G)
job.gibbs_lda(fdir, ntopics=50, niters=1000, savestep=1000, twords=20)
#job.gibbs_lda(fdir, ntopics=2, niters=10, savestep=10, twords=20)
job.rank()
job.save(u'%s/result.txt' % fdir)
pass
|
wfwei/ReadWeibo
|
topical/TopicalWordRank.py
|
Python
|
apache-2.0
| 3,220
|
import warnings
from contextlib import contextmanager
from distutils import version
import re
import importlib
from unittest import mock
import numpy as np
from numpy.testing import assert_array_equal # noqa: F401
from xarray.core.duck_array_ops import allclose_or_equiv # noqa
import pytest
from xarray.core import utils
from xarray.core.options import set_options
from xarray.core.indexing import ExplicitlyIndexed
import xarray.testing
from xarray.plot.utils import import_seaborn
try:
from pandas.testing import assert_frame_equal
except ImportError:
# old location, for pandas < 0.20
from pandas.util.testing import assert_frame_equal # noqa: F401
# import mpl and change the backend before other mpl imports
try:
import matplotlib as mpl
# Order of imports is important here.
# Using a different backend makes Travis CI work
mpl.use('Agg')
except ImportError:
pass
def _importorskip(modname, minversion=None):
try:
mod = importlib.import_module(modname)
has = True
if minversion is not None:
if LooseVersion(mod.__version__) < LooseVersion(minversion):
raise ImportError('Minimum version not satisfied')
except ImportError:
has = False
func = pytest.mark.skipif(not has, reason='requires {}'.format(modname))
return has, func
def LooseVersion(vstring):
# Our development version is something like '0.10.9+aac7bfc'
# This function just ignored the git commit id.
vstring = vstring.split('+')[0]
return version.LooseVersion(vstring)
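# Illustrative note (not part of the original module): with the '+' handling
# above, a local development version compares like its release base, e.g.
#
#     LooseVersion('0.10.9+aac7bfc') == version.LooseVersion('0.10.9')  # True
#     LooseVersion('0.10.9+aac7bfc') < version.LooseVersion('0.11')     # True
#
# which is what the minversion checks in _importorskip rely on.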
has_matplotlib, requires_matplotlib = _importorskip('matplotlib')
has_matplotlib2, requires_matplotlib2 = _importorskip('matplotlib',
minversion='2')
has_scipy, requires_scipy = _importorskip('scipy')
has_pydap, requires_pydap = _importorskip('pydap.client')
has_netCDF4, requires_netCDF4 = _importorskip('netCDF4')
has_h5netcdf, requires_h5netcdf = _importorskip('h5netcdf')
has_pynio, requires_pynio = _importorskip('Nio')
has_pseudonetcdf, requires_pseudonetcdf = _importorskip('PseudoNetCDF')
has_cftime, requires_cftime = _importorskip('cftime')
has_nc_time_axis, requires_nc_time_axis = _importorskip('nc_time_axis',
minversion='1.2.0')
has_cftime_1_0_2_1, requires_cftime_1_0_2_1 = _importorskip(
'cftime', minversion='1.0.2.1')
has_dask, requires_dask = _importorskip('dask')
has_bottleneck, requires_bottleneck = _importorskip('bottleneck')
has_rasterio, requires_rasterio = _importorskip('rasterio')
has_pathlib, requires_pathlib = _importorskip('pathlib')
has_zarr, requires_zarr = _importorskip('zarr', minversion='2.2')
has_np113, requires_np113 = _importorskip('numpy', minversion='1.13.0')
has_iris, requires_iris = _importorskip('iris')
has_cfgrib, requires_cfgrib = _importorskip('cfgrib')
# some special cases
has_h5netcdf07, requires_h5netcdf07 = _importorskip('h5netcdf',
minversion='0.7')
has_h5py29, requires_h5py29 = _importorskip('h5py', minversion='2.9.0')
has_h5fileobj = has_h5netcdf07 and has_h5py29
requires_h5fileobj = pytest.mark.skipif(
not has_h5fileobj, reason='requires h5py>2.9.0 & h5netcdf>0.7')
has_scipy_or_netCDF4 = has_scipy or has_netCDF4
requires_scipy_or_netCDF4 = pytest.mark.skipif(
not has_scipy_or_netCDF4, reason='requires scipy or netCDF4')
has_cftime_or_netCDF4 = has_cftime or has_netCDF4
requires_cftime_or_netCDF4 = pytest.mark.skipif(
not has_cftime_or_netCDF4, reason='requires cftime or netCDF4')
if not has_pathlib:
has_pathlib, requires_pathlib = _importorskip('pathlib2')
try:
import_seaborn()
has_seaborn = True
except ImportError:
has_seaborn = False
requires_seaborn = pytest.mark.skipif(not has_seaborn,
reason='requires seaborn')
# change some global options for tests
set_options(warn_for_unclosed_files=True)
if has_dask:
import dask
if LooseVersion(dask.__version__) < '0.18':
dask.set_options(get=dask.get)
else:
dask.config.set(scheduler='single-threaded')
# pytest config
try:
_SKIP_FLAKY = not pytest.config.getoption("--run-flaky")
_SKIP_NETWORK_TESTS = not pytest.config.getoption("--run-network-tests")
except (ValueError, AttributeError):
# Can't get config from pytest, e.g., because xarray is installed instead
# of being run from a development version (and hence conftests.py is not
# available). Don't run flaky tests.
_SKIP_FLAKY = True
_SKIP_NETWORK_TESTS = True
flaky = pytest.mark.skipif(
_SKIP_FLAKY, reason="set --run-flaky option to run flaky tests")
network = pytest.mark.skipif(
_SKIP_NETWORK_TESTS,
reason="set --run-network-tests option to run tests requiring an "
"internet connection")
@contextmanager
def raises_regex(error, pattern):
__tracebackhide__ = True # noqa: F841
with pytest.raises(error) as excinfo:
yield
message = str(excinfo.value)
if not re.search(pattern, message):
raise AssertionError('exception %r did not match pattern %r'
% (excinfo.value, pattern))
class UnexpectedDataAccess(Exception):
pass
class InaccessibleArray(utils.NDArrayMixin, ExplicitlyIndexed):
def __init__(self, array):
self.array = array
def __getitem__(self, key):
raise UnexpectedDataAccess("Tried accessing data")
class ReturnItem(object):
def __getitem__(self, key):
return key
class IndexerMaker(object):
def __init__(self, indexer_cls):
self._indexer_cls = indexer_cls
def __getitem__(self, key):
if not isinstance(key, tuple):
key = (key,)
return self._indexer_cls(key)
def source_ndarray(array):
"""Given an ndarray, return the base object which holds its memory, or the
object itself.
"""
with warnings.catch_warnings():
warnings.filterwarnings('ignore', 'DatetimeIndex.base')
warnings.filterwarnings('ignore', 'TimedeltaIndex.base')
base = getattr(array, 'base', np.asarray(array).base)
if base is None:
base = array
return base
# Internal versions of xarray's test functions that validate additional
# invariants
# TODO: add more invariant checks.
def assert_equal(a, b):
xarray.testing.assert_equal(a, b)
xarray.testing._assert_indexes_invariants(a)
xarray.testing._assert_indexes_invariants(b)
def assert_identical(a, b):
xarray.testing.assert_identical(a, b)
xarray.testing._assert_indexes_invariants(a)
xarray.testing._assert_indexes_invariants(b)
def assert_allclose(a, b, **kwargs):
xarray.testing.assert_allclose(a, b, **kwargs)
xarray.testing._assert_indexes_invariants(a)
xarray.testing._assert_indexes_invariants(b)
|
chunweiyuan/xarray
|
xarray/tests/__init__.py
|
Python
|
apache-2.0
| 6,863
|
#
# Copyright 2015, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This file contains the base N1QL implementation for async frameworks.
"""
from couchbase.n1ql import N1QLRequest
from couchbase.async.rowsbase import AsyncRowsBase
class AsyncN1QLRequest(AsyncRowsBase, N1QLRequest):
def __init__(self, *args, **kwargs):
N1QLRequest.__init__(self, *args, **kwargs)
|
mnunberg/couchbase-python-client
|
couchbase/async/n1ql.py
|
Python
|
apache-2.0
| 922
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import ddt
import mock
from rally.task.processing import plot
from tests.unit import test
PLOT = "rally.task.processing.plot."
@ddt.ddt
class PlotTestCase(test.TestCase):
@mock.patch(PLOT + "charts")
def test__process_scenario(self, mock_charts):
for mock_ins, ret in [
(mock_charts.MainStatsTable, "main_stats"),
(mock_charts.MainStackedAreaChart, "main_stacked"),
(mock_charts.AtomicStackedAreaChart, "atomic_stacked"),
(mock_charts.OutputStackedAreaDeprecatedChart,
"output_stacked"),
(mock_charts.LoadProfileChart, "load_profile"),
(mock_charts.MainHistogramChart, "main_histogram"),
(mock_charts.AtomicHistogramChart, "atomic_histogram"),
(mock_charts.AtomicAvgChart, "atomic_avg")]:
setattr(mock_ins.return_value.render, "return_value", ret)
iterations = [
{"timestamp": i + 2, "error": [],
"duration": i + 5, "idle_duration": i,
"output": {"additive": [], "complete": []},
"atomic_actions": {"foo_action": i + 10}} for i in range(10)]
data = {"iterations": iterations, "sla": [],
"key": {"kw": {"runner": {"type": "constant"}},
"name": "Foo.bar", "pos": 0},
"info": {"atomic": {"foo_action": {"max_duration": 19,
"min_duration": 10}},
"full_duration": 40, "load_duration": 32,
"iterations_count": 10, "iterations_passed": 10,
"max_duration": 14, "min_duration": 5,
"output_names": [],
"tstamp_end": 25, "tstamp_start": 2}}
task_data = plot._process_scenario(data, 1)
self.assertEqual(
task_data, {
"cls": "Foo", "met": "bar", "name": "bar [2]", "pos": "1",
"runner": "constant", "config": json.dumps(
{"Foo.bar": [{"runner": {"type": "constant"}}]},
indent=2),
"full_duration": 40, "load_duration": 32,
"atomic": {"histogram": "atomic_histogram",
"iter": "atomic_stacked", "pie": "atomic_avg"},
"iterations": {"histogram": "main_histogram",
"iter": "main_stacked",
"pie": [("success", 10), ("errors", 0)]},
"iterations_count": 10, "errors": [],
"load_profile": "load_profile",
"additive_output": [],
"complete_output": [[], [], [], [], [], [], [], [], [], []],
"output_errors": [],
"sla": [], "sla_success": True, "table": "main_stats"})
@mock.patch(PLOT + "_process_scenario")
@mock.patch(PLOT + "json.dumps", return_value="json_data")
def test__process_tasks(self, mock_json_dumps, mock__process_scenario):
tasks_results = [{"key": {"name": i, "kw": "kw_" + i}}
for i in ("a", "b", "c", "b")]
mock__process_scenario.side_effect = lambda a, b: (
{"cls": "%s_cls" % a["key"]["name"],
"name": str(b),
"met": "dummy",
"pos": str(b)})
source, tasks = plot._process_tasks(tasks_results)
self.assertEqual(source, "json_data")
mock_json_dumps.assert_called_once_with(
{"a": ["kw_a"], "b": ["kw_b", "kw_b"], "c": ["kw_c"]},
sort_keys=True, indent=2)
self.assertEqual(
tasks,
[{"cls": "a_cls", "met": "dummy", "name": "0", "pos": "0"},
{"cls": "b_cls", "met": "dummy", "name": "0", "pos": "0"},
{"cls": "b_cls", "met": "dummy", "name": "1", "pos": "1"},
{"cls": "c_cls", "met": "dummy", "name": "0", "pos": "0"}])
@ddt.data({},
{"include_libs": True},
{"include_libs": False})
@ddt.unpack
@mock.patch(PLOT + "_process_tasks")
@mock.patch(PLOT + "_extend_results")
@mock.patch(PLOT + "ui_utils.get_template")
@mock.patch(PLOT + "json.dumps", side_effect=lambda s: "json_" + s)
@mock.patch("rally.common.version.version_string", return_value="42.0")
def test_plot(self, mock_version_string, mock_dumps, mock_get_template,
mock__extend_results, mock__process_tasks, **ddt_kwargs):
mock__process_tasks.return_value = "source", "scenarios"
mock_get_template.return_value.render.return_value = "tasks_html"
mock__extend_results.return_value = ["extended_result"]
html = plot.plot("tasks_results", **ddt_kwargs)
self.assertEqual(html, "tasks_html")
mock__extend_results.assert_called_once_with("tasks_results")
mock_get_template.assert_called_once_with("task/report.html")
mock__process_tasks.assert_called_once_with(["extended_result"])
if "include_libs" in ddt_kwargs:
mock_get_template.return_value.render.assert_called_once_with(
version="42.0", data="json_scenarios", source="json_source",
include_libs=ddt_kwargs["include_libs"])
else:
mock_get_template.return_value.render.assert_called_once_with(
version="42.0", data="json_scenarios", source="json_source",
include_libs=False)
@mock.patch(PLOT + "objects.Task.extend_results")
def test__extend_results(self, mock_task_extend_results):
mock_task_extend_results.side_effect = iter(
[["extended_foo"], ["extended_bar"], ["extended_spam"]])
tasks_results = [
{"key": "%s_key" % k, "sla": "%s_sla" % k,
"full_duration": "%s_full_duration" % k,
"load_duration": "%s_load_duration" % k,
"result": "%s_result" % k} for k in ("foo", "bar", "spam")]
generic_results = [
{"id": None, "created_at": None, "updated_at": None,
"task_uuid": None, "key": "%s_key" % k,
"data": {"raw": "%s_result" % k,
"full_duration": "%s_full_duration" % k,
"load_duration": "%s_load_duration" % k,
"sla": "%s_sla" % k}} for k in ("foo", "bar", "spam")]
results = plot._extend_results(tasks_results)
self.assertEqual([mock.call([r]) for r in generic_results],
mock_task_extend_results.mock_calls)
self.assertEqual(["extended_foo", "extended_bar", "extended_spam"],
results)
def test__extend_results_empty(self):
self.assertEqual([], plot._extend_results([]))
@mock.patch(PLOT + "Trends")
@mock.patch(PLOT + "ui_utils.get_template")
@mock.patch(PLOT + "_extend_results")
@mock.patch("rally.common.version.version_string", return_value="42.0")
def test_trends(self, mock_version_string, mock__extend_results,
mock_get_template, mock_trends):
mock__extend_results.return_value = ["foo", "bar"]
trends = mock.Mock()
trends.get_data.return_value = ["foo", "bar"]
mock_trends.return_value = trends
template = mock.Mock()
template.render.return_value = "trends html"
mock_get_template.return_value = template
self.assertEqual("trends html", plot.trends("tasks_results"))
self.assertEqual([mock.call("foo"), mock.call("bar")],
trends.add_result.mock_calls)
mock_get_template.assert_called_once_with("task/trends.html")
template.render.assert_called_once_with(version="42.0",
data="[\"foo\", \"bar\"]")
@ddt.ddt
class TrendsTestCase(test.TestCase):
def test___init__(self):
trends = plot.Trends()
self.assertEqual({}, trends._tasks)
self.assertRaises(TypeError, plot.Trends, 42)
@ddt.data({"args": [None], "result": "None"},
{"args": [""], "result": ""},
{"args": [" str value "], "result": "str value"},
{"args": [" 42 "], "result": "42"},
{"args": ["42"], "result": "42"},
{"args": [42], "result": "42"},
{"args": [42.00], "result": "42.0"},
{"args": [[3.2, 1, " foo ", None]], "result": "1,3.2,None,foo"},
{"args": [(" def", "abc", [22, 33])], "result": "22,33,abc,def"},
{"args": [{}], "result": ""},
{"args": [{1: 2, "a": " b c "}], "result": "1:2|a:b c"},
{"args": [{"foo": "bar", (1, 2): [5, 4, 3]}],
"result": "1,2:3,4,5|foo:bar"},
{"args": [1, 2], "raises": TypeError},
{"args": [set()], "raises": TypeError})
@ddt.unpack
def test__to_str(self, args, result=None, raises=None):
trends = plot.Trends()
if raises:
self.assertRaises(raises, trends._to_str, *args)
else:
self.assertEqual(result, trends._to_str(*args))
@mock.patch(PLOT + "hashlib")
def test__make_hash(self, mock_hashlib):
mock_hashlib.md5.return_value.hexdigest.return_value = "md5_digest"
trends = plot.Trends()
trends._to_str = mock.Mock()
trends._to_str.return_value.encode.return_value = "foo_str"
self.assertEqual("md5_digest", trends._make_hash("foo_obj"))
trends._to_str.assert_called_once_with("foo_obj")
trends._to_str.return_value.encode.assert_called_once_with("utf8")
mock_hashlib.md5.assert_called_once_with("foo_str")
def _make_result(self, salt, sla_success=True, with_na=False):
if with_na:
atomic = {"a": "n/a", "b": "n/a"}
stat_rows = [
["a", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", 4],
["b", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", 4],
["total", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", "n/a", 4]]
else:
atomic = {"a": 123, "b": 456}
stat_rows = [["a", 0.7, 0.85, 0.9, 0.87, 1.25, 0.67, "100.0%", 4],
["b", 0.5, 0.75, 0.85, 0.9, 1.1, 0.58, "100.0%", 4],
["total", 1.2, 1.55, 1.7, 1.9, 1.5, 1.6, "100.0%", 4]]
return {
"key": {"kw": salt + "_kw", "name": "Scenario.name_%s" % salt},
"sla": [{"success": sla_success}],
"info": {"iterations_count": 4, "atomic": atomic,
"stat": {"rows": stat_rows,
"cols": ["Action", "Min (sec)", "Median (sec)",
"90%ile (sec)", "95%ile (sec)",
"Max (sec)", "Avg (sec)", "Success",
"Count"]}},
"iterations": ["<iter-0>", "<iter-1>", "<iter-2>", "<iter-3>"]}
def _sort_trends(self, trends_result):
for r_idx, res in enumerate(trends_result):
trends_result[r_idx]["total"]["values"].sort()
for a_idx, dummy in enumerate(res["atomic"]):
trends_result[r_idx]["atomic"][a_idx]["values"].sort()
return trends_result
def test_add_result_and_get_data(self):
trends = plot.Trends()
for i in 0, 1:
trends.add_result(self._make_result(str(i)))
expected = [
{"atomic": [
{"name": "a",
"success": [("success", [(1, 100.0)])],
"values": [("90%ile", [(1, 0.9)]), ("95%ile", [(1, 0.87)]),
("avg", [(1, 0.67)]), ("max", [(1, 1.25)]),
("median", [(1, 0.85)]), ("min", [(1, 0.7)])]},
{"name": "b",
"success": [("success", [(1, 100.0)])],
"values": [("90%ile", [(1, 0.85)]), ("95%ile", [(1, 0.9)]),
("avg", [(1, 0.58)]), ("max", [(1, 1.1)]),
("median", [(1, 0.75)]), ("min", [(1, 0.5)])]}],
"cls": "Scenario", "config": "\"0_kw\"", "met": "name_0",
"name": "Scenario.name_0", "seq": 1, "single": True,
"sla_failures": 0, "stat": {"avg": 1.6, "max": 1.5, "min": 1.2},
"total": {"success": [("success", [(1, 100.0)])],
"values": [("90%ile", [(1, 1.7)]),
("95%ile", [(1, 1.9)]),
("avg", [(1, 1.6)]),
("max", [(1, 1.5)]),
("median", [(1, 1.55)]),
("min", [(1, 1.2)])]}},
{"atomic": [
{"name": "a",
"success": [("success", [(1, 100.0)])],
"values": [("90%ile", [(1, 0.9)]), ("95%ile", [(1, 0.87)]),
("avg", [(1, 0.67)]), ("max", [(1, 1.25)]),
("median", [(1, 0.85)]), ("min", [(1, 0.7)])]},
{"name": "b",
"success": [("success", [(1, 100.0)])],
"values": [("90%ile", [(1, 0.85)]), ("95%ile", [(1, 0.9)]),
("avg", [(1, 0.58)]), ("max", [(1, 1.1)]),
("median", [(1, 0.75)]), ("min", [(1, 0.5)])]}],
"cls": "Scenario", "config": "\"1_kw\"", "met": "name_1",
"name": "Scenario.name_1", "seq": 1, "single": True,
"sla_failures": 0, "stat": {"avg": 1.6, "max": 1.5, "min": 1.2},
"total": {"success": [("success", [(1, 100.0)])],
"values": [("90%ile", [(1, 1.7)]),
("95%ile", [(1, 1.9)]),
("avg", [(1, 1.6)]),
("max", [(1, 1.5)]),
("median", [(1, 1.55)]),
("min", [(1, 1.2)])]}}]
self.assertEqual(expected, self._sort_trends(trends.get_data()))
def test_add_result_once_and_get_data(self):
trends = plot.Trends()
trends.add_result(self._make_result("foo", sla_success=False))
expected = [
{"atomic": [
{"name": "a",
"success": [("success", [(1, 100.0)])],
"values": [("90%ile", [(1, 0.9)]), ("95%ile", [(1, 0.87)]),
("avg", [(1, 0.67)]), ("max", [(1, 1.25)]),
("median", [(1, 0.85)]), ("min", [(1, 0.7)])]},
{"name": "b",
"success": [("success", [(1, 100.0)])],
"values": [("90%ile", [(1, 0.85)]), ("95%ile", [(1, 0.9)]),
("avg", [(1, 0.58)]), ("max", [(1, 1.1)]),
("median", [(1, 0.75)]), ("min", [(1, 0.5)])]}],
"cls": "Scenario", "config": "\"foo_kw\"", "met": "name_foo",
"name": "Scenario.name_foo", "seq": 1, "single": True,
"sla_failures": 1, "stat": {"avg": 1.6, "max": 1.5, "min": 1.2},
"total": {"success": [("success", [(1, 100.0)])],
"values": [("90%ile", [(1, 1.7)]),
("95%ile", [(1, 1.9)]),
("avg", [(1, 1.6)]),
("max", [(1, 1.5)]),
("median", [(1, 1.55)]),
("min", [(1, 1.2)])]}}]
self.assertEqual(expected, self._sort_trends(trends.get_data()))
def test_add_result_with_na_and_get_data(self):
trends = plot.Trends()
trends.add_result(self._make_result("foo",
sla_success=False, with_na=True))
expected = [
{"atomic": [{"name": "a",
"success": [("success", [(1, 0)])],
"values": [("90%ile", [(1, "n/a")]),
("95%ile", [(1, "n/a")]),
("avg", [(1, "n/a")]),
("max", [(1, "n/a")]),
("median", [(1, "n/a")]),
("min", [(1, "n/a")])]},
{"name": "b",
"success": [("success", [(1, 0)])],
"values": [("90%ile", [(1, "n/a")]),
("95%ile", [(1, "n/a")]),
("avg", [(1, "n/a")]),
("max", [(1, "n/a")]),
("median", [(1, "n/a")]),
("min", [(1, "n/a")])]}],
"cls": "Scenario", "config": "\"foo_kw\"", "met": "name_foo",
"name": "Scenario.name_foo", "seq": 1, "single": True,
"sla_failures": 1, "stat": {"avg": None, "max": None,
"min": None},
"total": {"success": [("success", [(1, 0)])],
"values": [("90%ile", [(1, "n/a")]),
("95%ile", [(1, "n/a")]),
("avg", [(1, "n/a")]),
("max", [(1, "n/a")]),
("median", [(1, "n/a")]),
("min", [(1, "n/a")])]}}]
self.assertEqual(expected, self._sort_trends(trends.get_data()))
def test_get_data_no_results_added(self):
trends = plot.Trends()
self.assertEqual([], trends.get_data())
|
varuntiwari27/rally
|
tests/unit/task/processing/test_plot.py
|
Python
|
apache-2.0
| 18,244
|
# Copyright 2015 Futurewei. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
class FlowClassifierDriverBaseLegacy(metaclass=abc.ABCMeta):
"""Flow Classifier Driver Base Class for legacy driver interface"""
@abc.abstractmethod
def create_flow_classifier(self, context):
pass
@abc.abstractmethod
def update_flow_classifier(self, context):
pass
class FlowClassifierDriverBase(FlowClassifierDriverBaseLegacy,
metaclass=abc.ABCMeta):
"""Flow Classifier Driver Base Class."""
@abc.abstractmethod
def create_flow_classifier_precommit(self, context):
pass
def create_flow_classifier_postcommit(self, context):
self.create_flow_classifier(context)
@abc.abstractmethod
def delete_flow_classifier(self, context):
pass
def delete_flow_classifier_precommit(self, context):
pass
def delete_flow_classifier_postcommit(self, context):
pass
def update_flow_classifier_precommit(self, context):
pass
def update_flow_classifier_postcommit(self, context):
self.update_flow_classifier(context)
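# Illustrative sketch (not part of the original module; the class name below is
# an assumption): a minimal no-op driver subclasses FlowClassifierDriverBase and
# supplies the remaining abstract methods:
#
#     class NoopFlowClassifierDriver(FlowClassifierDriverBase):
#         def create_flow_classifier(self, context):
#             pass
#
#         def update_flow_classifier(self, context):
#             pass
#
#         def create_flow_classifier_precommit(self, context):
#             pass
#
#         def delete_flow_classifier(self, context):
#             pass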
|
openstack/networking-sfc
|
networking_sfc/services/flowclassifier/drivers/base.py
|
Python
|
apache-2.0
| 1,707
|
# coding=utf-8
# Copyright 2022 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for matrix square roots."""
from typing import Callable
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax.config import config
import jax.numpy as jnp
import jax.test_util
from ott.geometry import matrix_square_root
def _get_random_spd_matrix(dim: int, key: jnp.ndarray):
# Get a random symmetric, positive definite matrix of a specified size.
key, subkey0, subkey1 = jax.random.split(key, num=3)
# Step 1: generate a random orthogonal matrix
m = jax.random.normal(
key=subkey0,
shape=[dim, dim])
q, _ = jnp.linalg.qr(m)
# Step 2: generate random eigenvalues in [1/2. , 2.] to ensure the condition
# number is reasonable.
eigs = 2. ** (2. * jax.random.uniform(key=subkey1, shape=(dim,)) - 1.)
return jnp.matmul(eigs[None, :] * q, jnp.transpose(q))
def _get_test_fn(
fn: Callable[[jnp.ndarray], jnp.ndarray],
dim: int,
key: jnp.ndarray) -> Callable[[jnp.ndarray], jnp.ndarray]:
# We want to test gradients of a function fn that maps positive definite
# matrices to positive definite matrices by comparing them to finite
# difference approximations. We'll do so via a test function that
# (1) takes an arbitrary real as an input,
# (2) maps the real to a positive definite matrix,
# (3) applies fn, then
# (4) maps the matrix-valued output of fn to a scalar.
key, subkey0, subkey1, subkey2, subkey3 = jax.random.split(key, num=5)
m0 = _get_random_spd_matrix(dim=dim, key=subkey0)
m1 = _get_random_spd_matrix(dim=dim, key=subkey1)
dx = _get_random_spd_matrix(dim=dim, key=subkey2)
unit = jax.random.normal(key=subkey3, shape=(dim, dim))
unit /= jnp.sqrt(jnp.sum(unit ** 2.))
def _test_fn(x: float) -> float:
# m is the product of 2 symmetric, positive definite matrices
# so it will be positive definite but not necessarily symmetric
m = jnp.matmul(m0, m1 + x * dx)
return jnp.sum(fn(m) * unit)
return _test_fn
def _sqrt_plus_inv_sqrt(x: jnp.ndarray) -> jnp.ndarray:
sqrtm = matrix_square_root.sqrtm(x)
return sqrtm[0] + sqrtm[1]
class MatrixSquareRootTest(jax.test_util.JaxTestCase):
def setUp(self):
super().setUp()
key = jax.random.PRNGKey(0)
self.dim = 13
self.batch = 3
# Values for testing the Sylvester solver
# Sylvester equations have the form AX - XB = C
# Shapes: A = (m, m), B = (n, n), C = (m, n), X = (m, n)
m = 3
n = 2
key, subkey0, subkey1, subkey2 = jax.random.split(key, 4)
self.a = jax.random.normal(key=subkey0, shape=(2, m, m))
self.b = jax.random.normal(key=subkey1, shape=(2, n, n))
self.x = jax.random.normal(key=subkey2, shape=(2, m, n))
# make sure the system has a solution
self.c = jnp.matmul(self.a, self.x) - jnp.matmul(self.x, self.b)
self.rng = key
def test_sqrtm(self):
"""Sample a random p.s.d. (Wishart) matrix, check its sqrt matches."""
matrices = jax.random.normal(self.rng, (self.batch, self.dim, 2 * self.dim))
for x in (matrices, matrices[0, :, :]): # try with many and only one.
x = jnp.matmul(x, jnp.swapaxes(x, -1, -2))
threshold = 1e-4
sqrt_x, inv_sqrt_x, errors = matrix_square_root.sqrtm(
x, min_iterations=self.dim, threshold=threshold)
err = errors[errors > -1][-1]
self.assertGreater(threshold, err)
self.assertAllClose(x, jnp.matmul(sqrt_x, sqrt_x), rtol=1e-3, atol=1e-3)
ids = jnp.eye(self.dim)
if jnp.ndim(x) == 3:
ids = ids[jnp.newaxis, :, :]
self.assertAllClose(
jnp.zeros_like(x),
jnp.matmul(x, jnp.matmul(inv_sqrt_x, inv_sqrt_x)) - ids,
atol=1e-2)
  def test_sqrtm_batch(self):
    """Check sqrtm on a larger batch of matrices."""
batch_dim0 = 2
batch_dim1 = 2
threshold = 1e-4
m = jax.random.normal(
self.rng, (batch_dim0, batch_dim1, self.dim, 2 * self.dim))
x = jnp.matmul(m, jnp.swapaxes(m, axis1=-2, axis2=-1))
sqrt_x, inv_sqrt_x, errors = matrix_square_root.sqrtm(
x, threshold=threshold, min_iterations=self.dim,)
err = errors[errors > -1][-1]
self.assertGreater(threshold, err)
eye = jnp.eye(self.dim)
for i in range(batch_dim0):
for j in range(batch_dim1):
self.assertAllClose(
x[i, j], jnp.matmul(sqrt_x[i, j], sqrt_x[i, j]),
rtol=1e-3, atol=1e-3)
self.assertAllClose(
eye,
jnp.matmul(x[i, j], jnp.matmul(inv_sqrt_x[i, j], inv_sqrt_x[i, j])),
atol=1e-2)
def test_solve_bartels_stewart(self):
x = matrix_square_root.solve_sylvester_bartels_stewart(
a=self.a[0], b=self.b[0], c=self.c[0])
self.assertAllClose(self.x[0], x, atol=1.e-5)
def test_solve_bartels_stewart_batch(self):
x = matrix_square_root.solve_sylvester_bartels_stewart(
a=self.a, b=self.b, c=self.c)
self.assertAllClose(self.x, x, atol=1.e-5)
x = matrix_square_root.solve_sylvester_bartels_stewart(
a=self.a[None], b=self.b[None], c=self.c[None])
self.assertAllClose(self.x, x[0], atol=1.e-5)
x = matrix_square_root.solve_sylvester_bartels_stewart(
a=self.a[None, None], b=self.b[None, None], c=self.c[None, None])
self.assertAllClose(self.x, x[0, 0], atol=1.e-5)
@parameterized.named_parameters(
dict(
testcase_name='test_sqrtm_sqrtm',
fn=lambda x: matrix_square_root.sqrtm(x)[0],
n_tests=3,
dim=3,
epsilon=1.e-6,
atol=1.e-6,
rtol=1.e-6,
),
dict(
testcase_name='test_sqrtm_inv_sqrtm',
fn=lambda x: matrix_square_root.sqrtm(x)[1],
n_tests=3,
dim=3,
epsilon=1.e-6,
atol=1.e-8,
rtol=1.e-8,
),
dict(
testcase_name='test_sqrtm_sqrtm_plus_inv_sqrtm',
fn=_sqrt_plus_inv_sqrt,
n_tests=3,
dim=3,
epsilon=1.e-6,
atol=1.e-8,
rtol=1.e-8,
),
dict(
testcase_name='test_sqrtm_only',
fn=matrix_square_root.sqrtm_only,
n_tests=3,
dim=3,
epsilon=1.e-6,
atol=1.e-8,
rtol=1.e-8,
),
dict(
testcase_name='test_inv_sqrtm_only',
fn=matrix_square_root.inv_sqrtm_only,
n_tests=3,
dim=2,
epsilon=1.e-6,
atol=1.e-8,
rtol=1.e-8,
),
)
def test_grad(self, fn, n_tests, dim, epsilon, atol, rtol):
config.update('jax_enable_x64', True)
key = self.rng
for _ in range(n_tests):
key, subkey = jax.random.split(key)
test_fn = _get_test_fn(fn, dim=dim, key=subkey)
expected = (test_fn(epsilon) - test_fn(-epsilon)) / (2. * epsilon)
actual = jax.grad(test_fn)(0.)
self.assertAllClose(expected, actual, atol=atol, rtol=rtol)
if __name__ == '__main__':
absltest.main()
|
google-research/ott
|
tests/geometry/matrix_square_root_test.py
|
Python
|
apache-2.0
| 7,483
|
#!/usr/bin/python
# Program: FileTools
# Author: Wayne Dawson
# Version: 0.0
# Creation Date: 140221 (derived from SimRNATools c 140515)
# Last Update: 161118
# the main tool that I use from this is check_ext().
import re
import sys
debug = False
# debug = True
# Hopefully some tools that can be useful, e.g. check_ext(testfile, extset, program='').
#############################
### class Definitions ###
#############################
class FileTools:
def __init__(self):
self.flnm = ''
self.flhd = ''
self.ext = ''
#
# 161118wkd: This was evidently an early tool I wrote and I don't
# think this one is so useful now. In fact, if you only need to
# divide out the data items from an initially indeterminate number
# of spaces (' ') in a string (st), then it is better to use
# st.split(). Indeed, it is better to write
# "st.strip().split()". This operation removes all ' ' characters
# and the '\n' character as well. So st.strip().split() is a
# better strategy and then, if you want to extract some other
# things out of it, then do so in the resulting list.
def clean_split(self, st):
ssv = st.split(' ')
vlist = []
# remove blank spaces
for ssv_k in ssv:
if not ssv_k == '':
vlist += [ssv_k]
return vlist
#
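    # Illustrative sketch (not part of the original class), showing the simpler
    # alternative described in the note above:
    #
    #     '  a   b  c \n'.strip().split()   # -> ['a', 'b', 'c']
    #
    # strip() removes the surrounding whitespace (including '\n') and split()
    # with no argument collapses runs of spaces, so no filtering is needed.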
# generates a hollerith
# 161118wkd: This does work, and I have used it a lot in the
# past. However, I found this can also be done by using the package
# "string" and, for a hollerith string of up to 5 zeros and a
# positive integer k, you can generate this with the command
    # "string.zfill(k, 5)". Hence, for k = 25, string.zfill(25, 5)
# leads to "00025".
def hollerith(self, n, slen):
sn = str(n)
hn = ''
for i in range(0,slen-len(sn)):
hn += '0'
hn += sn
return hn
#
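    # A quick sanity check of the two equivalent approaches (illustrative only):
    #     FileTools().hollerith(25, 5)   -> '00025'
    #     str(25).zfill(5)               -> '00025'
    #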
    # checks the input file <testfile> extension <allowed_exts>. The
    # variable <allowed_exts> can either be a single string (e.g., 'trafl')
    # or a list of strings (e.g., ['trafl', 'dGrnk']).
def old_check_ext(self, testfile, allowed_exts):
#
self.flnm = testfile
ss = testfile.split('.')
self.ext = ss[len(ss)-1]
if len(self.ext) == 1:
print "ERROR: file (%s) requires an extension: " % (self.flnm), allowed_exts
sys.exit(1)
#
self.flhd = testfile[:len(testfile)-len(self.ext)-1]
flag_pass = False
if type(allowed_exts) is list:
# print 'more than one extension found'
for ext in allowed_exts:
testext='.' + ext + '$'
p = re.compile(testext)
a = p.findall(testfile)
if (len(a) > 0):
flag_pass = True
else:
# print 'only one extension found'
testext='.' + allowed_exts + '$'
p = re.compile(testext)
a = p.findall(testfile)
if (len(a) > 0):
flag_pass = True
#
if not flag_pass:
xtnsn = ''
if type(allowed_exts) is list:
xtnsn = '\'*.%s\'' % allowed_exts[0]
for i in range(1,len(allowed_exts)):
xtnsn += ' or \'*.%s\'' % allowed_exts[i]
xtnsn += '.'
if len(allowed_exts) == 1:
print "ERROR: file must have the extension %s" % xtnsn
else:
print "ERROR: file must have one of these extensions %s" % xtnsn
else:
xtnsn = '\'*.%s\'' % allowed_exts
print "ERROR: file must have the extension %s" % xtnsn
return flag_pass
#
# checks the input file <testfile> extension <allowed_exts>. The
# variable <allowed_exts> can either be a single string (e.g., 'trafl')
# or a list of strings (e.g., ['trafl', 'dGrnk']).
def check_ext(self, testfile, allowed_exts, program=''):
#
self.flnm = testfile
splf = testfile.split('.')
self.ext = splf[len(splf)-1]
if len(self.ext) == 1:
print "ERROR: file (%s) should have one of the following extensions: " % (self.flnm), allowed_exts
sys.exit(1)
#
self.flhd = testfile[:len(testfile)-len(self.ext)-1]
flag_pass = False
if type(allowed_exts) is list:
# print 'more than one extension found'
for ext in allowed_exts:
if ext == self.ext:
flag_pass = True
break
#
#
else:
if allowed_exts == self.ext:
flag_pass = True
#
#
if not flag_pass:
xtnsn = ''
print "[program: %s]:" % program
if type(allowed_exts) is list: # need to know if it is a list or a string
xtnsn = '\'*.%s\'' % allowed_exts[0]
for i in range(1,len(allowed_exts)):
xtnsn += ' or \'*.%s\'' % allowed_exts[i]
xtnsn += '.'
if len(allowed_exts) == 1:
print "ERROR: file must have the extension %s" % xtnsn
else:
print "ERROR: file must have one of these extensions %s" % xtnsn
#
print " input file '%s' --> (extension: '%s')" % (self.flnm, self.ext)
else:
xtnsn = '\'*.%s\'' % allowed_exts
print "ERROR: file must have the extension %s" % xtnsn
print " input file '%s' --> (extension: '%s')" % (self.flnm, self.ext)
return flag_pass
#
#
|
4dnucleome/looper
|
chreval/FileTools.py
|
Python
|
apache-2.0
| 5,504
|
#!/usr/bin/env python3
# Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
from rosidl_adapter.parser import parse_message_file
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(
description='Parse all recursively found .msg files.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'paths',
nargs='+',
help='The base paths to search for .msg files')
args = parser.parse_args(argv)
files = get_files(args.paths)
for filename in files:
pkg_name = os.path.basename(os.path.dirname(os.path.dirname(filename)))
try:
parse_message_file(pkg_name, filename)
print(pkg_name, filename)
except Exception as e:
print(' ', pkg_name, filename, str(e))
raise
return 0
def get_files(paths):
files = []
for path in paths:
if os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
# ignore folder starting with . or _
dirnames[:] = [d for d in dirnames if d[0] not in ['.', '_']]
dirnames.sort()
# select files by extension
for filename in sorted(filenames):
if filename.endswith('.msg'):
files.append(os.path.join(dirpath, filename))
if os.path.isfile(path):
files.append(path)
return files
if __name__ == '__main__':
sys.exit(main())
|
ros2/rosidl
|
rosidl_adapter/test/parse_msg_files.py
|
Python
|
apache-2.0
| 2,091
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=C,R,W
"""This module contains the 'Viz' objects
These objects represent the backend of all the visualizations that
Superset can render.
"""
import copy
import dataclasses
import inspect
import logging
import math
import re
from collections import defaultdict, OrderedDict
from datetime import date, datetime, timedelta
from itertools import product
from typing import (
Any,
Callable,
cast,
Dict,
List,
Optional,
Set,
Tuple,
TYPE_CHECKING,
Union,
)
import geohash
import numpy as np
import pandas as pd
import polyline
import simplejson as json
from dateutil import relativedelta as rdelta
from flask import request
from flask_babel import lazy_gettext as _
from geopy.point import Point
from pandas.tseries.frequencies import to_offset
from superset import app, cache, db, security_manager
from superset.constants import NULL_STRING
from superset.errors import ErrorLevel, SupersetError, SupersetErrorType
from superset.exceptions import (
NullValueException,
QueryObjectValidationError,
SpatialException,
)
from superset.models.cache import CacheKey
from superset.models.helpers import QueryResult
from superset.typing import QueryObjectDict, VizData, VizPayload
from superset.utils import core as utils
from superset.utils.core import (
DTTM_ALIAS,
JS_MAX_INTEGER,
merge_extra_filters,
QueryMode,
to_adhoc,
)
from superset.utils.dates import datetime_to_epoch
from superset.utils.hashing import md5_sha_from_str
if TYPE_CHECKING:
from superset.connectors.base.models import BaseDatasource
config = app.config
stats_logger = config["STATS_LOGGER"]
relative_start = config["DEFAULT_RELATIVE_START_TIME"]
relative_end = config["DEFAULT_RELATIVE_END_TIME"]
logger = logging.getLogger(__name__)
METRIC_KEYS = [
"metric",
"metrics",
"percent_metrics",
"metric_2",
"secondary_metric",
"x",
"y",
"size",
]
def set_and_log_cache(
cache_key: str,
df: pd.DataFrame,
query: str,
cached_dttm: str,
cache_timeout: int,
datasource_uid: Optional[str],
) -> None:
try:
cache_value = dict(dttm=cached_dttm, df=df, query=query)
stats_logger.incr("set_cache_key")
cache.set(cache_key, cache_value, timeout=cache_timeout)
if datasource_uid:
ck = CacheKey(
cache_key=cache_key,
cache_timeout=cache_timeout,
datasource_uid=datasource_uid,
)
db.session.add(ck)
except Exception as ex:
# cache.set call can fail if the backend is down or if
# the key is too large or whatever other reasons
logger.warning("Could not cache key {}".format(cache_key))
logger.exception(ex)
cache.delete(cache_key)
class BaseViz:
"""All visualizations derive this base class"""
viz_type: Optional[str] = None
verbose_name = "Base Viz"
credits = ""
is_timeseries = False
cache_type = "df"
enforce_numerical_metrics = True
def __init__(
self,
datasource: "BaseDatasource",
form_data: Dict[str, Any],
force: bool = False,
) -> None:
if not datasource:
raise Exception(_("Viz is missing a datasource"))
self.datasource = datasource
self.request = request
self.viz_type = form_data.get("viz_type")
self.form_data = form_data
self.query = ""
self.token = utils.get_form_data_token(form_data)
self.groupby: List[str] = self.form_data.get("groupby") or []
self.time_shift = timedelta()
self.status: Optional[str] = None
self.error_msg = ""
self.results: Optional[QueryResult] = None
self.errors: List[Dict[str, Any]] = []
self.force = force
self.from_dttm: Optional[datetime] = None
self.to_dttm: Optional[datetime] = None
# Keeping track of whether some data came from cache
        # this is useful to trigger the <CachedLabel /> in cases
        # where a visualization has many queries
# (FilterBox for instance)
self._any_cache_key: Optional[str] = None
self._any_cached_dttm: Optional[str] = None
self._extra_chart_data: List[Tuple[str, pd.DataFrame]] = []
self.process_metrics()
def process_metrics(self) -> None:
        # metrics in TableViz are order-sensitive, so metric_dict should be
# OrderedDict
self.metric_dict = OrderedDict()
fd = self.form_data
for mkey in METRIC_KEYS:
val = fd.get(mkey)
if val:
if not isinstance(val, list):
val = [val]
for o in val:
label = utils.get_metric_name(o)
self.metric_dict[label] = o
# Cast to list needed to return serializable object in py3
self.all_metrics = list(self.metric_dict.values())
self.metric_labels = list(self.metric_dict.keys())
@staticmethod
def handle_js_int_overflow(
data: Dict[str, List[Dict[str, Any]]]
) -> Dict[str, List[Dict[str, Any]]]:
for d in data.get("records", {}):
for k, v in list(d.items()):
if isinstance(v, int):
                    # if an int is too big for JavaScript to handle
# convert it to a string
if abs(v) > JS_MAX_INTEGER:
d[k] = str(v)
return data
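    # Illustrative behaviour of handle_js_int_overflow (values are hypothetical,
    # assuming JS_MAX_INTEGER is Number.MAX_SAFE_INTEGER, i.e. 2**53 - 1):
    #     {"records": [{"n": 2**60}]}  ->  {"records": [{"n": "1152921504606846976"}]}
    # while values at or below the threshold are passed through unchanged.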
def run_extra_queries(self) -> None:
"""Lifecycle method to use when more than one query is needed
In rare-ish cases, a visualization may need to execute multiple
queries. That is the case for FilterBox or for time comparison
in Line chart for instance.
In those cases, we need to make sure these queries run before the
main `get_payload` method gets called, so that the overall caching
metadata can be right. The way it works here is that if any of
the previous `get_df_payload` calls hit the cache, the main
payload's metadata will reflect that.
The multi-query support may need more work to become a first class
use case in the framework, and for the UI to reflect the subtleties
(show that only some of the queries were served from cache for
instance). In the meantime, since multi-query is rare, we treat
it with a bit of a hack. Note that the hack became necessary
when moving from caching the visualization's data itself, to caching
the underlying query(ies).
"""
pass
def apply_rolling(self, df: pd.DataFrame) -> pd.DataFrame:
fd = self.form_data
rolling_type = fd.get("rolling_type")
rolling_periods = int(fd.get("rolling_periods") or 0)
min_periods = int(fd.get("min_periods") or 0)
if rolling_type in ("mean", "std", "sum") and rolling_periods:
kwargs = dict(window=rolling_periods, min_periods=min_periods)
if rolling_type == "mean":
df = df.rolling(**kwargs).mean()
elif rolling_type == "std":
df = df.rolling(**kwargs).std()
elif rolling_type == "sum":
df = df.rolling(**kwargs).sum()
elif rolling_type == "cumsum":
df = df.cumsum()
if min_periods:
df = df[min_periods:]
if df.empty:
raise QueryObjectValidationError(
_(
"Applied rolling window did not return any data. Please make sure "
"the source query satisfies the minimum periods defined in the "
"rolling window."
)
)
return df
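    # A minimal sketch of the pandas call apply_rolling() builds for
    # rolling_type == "mean" (hypothetical frame and window size):
    #     pd.DataFrame({"m": [1.0, 2.0, 3.0]}).rolling(window=2, min_periods=1).mean()
    # yields [1.0, 1.5, 2.5] for column "m".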
def get_samples(self) -> List[Dict[str, Any]]:
query_obj = self.query_obj()
query_obj.update(
{
"groupby": [],
"metrics": [],
"row_limit": config["SAMPLES_ROW_LIMIT"],
"columns": [o.column_name for o in self.datasource.columns],
}
)
df = self.get_df(query_obj)
return df.to_dict(orient="records")
def get_df(self, query_obj: Optional[QueryObjectDict] = None) -> pd.DataFrame:
"""Returns a pandas dataframe based on the query object"""
if not query_obj:
query_obj = self.query_obj()
if not query_obj:
return pd.DataFrame()
self.error_msg = ""
timestamp_format = None
if self.datasource.type == "table":
granularity_col = self.datasource.get_column(query_obj["granularity"])
if granularity_col:
timestamp_format = granularity_col.python_date_format
        # The datasource here can be a different backend, but the interface is common
self.results = self.datasource.query(query_obj)
self.query = self.results.query
self.status = self.results.status
self.errors = self.results.errors
df = self.results.df
# Transform the timestamp we received from database to pandas supported
# datetime format. If no python_date_format is specified, the pattern will
# be considered as the default ISO date format
# If the datetime format is unix, the parse will use the corresponding
# parsing logic.
if not df.empty:
if DTTM_ALIAS in df.columns:
if timestamp_format in ("epoch_s", "epoch_ms"):
# Column has already been formatted as a timestamp.
dttm_col = df[DTTM_ALIAS]
one_ts_val = dttm_col[0]
# convert time column to pandas Timestamp, but different
# ways to convert depending on string or int types
try:
int(one_ts_val)
is_integral = True
except (ValueError, TypeError):
is_integral = False
if is_integral:
unit = "s" if timestamp_format == "epoch_s" else "ms"
df[DTTM_ALIAS] = pd.to_datetime(
dttm_col, utc=False, unit=unit, origin="unix"
)
else:
df[DTTM_ALIAS] = dttm_col.apply(pd.Timestamp)
else:
df[DTTM_ALIAS] = pd.to_datetime(
df[DTTM_ALIAS], utc=False, format=timestamp_format
)
if self.datasource.offset:
df[DTTM_ALIAS] += timedelta(hours=self.datasource.offset)
df[DTTM_ALIAS] += self.time_shift
if self.enforce_numerical_metrics:
self.df_metrics_to_num(df)
df.replace([np.inf, -np.inf], np.nan, inplace=True)
return df
def df_metrics_to_num(self, df: pd.DataFrame) -> None:
"""Converting metrics to numeric when pandas.read_sql cannot"""
metrics = self.metric_labels
for col, dtype in df.dtypes.items():
if dtype.type == np.object_ and col in metrics:
df[col] = pd.to_numeric(df[col], errors="coerce")
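    # For reference, the coercion above behaves like (illustrative values):
    #     pd.to_numeric(pd.Series(["1", "x"]), errors="coerce")  ->  [1.0, NaN]
    # so unparseable metric values become NaN rather than raising.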
def process_query_filters(self) -> None:
utils.convert_legacy_filters_into_adhoc(self.form_data)
merge_extra_filters(self.form_data)
utils.split_adhoc_filters_into_base_filters(self.form_data)
def query_obj(self) -> QueryObjectDict:
"""Building a query object"""
form_data = self.form_data
self.process_query_filters()
gb = self.groupby
metrics = self.all_metrics or []
columns = form_data.get("columns") or []
# merge list and dedup while preserving order
groupby = list(OrderedDict.fromkeys(gb + columns))
is_timeseries = self.is_timeseries
if DTTM_ALIAS in groupby:
groupby.remove(DTTM_ALIAS)
is_timeseries = True
granularity = form_data.get("granularity") or form_data.get("granularity_sqla")
limit = int(form_data.get("limit") or 0)
timeseries_limit_metric = form_data.get("timeseries_limit_metric")
row_limit = int(form_data.get("row_limit") or config["ROW_LIMIT"])
# default order direction
order_desc = form_data.get("order_desc", True)
try:
since, until = utils.get_since_until(
relative_start=relative_start,
relative_end=relative_end,
time_range=form_data.get("time_range"),
since=form_data.get("since"),
until=form_data.get("until"),
)
except ValueError as ex:
raise QueryObjectValidationError(str(ex))
time_shift = form_data.get("time_shift", "")
self.time_shift = utils.parse_past_timedelta(time_shift)
from_dttm = None if since is None else (since - self.time_shift)
to_dttm = None if until is None else (until - self.time_shift)
if from_dttm and to_dttm and from_dttm > to_dttm:
raise QueryObjectValidationError(
_("From date cannot be larger than to date")
)
self.from_dttm = from_dttm
self.to_dttm = to_dttm
# extras are used to query elements specific to a datasource type
# for instance the extra where clause that applies only to Tables
extras = {
"druid_time_origin": form_data.get("druid_time_origin", ""),
"having": form_data.get("having", ""),
"having_druid": form_data.get("having_filters", []),
"time_grain_sqla": form_data.get("time_grain_sqla"),
"time_range_endpoints": form_data.get("time_range_endpoints"),
"where": form_data.get("where", ""),
}
return {
"granularity": granularity,
"from_dttm": from_dttm,
"to_dttm": to_dttm,
"is_timeseries": is_timeseries,
"groupby": groupby,
"metrics": metrics,
"row_limit": row_limit,
"filter": self.form_data.get("filters", []),
"timeseries_limit": limit,
"extras": extras,
"timeseries_limit_metric": timeseries_limit_metric,
"order_desc": order_desc,
}
@property
def cache_timeout(self) -> int:
if self.form_data.get("cache_timeout") is not None:
return int(self.form_data["cache_timeout"])
if self.datasource.cache_timeout is not None:
return self.datasource.cache_timeout
        if (
            hasattr(self.datasource, "database")
            and self.datasource.database.cache_timeout is not None
        ):
            return self.datasource.database.cache_timeout
return config["CACHE_DEFAULT_TIMEOUT"]
def get_json(self) -> str:
return json.dumps(
self.get_payload(), default=utils.json_int_dttm_ser, ignore_nan=True
)
def cache_key(self, query_obj: QueryObjectDict, **extra: Any) -> str:
"""
The cache key is made out of the key/values in `query_obj`, plus any
other key/values in `extra`.
We remove datetime bounds that are hard values, and replace them with
        the user-provided inputs to bounds, which may be time-relative (as in
        "5 days ago" or "now").
        The `extra` arguments are currently used by time shift queries, since
        different time shifts will differ only in the `from_dttm`, `to_dttm`,
`inner_from_dttm`, and `inner_to_dttm` values which are stripped.
"""
cache_dict = copy.copy(query_obj)
cache_dict.update(extra)
for k in ["from_dttm", "to_dttm", "inner_from_dttm", "inner_to_dttm"]:
if k in cache_dict:
del cache_dict[k]
cache_dict["time_range"] = self.form_data.get("time_range")
cache_dict["datasource"] = self.datasource.uid
cache_dict["extra_cache_keys"] = self.datasource.get_extra_cache_keys(query_obj)
cache_dict["rls"] = (
security_manager.get_rls_ids(self.datasource)
if config["ENABLE_ROW_LEVEL_SECURITY"] and self.datasource.is_rls_supported
else []
)
cache_dict["changed_on"] = self.datasource.changed_on
json_data = self.json_dumps(cache_dict, sort_keys=True)
return md5_sha_from_str(json_data)
def get_payload(self, query_obj: Optional[QueryObjectDict] = None) -> VizPayload:
"""Returns a payload of metadata and data"""
self.run_extra_queries()
payload = self.get_df_payload(query_obj)
df = payload.get("df")
if self.status != utils.QueryStatus.FAILED:
payload["data"] = self.get_data(df)
if "df" in payload:
del payload["df"]
return payload
def get_df_payload(
self, query_obj: Optional[QueryObjectDict] = None, **kwargs: Any
) -> Dict[str, Any]:
"""Handles caching around the df payload retrieval"""
if not query_obj:
query_obj = self.query_obj()
cache_key = self.cache_key(query_obj, **kwargs) if query_obj else None
logger.info("Cache key: {}".format(cache_key))
is_loaded = False
stacktrace = None
df = None
cached_dttm = datetime.utcnow().isoformat().split(".")[0]
if cache_key and cache and not self.force:
cache_value = cache.get(cache_key)
if cache_value:
stats_logger.incr("loading_from_cache")
try:
df = cache_value["df"]
self.query = cache_value["query"]
self._any_cached_dttm = cache_value["dttm"]
self._any_cache_key = cache_key
self.status = utils.QueryStatus.SUCCESS
is_loaded = True
stats_logger.incr("loaded_from_cache")
except Exception as ex:
logger.exception(ex)
logger.error(
"Error reading cache: " + utils.error_msg_from_exception(ex)
)
logger.info("Serving from cache")
if query_obj and not is_loaded:
try:
invalid_columns = [
col
for col in (query_obj.get("columns") or [])
+ (query_obj.get("groupby") or [])
+ utils.get_column_names_from_metrics(
cast(
List[Union[str, Dict[str, Any]]], query_obj.get("metrics"),
)
)
if col not in self.datasource.column_names
]
if invalid_columns:
raise QueryObjectValidationError(
_(
"Columns missing in datasource: %(invalid_columns)s",
invalid_columns=invalid_columns,
)
)
df = self.get_df(query_obj)
if self.status != utils.QueryStatus.FAILED:
stats_logger.incr("loaded_from_source")
if not self.force:
stats_logger.incr("loaded_from_source_without_force")
is_loaded = True
except QueryObjectValidationError as ex:
error = dataclasses.asdict(
SupersetError(
message=str(ex),
level=ErrorLevel.ERROR,
error_type=SupersetErrorType.VIZ_GET_DF_ERROR,
)
)
self.errors.append(error)
self.status = utils.QueryStatus.FAILED
except Exception as ex:
logger.exception(ex)
error = dataclasses.asdict(
SupersetError(
message=str(ex),
level=ErrorLevel.ERROR,
error_type=SupersetErrorType.VIZ_GET_DF_ERROR,
)
)
self.errors.append(error)
self.status = utils.QueryStatus.FAILED
stacktrace = utils.get_stacktrace()
if (
is_loaded
and cache_key
and cache
and self.status != utils.QueryStatus.FAILED
):
set_and_log_cache(
cache_key,
df,
self.query,
cached_dttm,
self.cache_timeout,
self.datasource.uid,
)
return {
"cache_key": self._any_cache_key,
"cached_dttm": self._any_cached_dttm,
"cache_timeout": self.cache_timeout,
"df": df,
"errors": self.errors,
"form_data": self.form_data,
"is_cached": self._any_cache_key is not None,
"query": self.query,
"from_dttm": self.from_dttm,
"to_dttm": self.to_dttm,
"status": self.status,
"stacktrace": stacktrace,
"rowcount": len(df.index) if df is not None else 0,
}
def json_dumps(self, obj: Any, sort_keys: bool = False) -> str:
return json.dumps(
obj, default=utils.json_int_dttm_ser, ignore_nan=True, sort_keys=sort_keys
)
def payload_json_and_has_error(self, payload: VizPayload) -> Tuple[str, bool]:
has_error = (
payload.get("status") == utils.QueryStatus.FAILED
or payload.get("error") is not None
or bool(payload.get("errors"))
)
return self.json_dumps(payload), has_error
@property
def data(self) -> Dict[str, Any]:
"""This is the data object serialized to the js layer"""
content = {
"form_data": self.form_data,
"token": self.token,
"viz_name": self.viz_type,
"filter_select_enabled": self.datasource.filter_select_enabled,
}
return content
def get_csv(self) -> Optional[str]:
df = self.get_df()
include_index = not isinstance(df.index, pd.RangeIndex)
return df.to_csv(index=include_index, **config["CSV_EXPORT"])
def get_data(self, df: pd.DataFrame) -> VizData:
return df.to_dict(orient="records")
@property
def json_data(self) -> str:
return json.dumps(self.data)
def raise_for_access(self) -> None:
"""
Raise an exception if the user cannot access the resource.
:raises SupersetSecurityException: If the user cannot access the resource
"""
security_manager.raise_for_access(viz=self)
class TableViz(BaseViz):
"""A basic html table that is sortable and searchable"""
viz_type = "table"
verbose_name = _("Table View")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
enforce_numerical_metrics = False
def process_metrics(self) -> None:
"""Process form data and store parsed column configs.
1. Determine query mode based on form_data params.
- Use `query_mode` if it has a valid value
- Set as RAW mode if `all_columns` is set
- Otherwise defaults to AGG mode
2. Determine output columns based on query mode.
"""
# Verify form data first: if not specifying query mode, then cannot have both
# GROUP BY and RAW COLUMNS.
fd = self.form_data
if (
not fd.get("query_mode")
and fd.get("all_columns")
and (fd.get("groupby") or fd.get("metrics") or fd.get("percent_metrics"))
):
raise QueryObjectValidationError(
_(
"You cannot use [Columns] in combination with "
"[Group By]/[Metrics]/[Percentage Metrics]. "
"Please choose one or the other."
)
)
super().process_metrics()
self.query_mode: QueryMode = QueryMode.get(fd.get("query_mode")) or (
# infer query mode from the presence of other fields
QueryMode.RAW
if len(fd.get("all_columns") or []) > 0
else QueryMode.AGGREGATE
)
columns: List[str] = [] # output columns sans time and percent_metric column
        percent_columns: List[str] = []  # percent columns that need extra computation
if self.query_mode == QueryMode.RAW:
columns = utils.get_metric_names(fd.get("all_columns") or [])
else:
columns = utils.get_metric_names(self.groupby + (fd.get("metrics") or []))
percent_columns = utils.get_metric_names(fd.get("percent_metrics") or [])
self.columns = columns
self.percent_columns = percent_columns
self.is_timeseries = self.should_be_timeseries()
def should_be_timeseries(self) -> bool:
fd = self.form_data
# TODO handle datasource-type-specific code in datasource
conditions_met = (fd.get("granularity") and fd.get("granularity") != "all") or (
fd.get("granularity_sqla") and fd.get("time_grain_sqla")
)
if fd.get("include_time") and not conditions_met:
raise QueryObjectValidationError(
_("Pick a granularity in the Time section or " "uncheck 'Include Time'")
)
return bool(fd.get("include_time"))
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
if self.query_mode == QueryMode.RAW:
d["columns"] = fd.get("all_columns")
order_by_cols = fd.get("order_by_cols") or []
d["orderby"] = [json.loads(t) for t in order_by_cols]
# must disable groupby and metrics in raw mode
d["groupby"] = []
d["metrics"] = []
# raw mode does not support timeseries queries
d["timeseries_limit_metric"] = None
d["timeseries_limit"] = None
d["is_timeseries"] = None
else:
sort_by = fd.get("timeseries_limit_metric")
if sort_by:
sort_by_label = utils.get_metric_name(sort_by)
if sort_by_label not in d["metrics"]:
d["metrics"].append(sort_by)
d["orderby"] = [(sort_by, not fd.get("order_desc", True))]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
"""
Transform the query result to the table representation.
:param df: The interim dataframe
:returns: The table visualization data
        The interim dataframe comprises the group-by and non-group-by columns and
the union of the metrics representing the non-percent and percent metrics. Note
the percent metrics have yet to be transformed.
"""
# Transform the data frame to adhere to the UI ordering of the columns and
# metrics whilst simultaneously computing the percentages (via normalization)
# for the percent metrics.
if df.empty:
return None
columns, percent_columns = self.columns, self.percent_columns
if DTTM_ALIAS in df and self.is_timeseries:
columns = [DTTM_ALIAS] + columns
df = pd.concat(
[
df[columns],
(df[percent_columns].div(df[percent_columns].sum()).add_prefix("%")),
],
axis=1,
)
return self.handle_js_int_overflow(
dict(records=df.to_dict(orient="records"), columns=list(df.columns))
)
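    # Illustrative percent-metric normalization used above (hypothetical column):
    #     pd.DataFrame({"p": [1, 3]}).div(pd.DataFrame({"p": [1, 3]}).sum()).add_prefix("%")
    # produces a "%p" column with values [0.25, 0.75].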
def json_dumps(self, obj: Any, sort_keys: bool = False) -> str:
return json.dumps(
obj, default=utils.json_iso_dttm_ser, sort_keys=sort_keys, ignore_nan=True
)
class TimeTableViz(BaseViz):
"""A data table with rich time-series related columns"""
viz_type = "time_table"
verbose_name = _("Time Table View")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
if not fd.get("metrics"):
raise QueryObjectValidationError(_("Pick at least one metric"))
if fd.get("groupby") and len(fd["metrics"]) > 1:
raise QueryObjectValidationError(
_("When using 'Group By' you are limited to use a single metric")
)
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
columns = None
values: Union[List[str], str] = self.metric_labels
if fd.get("groupby"):
values = self.metric_labels[0]
columns = fd.get("groupby")
pt = df.pivot_table(index=DTTM_ALIAS, columns=columns, values=values)
pt.index = pt.index.map(str)
pt = pt.sort_index()
return dict(
records=pt.to_dict(orient="index"),
columns=list(pt.columns),
is_group_by=len(fd.get("groupby", [])) > 0,
)
class PivotTableViz(BaseViz):
"""A pivot table view, define your rows, columns and metrics"""
viz_type = "pivot_table"
verbose_name = _("Pivot Table")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
enforce_numerical_metrics = False
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
groupby = self.form_data.get("groupby")
columns = self.form_data.get("columns")
metrics = self.form_data.get("metrics")
transpose = self.form_data.get("transpose_pivot")
if not columns:
columns = []
if not groupby:
groupby = []
if not groupby:
raise QueryObjectValidationError(
_("Please choose at least one 'Group by' field ")
)
if transpose and not columns:
raise QueryObjectValidationError(
_(
(
"Please choose at least one 'Columns' field when "
"select 'Transpose Pivot' option"
)
)
)
if not metrics:
raise QueryObjectValidationError(_("Please choose at least one metric"))
if set(groupby) & set(columns):
raise QueryObjectValidationError(_("Group By' and 'Columns' can't overlap"))
return d
@staticmethod
def get_aggfunc(
metric: str, df: pd.DataFrame, form_data: Dict[str, Any]
) -> Union[str, Callable[[Any], Any]]:
aggfunc = form_data.get("pandas_aggfunc") or "sum"
if pd.api.types.is_numeric_dtype(df[metric]):
# Ensure that Pandas's sum function mimics that of SQL.
if aggfunc == "sum":
return lambda x: x.sum(min_count=1)
# only min and max work properly for non-numerics
return aggfunc if aggfunc in ("min", "max") else "max"
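    # The SQL-like sum above differs from pandas' default when every value is
    # null (illustrative):
    #     pd.Series([np.nan, np.nan]).sum(min_count=1)  ->  nan   (like SQL SUM)
    #     pd.Series([np.nan, np.nan]).sum()             ->  0.0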
@staticmethod
def _format_datetime(value: Union[pd.Timestamp, datetime, date, str]) -> str:
"""
Format a timestamp in such a way that the viz will be able to apply
the correct formatting in the frontend.
:param value: the value of a temporal column
:return: formatted timestamp if it is a valid timestamp, otherwise
the original value
"""
tstamp: Optional[pd.Timestamp] = None
if isinstance(value, pd.Timestamp):
tstamp = value
if isinstance(value, datetime) or isinstance(value, date):
tstamp = pd.Timestamp(value)
if isinstance(value, str):
try:
tstamp = pd.Timestamp(value)
except ValueError:
pass
if tstamp:
return f"__timestamp:{datetime_to_epoch(tstamp)}"
# fallback in case something incompatible is returned
return cast(str, value)
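    # Illustrative result of _format_datetime (the exact numeric formatting of
    # the epoch value depends on datetime_to_epoch):
    #     _format_datetime(pd.Timestamp("1970-01-01"))  ->  "__timestamp:0.0" (or "__timestamp:0")
    # while strings that do not parse as timestamps fall through unchanged.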
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
if self.form_data.get("granularity") == "all" and DTTM_ALIAS in df:
del df[DTTM_ALIAS]
metrics = [utils.get_metric_name(m) for m in self.form_data["metrics"]]
aggfuncs: Dict[str, Union[str, Callable[[Any], Any]]] = {}
for metric in metrics:
aggfuncs[metric] = self.get_aggfunc(metric, df, self.form_data)
groupby = self.form_data.get("groupby") or []
columns = self.form_data.get("columns") or []
for column_name in groupby + columns:
column = self.datasource.get_column(column_name)
if column and column.is_temporal:
ts = df[column_name].apply(self._format_datetime)
df[column_name] = ts
if self.form_data.get("transpose_pivot"):
groupby, columns = columns, groupby
df = df.pivot_table(
index=groupby,
columns=columns,
values=metrics,
aggfunc=aggfuncs,
margins=self.form_data.get("pivot_margins"),
)
# Re-order the columns adhering to the metric ordering.
df = df[metrics]
# Display metrics side by side with each column
if self.form_data.get("combine_metric"):
df = df.stack(0).unstack()
return dict(
columns=list(df.columns),
html=df.to_html(
na_rep="null",
classes=(
"dataframe table table-striped table-bordered "
"table-condensed table-hover"
).split(" "),
),
)
class TreemapViz(BaseViz):
"""Tree map visualisation for hierarchical data."""
viz_type = "treemap"
verbose_name = _("Treemap")
credits = '<a href="https://d3js.org">d3.js</a>'
is_timeseries = False
def _nest(self, metric: str, df: pd.DataFrame) -> List[Dict[str, Any]]:
nlevels = df.index.nlevels
if nlevels == 1:
result = [{"name": n, "value": v} for n, v in zip(df.index, df[metric])]
else:
result = [
{"name": l, "children": self._nest(metric, df.loc[l])}
for l in df.index.levels[0]
]
return result
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df = df.set_index(self.form_data.get("groupby"))
chart_data = [
{"name": metric, "children": self._nest(metric, df)}
for metric in df.columns
]
return chart_data
class CalHeatmapViz(BaseViz):
"""Calendar heatmap."""
viz_type = "cal_heatmap"
verbose_name = _("Calendar Heatmap")
credits = "<a href=https://github.com/wa0x6e/cal-heatmap>cal-heatmap</a>"
is_timeseries = True
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
form_data = self.form_data
data = {}
records = df.to_dict("records")
for metric in self.metric_labels:
values = {}
for obj in records:
v = obj[DTTM_ALIAS]
if hasattr(v, "value"):
v = v.value
values[str(v / 10 ** 9)] = obj.get(metric)
data[metric] = values
try:
start, end = utils.get_since_until(
relative_start=relative_start,
relative_end=relative_end,
time_range=form_data.get("time_range"),
since=form_data.get("since"),
until=form_data.get("until"),
)
except ValueError as ex:
raise QueryObjectValidationError(str(ex))
if not start or not end:
raise QueryObjectValidationError(
"Please provide both time bounds (Since and Until)"
)
domain = form_data.get("domain_granularity")
diff_delta = rdelta.relativedelta(end, start)
diff_secs = (end - start).total_seconds()
if domain == "year":
range_ = diff_delta.years + 1
elif domain == "month":
range_ = diff_delta.years * 12 + diff_delta.months + 1
elif domain == "week":
range_ = diff_delta.years * 53 + diff_delta.weeks + 1
elif domain == "day":
range_ = diff_secs // (24 * 60 * 60) + 1 # type: ignore
else:
range_ = diff_secs // (60 * 60) + 1 # type: ignore
return {
"data": data,
"start": start,
"domain": domain,
"subdomain": form_data.get("subdomain_granularity"),
"range": range_,
}
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
d["metrics"] = fd.get("metrics")
return d
class NVD3Viz(BaseViz):
"""Base class for all nvd3 vizs"""
credits = '<a href="http://nvd3.org/">NVD3.org</a>'
viz_type: Optional[str] = None
verbose_name = "Base NVD3 Viz"
is_timeseries = False
class BoxPlotViz(NVD3Viz):
"""Box plot viz from ND3"""
viz_type = "box_plot"
verbose_name = _("Box Plot")
sort_series = False
is_timeseries = True
def to_series(
self, df: pd.DataFrame, classed: str = "", title_suffix: str = ""
) -> List[Dict[str, Any]]:
label_sep = " - "
chart_data = []
for index_value, row in zip(df.index, df.to_dict(orient="records")):
if isinstance(index_value, tuple):
index_value = label_sep.join(index_value)
boxes: Dict[str, Dict[str, Any]] = defaultdict(dict)
for (label, key), value in row.items():
if key == "nanmedian":
key = "Q2"
boxes[label][key] = value
for label, box in boxes.items():
if len(self.form_data["metrics"]) > 1:
# need to render data labels with metrics
chart_label = label_sep.join([index_value, label])
else:
chart_label = index_value
chart_data.append({"label": chart_label, "values": box})
return chart_data
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
form_data = self.form_data
# conform to NVD3 names
def Q1(series: pd.Series) -> float:
# need to be named functions - can't use lambdas
return np.nanpercentile(series, 25)
def Q3(series: pd.Series) -> float:
return np.nanpercentile(series, 75)
whisker_type = form_data.get("whisker_options")
if whisker_type == "Tukey":
def whisker_high(series: pd.Series) -> float:
upper_outer_lim = Q3(series) + 1.5 * (Q3(series) - Q1(series))
return series[series <= upper_outer_lim].max()
def whisker_low(series: pd.Series) -> float:
lower_outer_lim = Q1(series) - 1.5 * (Q3(series) - Q1(series))
return series[series >= lower_outer_lim].min()
elif whisker_type == "Min/max (no outliers)":
def whisker_high(series: pd.Series) -> float:
return series.max()
def whisker_low(series: pd.Series) -> float:
return series.min()
elif " percentiles" in whisker_type: # type: ignore
low, high = cast(str, whisker_type).replace(" percentiles", "").split("/")
def whisker_high(series: pd.Series) -> float:
return np.nanpercentile(series, int(high))
def whisker_low(series: pd.Series) -> float:
return np.nanpercentile(series, int(low))
else:
raise ValueError("Unknown whisker type: {}".format(whisker_type))
def outliers(series: pd.Series) -> Set[float]:
above = series[series > whisker_high(series)]
below = series[series < whisker_low(series)]
# pandas sometimes doesn't like getting lists back here
return set(above.tolist() + below.tolist())
aggregate = [Q1, np.nanmedian, Q3, whisker_high, whisker_low, outliers]
df = df.groupby(form_data.get("groupby")).agg(aggregate)
chart_data = self.to_series(df)
return chart_data
class BubbleViz(NVD3Viz):
"""Based on the NVD3 bubble chart"""
viz_type = "bubble"
verbose_name = _("Bubble Chart")
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
form_data = self.form_data
d = super().query_obj()
d["groupby"] = [form_data.get("entity")]
if form_data.get("series"):
d["groupby"].append(form_data.get("series"))
# dedup groupby if it happens to be the same
d["groupby"] = list(dict.fromkeys(d["groupby"]))
self.x_metric = form_data["x"]
self.y_metric = form_data["y"]
self.z_metric = form_data["size"]
self.entity = form_data.get("entity")
self.series = form_data.get("series") or self.entity
d["row_limit"] = form_data.get("limit")
d["metrics"] = [self.z_metric, self.x_metric, self.y_metric]
if len(set(self.metric_labels)) < 3:
raise QueryObjectValidationError(_("Please use 3 different metric labels"))
if not all(d["metrics"] + [self.entity]):
raise QueryObjectValidationError(_("Pick a metric for x, y and size"))
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df["x"] = df[[utils.get_metric_name(self.x_metric)]]
df["y"] = df[[utils.get_metric_name(self.y_metric)]]
df["size"] = df[[utils.get_metric_name(self.z_metric)]]
df["shape"] = "circle"
df["group"] = df[[self.series]]
series: Dict[Any, List[Any]] = defaultdict(list)
for row in df.to_dict(orient="records"):
series[row["group"]].append(row)
chart_data = []
for k, v in series.items():
chart_data.append({"key": k, "values": v})
return chart_data
class BulletViz(NVD3Viz):
"""Based on the NVD3 bullet chart"""
viz_type = "bullet"
verbose_name = _("Bullet Chart")
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
form_data = self.form_data
d = super().query_obj()
self.metric = form_data["metric"]
d["metrics"] = [self.metric]
if not self.metric:
raise QueryObjectValidationError(_("Pick a metric to display"))
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df["metric"] = df[[utils.get_metric_name(self.metric)]]
values = df["metric"].values
return {
"measures": values.tolist(),
}
class BigNumberViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number"
verbose_name = _("Big Number with Trendline")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
metric = self.form_data.get("metric")
if not metric:
raise QueryObjectValidationError(_("Pick a metric!"))
d["metrics"] = [self.form_data.get("metric")]
self.form_data["metric"] = metric
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df = df.pivot_table(
index=DTTM_ALIAS,
columns=[],
values=self.metric_labels,
dropna=False,
aggfunc=np.min, # looking for any (only) value, preserving `None`
)
df = self.apply_rolling(df)
df[DTTM_ALIAS] = df.index
return super().get_data(df)
class BigNumberTotalViz(BaseViz):
"""Put emphasis on a single metric with this big number viz"""
viz_type = "big_number_total"
verbose_name = _("Big Number")
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
metric = self.form_data.get("metric")
if not metric:
raise QueryObjectValidationError(_("Pick a metric!"))
d["metrics"] = [self.form_data.get("metric")]
self.form_data["metric"] = metric
# Limiting rows is not required as only one cell is returned
d["row_limit"] = None
return d
class NVD3TimeSeriesViz(NVD3Viz):
"""A rich line chart component with tons of options"""
viz_type = "line"
verbose_name = _("Time Series - Line Chart")
sort_series = False
is_timeseries = True
pivot_fill_value: Optional[int] = None
def to_series(
self, df: pd.DataFrame, classed: str = "", title_suffix: str = ""
) -> List[Dict[str, Any]]:
cols = []
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
series = df.to_dict("series")
chart_data = []
for name in df.T.index.tolist():
ys = series[name]
if df[name].dtype.kind not in "biufc":
continue
series_title: Union[List[str], str, Tuple[str, ...]]
if isinstance(name, list):
series_title = [str(title) for title in name]
elif isinstance(name, tuple):
series_title = tuple(str(title) for title in name)
else:
series_title = str(name)
if (
isinstance(series_title, (list, tuple))
and len(series_title) > 1
and len(self.metric_labels) == 1
):
# Removing metric from series name if only one metric
series_title = series_title[1:]
if title_suffix:
if isinstance(series_title, str):
series_title = (series_title, title_suffix)
elif isinstance(series_title, list):
series_title = series_title + [title_suffix]
elif isinstance(series_title, tuple):
series_title = series_title + (title_suffix,)
values = []
non_nan_cnt = 0
for ds in df.index:
if ds in ys:
d = {"x": ds, "y": ys[ds]}
if not np.isnan(ys[ds]):
non_nan_cnt += 1
else:
d = {}
values.append(d)
if non_nan_cnt == 0:
continue
d = {"key": series_title, "values": values}
if classed:
d["classed"] = classed
chart_data.append(d)
return chart_data
def process_data(self, df: pd.DataFrame, aggregate: bool = False) -> VizData:
fd = self.form_data
if fd.get("granularity") == "all":
raise QueryObjectValidationError(
_("Pick a time granularity for your time series")
)
if df.empty:
return df
if aggregate:
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get("groupby"),
values=self.metric_labels,
fill_value=0,
aggfunc=sum,
)
else:
df = df.pivot_table(
index=DTTM_ALIAS,
columns=fd.get("groupby"),
values=self.metric_labels,
fill_value=self.pivot_fill_value,
)
rule = fd.get("resample_rule")
method = fd.get("resample_method")
if rule and method:
df = getattr(df.resample(rule), method)()
if self.sort_series:
dfs = df.sum()
dfs.sort_values(ascending=False, inplace=True)
df = df[dfs.index]
df = self.apply_rolling(df)
if fd.get("contribution"):
dft = df.T
df = (dft / dft.sum()).T
return df
def run_extra_queries(self) -> None:
fd = self.form_data
time_compare = fd.get("time_compare") or []
# backwards compatibility
if not isinstance(time_compare, list):
time_compare = [time_compare]
for option in time_compare:
query_object = self.query_obj()
try:
delta = utils.parse_past_timedelta(option)
except ValueError as ex:
raise QueryObjectValidationError(str(ex))
query_object["inner_from_dttm"] = query_object["from_dttm"]
query_object["inner_to_dttm"] = query_object["to_dttm"]
if not query_object["from_dttm"] or not query_object["to_dttm"]:
raise QueryObjectValidationError(
_(
"`Since` and `Until` time bounds should be specified "
"when using the `Time Shift` feature."
)
)
query_object["from_dttm"] -= delta
query_object["to_dttm"] -= delta
df2 = self.get_df_payload(query_object, time_compare=option).get("df")
if df2 is not None and DTTM_ALIAS in df2:
label = "{} offset".format(option)
df2[DTTM_ALIAS] += delta
df2 = self.process_data(df2)
self._extra_chart_data.append((label, df2))
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
comparison_type = fd.get("comparison_type") or "values"
df = self.process_data(df)
if comparison_type == "values":
# Filter out series with all NaN
chart_data = self.to_series(df.dropna(axis=1, how="all"))
for i, (label, df2) in enumerate(self._extra_chart_data):
chart_data.extend(
self.to_series(
df2, classed="time-shift-{}".format(i), title_suffix=label
)
)
else:
chart_data = []
for i, (label, df2) in enumerate(self._extra_chart_data):
                # reindex df2 onto df's index (interpolating over the combined index)
combined_index = df.index.union(df2.index)
df2 = (
df2.reindex(combined_index)
.interpolate(method="time")
.reindex(df.index)
)
if comparison_type == "absolute":
diff = df - df2
elif comparison_type == "percentage":
diff = (df - df2) / df2
elif comparison_type == "ratio":
diff = df / df2
else:
raise QueryObjectValidationError(
"Invalid `comparison_type`: {0}".format(comparison_type)
)
# remove leading/trailing NaNs from the time shift difference
diff = diff[diff.first_valid_index() : diff.last_valid_index()]
chart_data.extend(
self.to_series(
diff, classed="time-shift-{}".format(i), title_suffix=label
)
)
if not self.sort_series:
chart_data = sorted(chart_data, key=lambda x: tuple(x["key"]))
return chart_data
class MultiLineViz(NVD3Viz):
"""Pile on multiple line charts"""
viz_type = "line_multi"
verbose_name = _("Time Series - Multiple Line Charts")
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
return {}
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
# Late imports to avoid circular import issues
from superset import db
from superset.models.slice import Slice
slice_ids1 = fd.get("line_charts")
slices1 = db.session.query(Slice).filter(Slice.id.in_(slice_ids1)).all()
slice_ids2 = fd.get("line_charts_2")
slices2 = db.session.query(Slice).filter(Slice.id.in_(slice_ids2)).all()
return {
"slices": {
"axis1": [slc.data for slc in slices1],
"axis2": [slc.data for slc in slices2],
}
}
class NVD3DualLineViz(NVD3Viz):
"""A rich line chart with dual axis"""
viz_type = "dual_line"
verbose_name = _("Time Series - Dual Axis Line Chart")
sort_series = False
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
m1 = self.form_data.get("metric")
m2 = self.form_data.get("metric_2")
d["metrics"] = [m1, m2]
if not m1:
raise QueryObjectValidationError(_("Pick a metric for left axis!"))
if not m2:
raise QueryObjectValidationError(_("Pick a metric for right axis!"))
if m1 == m2:
raise QueryObjectValidationError(
_("Please choose different metrics" " on left and right axis")
)
return d
def to_series(self, df: pd.DataFrame, classed: str = "") -> List[Dict[str, Any]]:
cols = []
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
series = df.to_dict("series")
chart_data = []
metrics = [self.form_data["metric"], self.form_data["metric_2"]]
for i, m in enumerate(metrics):
m = utils.get_metric_name(m)
ys = series[m]
if df[m].dtype.kind not in "biufc":
continue
series_title = m
d = {
"key": series_title,
"classed": classed,
"values": [
{"x": ds, "y": ys[ds] if ds in ys else None} for ds in df.index
],
"yAxis": i + 1,
"type": "line",
}
chart_data.append(d)
return chart_data
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
if self.form_data.get("granularity") == "all":
raise QueryObjectValidationError(
_("Pick a time granularity for your time series")
)
metric = utils.get_metric_name(fd["metric"])
metric_2 = utils.get_metric_name(fd["metric_2"])
df = df.pivot_table(index=DTTM_ALIAS, values=[metric, metric_2])
chart_data = self.to_series(df)
return chart_data
class NVD3TimeSeriesBarViz(NVD3TimeSeriesViz):
"""A bar chart where the x axis is time"""
viz_type = "bar"
sort_series = True
verbose_name = _("Time Series - Bar Chart")
class NVD3TimePivotViz(NVD3TimeSeriesViz):
"""Time Series - Periodicity Pivot"""
viz_type = "time_pivot"
sort_series = True
verbose_name = _("Time Series - Period Pivot")
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
d["metrics"] = [self.form_data.get("metric")]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
df = self.process_data(df)
freq = to_offset(fd.get("freq"))
try:
freq = type(freq)(freq.n, normalize=True, **freq.kwds)
except ValueError:
freq = type(freq)(freq.n, **freq.kwds)
df.index.name = None
df[DTTM_ALIAS] = df.index.map(freq.rollback)
df["ranked"] = df[DTTM_ALIAS].rank(method="dense", ascending=False) - 1
df.ranked = df.ranked.map(int)
df["series"] = "-" + df.ranked.map(str)
df["series"] = df["series"].str.replace("-0", "current")
rank_lookup = {
row["series"]: row["ranked"] for row in df.to_dict(orient="records")
}
max_ts = df[DTTM_ALIAS].max()
max_rank = df["ranked"].max()
df[DTTM_ALIAS] = df.index + (max_ts - df[DTTM_ALIAS])
df = df.pivot_table(
index=DTTM_ALIAS,
columns="series",
values=utils.get_metric_name(fd["metric"]),
)
chart_data = self.to_series(df)
for serie in chart_data:
serie["rank"] = rank_lookup[serie["key"]]
serie["perc"] = 1 - (serie["rank"] / (max_rank + 1))
return chart_data
class NVD3CompareTimeSeriesViz(NVD3TimeSeriesViz):
"""A line chart component where you can compare the % change over time"""
viz_type = "compare"
verbose_name = _("Time Series - Percent Change")
class NVD3TimeSeriesStackedViz(NVD3TimeSeriesViz):
"""A rich stack area chart"""
viz_type = "area"
verbose_name = _("Time Series - Stacked")
sort_series = True
pivot_fill_value = 0
class HistogramViz(BaseViz):
"""Histogram"""
viz_type = "histogram"
verbose_name = _("Histogram")
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
"""Returns the query object for this visualization"""
d = super().query_obj()
d["row_limit"] = self.form_data.get("row_limit", int(config["VIZ_ROW_LIMIT"]))
numeric_columns = self.form_data.get("all_columns_x")
if numeric_columns is None:
raise QueryObjectValidationError(
_("Must have at least one numeric column specified")
)
self.columns = numeric_columns
d["columns"] = numeric_columns + self.groupby
# override groupby entry to avoid aggregation
d["groupby"] = []
return d
def labelify(self, keys: Union[List[str], str], column: str) -> str:
if isinstance(keys, str):
keys = [keys]
# removing undesirable characters
labels = [re.sub(r"\W+", r"_", k) for k in keys]
if len(self.columns) > 1 or not self.groupby:
# Only show numeric column in label if there are many
labels = [column] + labels
return "__".join(labels)
def get_data(self, df: pd.DataFrame) -> VizData:
"""Returns the chart data"""
if df.empty:
return None
chart_data = []
if len(self.groupby) > 0:
groups = df.groupby(self.groupby)
else:
groups = [((), df)]
for keys, data in groups:
chart_data.extend(
[
{
"key": self.labelify(keys, column),
"values": data[column].tolist(),
}
for column in self.columns
]
)
return chart_data
class DistributionBarViz(BaseViz):
"""A good old bar chart"""
viz_type = "dist_bar"
verbose_name = _("Distribution - Bar Chart")
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
if len(d["groupby"]) < len(fd.get("groupby") or []) + len(
fd.get("columns") or []
):
raise QueryObjectValidationError(
_("Can't have overlap between Series and Breakdowns")
)
if not fd.get("metrics"):
raise QueryObjectValidationError(_("Pick at least one metric"))
if not fd.get("groupby"):
raise QueryObjectValidationError(_("Pick at least one field for [Series]"))
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
metrics = self.metric_labels
columns = fd.get("columns") or []
# pandas will throw away nulls when grouping/pivoting,
# so we substitute NULL_STRING for any nulls in the necessary columns
filled_cols = self.groupby + columns
df[filled_cols] = df[filled_cols].fillna(value=NULL_STRING)
row = df.groupby(self.groupby).sum()[metrics[0]].copy()
row.sort_values(ascending=False, inplace=True)
pt = df.pivot_table(index=self.groupby, columns=columns, values=metrics)
if fd.get("contribution"):
pt = pt.T
pt = (pt / pt.sum()).T
pt = pt.reindex(row.index)
chart_data = []
for name, ys in pt.items():
if pt[name].dtype.kind not in "biufc" or name in self.groupby:
continue
if isinstance(name, str):
series_title = name
else:
offset = 0 if len(metrics) > 1 else 1
series_title = ", ".join([str(s) for s in name[offset:]])
values = []
for i, v in ys.items():
x = i
if isinstance(x, (tuple, list)):
x = ", ".join([str(s) for s in x])
else:
x = str(x)
values.append({"x": x, "y": v})
d = {"key": series_title, "values": values}
chart_data.append(d)
return chart_data
class SunburstViz(BaseViz):
"""A multi level sunburst chart"""
viz_type = "sunburst"
verbose_name = _("Sunburst")
is_timeseries = False
credits = (
"Kerry Rodden "
'@<a href="https://bl.ocks.org/kerryrodden/7090426">bl.ocks.org</a>'
)
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
cols = fd.get("groupby") or []
cols.extend(["m1", "m2"])
metric = utils.get_metric_name(fd["metric"])
secondary_metric = (
utils.get_metric_name(fd["secondary_metric"])
if "secondary_metric" in fd
else None
)
if metric == secondary_metric or secondary_metric is None:
df.rename(columns={df.columns[-1]: "m1"}, inplace=True)
df["m2"] = df["m1"]
else:
df.rename(columns={df.columns[-2]: "m1"}, inplace=True)
df.rename(columns={df.columns[-1]: "m2"}, inplace=True)
# Re-order the columns as the query result set column ordering may differ from
# that listed in the hierarchy.
df = df[cols]
return df.to_numpy().tolist()
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
fd = self.form_data
qry["metrics"] = [fd["metric"]]
secondary_metric = fd.get("secondary_metric")
if secondary_metric and secondary_metric != fd["metric"]:
qry["metrics"].append(secondary_metric)
return qry
class SankeyViz(BaseViz):
"""A Sankey diagram that requires a parent-child dataset"""
viz_type = "sankey"
verbose_name = _("Sankey")
is_timeseries = False
credits = '<a href="https://www.npmjs.com/package/d3-sankey">d3-sankey on npm</a>'
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
if len(qry["groupby"]) != 2:
raise QueryObjectValidationError(
_("Pick exactly 2 columns as [Source / Target]")
)
qry["metrics"] = [self.form_data["metric"]]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
source, target = self.groupby
(value,) = self.metric_labels
df.rename(
columns={source: "source", target: "target", value: "value",}, inplace=True,
)
df["source"] = df["source"].astype(str)
df["target"] = df["target"].astype(str)
recs = df.to_dict(orient="records")
hierarchy: Dict[str, Set[str]] = defaultdict(set)
for row in recs:
hierarchy[row["source"]].add(row["target"])
def find_cycle(g: Dict[str, Set[str]]) -> Optional[Tuple[str, str]]:
"""Whether there's a cycle in a directed graph"""
path = set()
def visit(vertex: str) -> Optional[Tuple[str, str]]:
path.add(vertex)
for neighbour in g.get(vertex, ()):
if neighbour in path or visit(neighbour):
return (vertex, neighbour)
path.remove(vertex)
return None
for v in g:
cycle = visit(v)
if cycle:
return cycle
return None
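        # Illustrative checks of the cycle detector (hypothetical graphs):
        #     find_cycle({"a": {"b"}, "b": {"a"}})   ->  ("a", "b")
        #     find_cycle({"a": {"b"}, "b": set()})   ->  None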
cycle = find_cycle(hierarchy)
if cycle:
raise QueryObjectValidationError(
_(
"There's a loop in your Sankey, please provide a tree. "
"Here's a faulty link: {}"
).format(cycle)
)
return recs
class DirectedForceViz(BaseViz):
"""An animated directed force layout graph visualization"""
viz_type = "directed_force"
verbose_name = _("Directed Force Layout")
credits = 'd3noob @<a href="http://bl.ocks.org/d3noob/5141278">bl.ocks.org</a>'
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
if len(self.form_data["groupby"]) != 2:
raise QueryObjectValidationError(_("Pick exactly 2 columns to 'Group By'"))
qry["metrics"] = [self.form_data["metric"]]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df.columns = ["source", "target", "value"]
return df.to_dict(orient="records")
class ChordViz(BaseViz):
"""A Chord diagram"""
viz_type = "chord"
verbose_name = _("Directed Force Layout")
credits = '<a href="https://github.com/d3/d3-chord">Bostock</a>'
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
fd = self.form_data
qry["groupby"] = [fd.get("groupby"), fd.get("columns")]
qry["metrics"] = [fd.get("metric")]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
df.columns = ["source", "target", "value"]
        # Preparing a symmetrical matrix like d3.chords calls for
nodes = list(set(df["source"]) | set(df["target"]))
matrix = {}
for source, target in product(nodes, nodes):
matrix[(source, target)] = 0
for source, target, value in df.to_records(index=False):
matrix[(source, target)] = value
m = [[matrix[(n1, n2)] for n1 in nodes] for n2 in nodes]
return {"nodes": list(nodes), "matrix": m}
class CountryMapViz(BaseViz):
"""A country centric"""
viz_type = "country_map"
verbose_name = _("Country Map")
is_timeseries = False
credits = "From bl.ocks.org By john-guerra"
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
qry["metrics"] = [self.form_data["metric"]]
qry["groupby"] = [self.form_data["entity"]]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
cols = [fd.get("entity")]
metric = self.metric_labels[0]
cols += [metric]
ndf = df[cols]
df = ndf
df.columns = ["country_id", "metric"]
d = df.to_dict(orient="records")
return d
class WorldMapViz(BaseViz):
"""A country centric world map"""
viz_type = "world_map"
verbose_name = _("World Map")
is_timeseries = False
credits = 'datamaps on <a href="https://www.npmjs.com/package/datamaps">npm</a>'
def query_obj(self) -> QueryObjectDict:
qry = super().query_obj()
qry["groupby"] = [self.form_data["entity"]]
return qry
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
from superset.examples import countries
fd = self.form_data
cols = [fd.get("entity")]
metric = utils.get_metric_name(fd["metric"])
secondary_metric = (
utils.get_metric_name(fd["secondary_metric"])
if "secondary_metric" in fd
else None
)
columns = ["country", "m1", "m2"]
if metric == secondary_metric:
ndf = df[cols]
ndf["m1"] = df[metric]
ndf["m2"] = ndf["m1"]
else:
if secondary_metric:
cols += [metric, secondary_metric]
else:
cols += [metric]
columns = ["country", "m1"]
ndf = df[cols]
df = ndf
df.columns = columns
d = df.to_dict(orient="records")
for row in d:
country = None
if isinstance(row["country"], str):
if "country_fieldtype" in fd:
country = countries.get(fd["country_fieldtype"], row["country"])
if country:
row["country"] = country["cca3"]
row["latitude"] = country["lat"]
row["longitude"] = country["lng"]
row["name"] = country["name"]
else:
row["country"] = "XXX"
return d
class FilterBoxViz(BaseViz):
"""A multi filter, multi-choice filter box to make dashboards interactive"""
viz_type = "filter_box"
verbose_name = _("Filters")
is_timeseries = False
credits = 'a <a href="https://github.com/airbnb/superset">Superset</a> original'
cache_type = "get_data"
filter_row_limit = 1000
def query_obj(self) -> QueryObjectDict:
return {}
def run_extra_queries(self) -> None:
qry = super().query_obj()
filters = self.form_data.get("filter_configs") or []
qry["row_limit"] = self.filter_row_limit
self.dataframes = {}
for flt in filters:
col = flt.get("column")
if not col:
raise QueryObjectValidationError(
_("Invalid filter configuration, please select a column")
)
qry["groupby"] = [col]
metric = flt.get("metric")
qry["metrics"] = [metric] if metric else []
df = self.get_df_payload(query_obj=qry).get("df")
self.dataframes[col] = df
def get_data(self, df: pd.DataFrame) -> VizData:
filters = self.form_data.get("filter_configs") or []
d = {}
for flt in filters:
col = flt.get("column")
metric = flt.get("metric")
df = self.dataframes.get(col)
if df is not None and not df.empty:
if metric:
df = df.sort_values(
utils.get_metric_name(metric), ascending=flt.get("asc")
)
d[col] = [
{"id": row[0], "text": row[0], "metric": row[1]}
for row in df.itertuples(index=False)
]
else:
df = df.sort_values(col, ascending=flt.get("asc"))
d[col] = [
{"id": row[0], "text": row[0]}
for row in df.itertuples(index=False)
]
else:
                # no options for this filter column (missing or empty dataframe)
                d[col] = []
return d
class ParallelCoordinatesViz(BaseViz):
"""Interactive parallel coordinate implementation
Uses this amazing javascript library
https://github.com/syntagmatic/parallel-coordinates
"""
viz_type = "para"
verbose_name = _("Parallel Coordinates")
credits = (
'<a href="https://syntagmatic.github.io/parallel-coordinates/">'
"Syntagmatic's library</a>"
)
is_timeseries = False
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
d["groupby"] = [fd.get("series")]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
return df.to_dict(orient="records")
class HeatmapViz(BaseViz):
"""A nice heatmap visualization that support high density through canvas"""
viz_type = "heatmap"
verbose_name = _("Heatmap")
is_timeseries = False
credits = (
'inspired from mbostock @<a href="http://bl.ocks.org/mbostock/3074470">'
"bl.ocks.org</a>"
)
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
d["metrics"] = [fd.get("metric")]
d["groupby"] = [fd.get("all_columns_x"), fd.get("all_columns_y")]
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
x = fd.get("all_columns_x")
y = fd.get("all_columns_y")
v = self.metric_labels[0]
if x == y:
df.columns = ["x", "y", "v"]
else:
df = df[[x, y, v]]
df.columns = ["x", "y", "v"]
norm = fd.get("normalize_across")
overall = False
max_ = df.v.max()
min_ = df.v.min()
if norm == "heatmap":
overall = True
else:
gb = df.groupby(norm, group_keys=False)
if len(gb) <= 1:
overall = True
else:
df["perc"] = gb.apply(
lambda x: (x.v - x.v.min()) / (x.v.max() - x.v.min())
)
df["rank"] = gb.apply(lambda x: x.v.rank(pct=True))
if overall:
df["perc"] = (df.v - min_) / (max_ - min_)
df["rank"] = df.v.rank(pct=True)
return {"records": df.to_dict(orient="records"), "extents": [min_, max_]}
class HorizonViz(NVD3TimeSeriesViz):
"""Horizon chart
https://www.npmjs.com/package/d3-horizon-chart
"""
viz_type = "horizon"
verbose_name = _("Horizon Charts")
credits = (
'<a href="https://www.npmjs.com/package/d3-horizon-chart">'
"d3-horizon-chart</a>"
)
class MapboxViz(BaseViz):
"""Rich maps made with Mapbox"""
viz_type = "mapbox"
verbose_name = _("Mapbox")
is_timeseries = False
credits = "<a href=https://www.mapbox.com/mapbox-gl-js/api/>Mapbox GL JS</a>"
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
fd = self.form_data
label_col = fd.get("mapbox_label")
if not fd.get("groupby"):
if fd.get("all_columns_x") is None or fd.get("all_columns_y") is None:
raise QueryObjectValidationError(
_("[Longitude] and [Latitude] must be set")
)
d["columns"] = [fd.get("all_columns_x"), fd.get("all_columns_y")]
if label_col and len(label_col) >= 1:
if label_col[0] == "count":
raise QueryObjectValidationError(
_(
"Must have a [Group By] column to have 'count' as the "
+ "[Label]"
)
)
d["columns"].append(label_col[0])
if fd.get("point_radius") != "Auto":
d["columns"].append(fd.get("point_radius"))
d["columns"] = list(set(d["columns"]))
else:
# Ensuring columns chosen are all in group by
if (
label_col
and len(label_col) >= 1
and label_col[0] != "count"
and label_col[0] not in fd["groupby"]
):
raise QueryObjectValidationError(
_("Choice of [Label] must be present in [Group By]")
)
if (
fd.get("point_radius") != "Auto"
and fd.get("point_radius") not in fd["groupby"]
):
raise QueryObjectValidationError(
_("Choice of [Point Radius] must be present in [Group By]")
)
if (
fd.get("all_columns_x") not in fd["groupby"]
or fd.get("all_columns_y") not in fd["groupby"]
):
raise QueryObjectValidationError(
_(
"[Longitude] and [Latitude] columns must be present in "
+ "[Group By]"
)
)
return d
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
label_col = fd.get("mapbox_label")
has_custom_metric = label_col is not None and len(label_col) > 0
metric_col = [None] * len(df.index)
if has_custom_metric:
if label_col[0] == fd.get("all_columns_x"): # type: ignore
metric_col = df[fd.get("all_columns_x")]
elif label_col[0] == fd.get("all_columns_y"): # type: ignore
metric_col = df[fd.get("all_columns_y")]
else:
metric_col = df[label_col[0]] # type: ignore
point_radius_col = (
[None] * len(df.index)
if fd.get("point_radius") == "Auto"
else df[fd.get("point_radius")]
)
# limiting geo precision as long decimal values trigger issues
# around json-bignumber in Mapbox
GEO_PRECISION = 10
# using geoJSON formatting
geo_json = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {"metric": metric, "radius": point_radius},
"geometry": {
"type": "Point",
"coordinates": [
round(lon, GEO_PRECISION),
round(lat, GEO_PRECISION),
],
},
}
for lon, lat, metric, point_radius in zip(
df[fd.get("all_columns_x")],
df[fd.get("all_columns_y")],
metric_col,
point_radius_col,
)
],
}
x_series, y_series = df[fd.get("all_columns_x")], df[fd.get("all_columns_y")]
south_west = [x_series.min(), y_series.min()]
north_east = [x_series.max(), y_series.max()]
return {
"geoJSON": geo_json,
"hasCustomMetric": has_custom_metric,
"mapboxApiKey": config["MAPBOX_API_KEY"],
"mapStyle": fd.get("mapbox_style"),
"aggregatorName": fd.get("pandas_aggfunc"),
"clusteringRadius": fd.get("clustering_radius"),
"pointRadiusUnit": fd.get("point_radius_unit"),
"globalOpacity": fd.get("global_opacity"),
"bounds": [south_west, north_east],
"renderWhileDragging": fd.get("render_while_dragging"),
"tooltip": fd.get("rich_tooltip"),
"color": fd.get("mapbox_color"),
}
class DeckGLMultiLayer(BaseViz):
"""Pile on multiple DeckGL layers"""
viz_type = "deck_multi"
verbose_name = _("Deck.gl - Multiple Layers")
is_timeseries = False
credits = '<a href="https://uber.github.io/deck.gl/">deck.gl</a>'
def query_obj(self) -> QueryObjectDict:
return {}
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
# Late imports to avoid circular import issues
from superset import db
from superset.models.slice import Slice
slice_ids = fd.get("deck_slices")
slices = db.session.query(Slice).filter(Slice.id.in_(slice_ids)).all()
return {
"mapboxApiKey": config["MAPBOX_API_KEY"],
"slices": [slc.data for slc in slices],
}
class BaseDeckGLViz(BaseViz):
"""Base class for deck.gl visualizations"""
is_timeseries = False
credits = '<a href="https://uber.github.io/deck.gl/">deck.gl</a>'
spatial_control_keys: List[str] = []
def get_metrics(self) -> List[str]:
self.metric = self.form_data.get("size")
return [self.metric] if self.metric else []
def process_spatial_query_obj(self, key: str, group_by: List[str]) -> None:
group_by.extend(self.get_spatial_columns(key))
def get_spatial_columns(self, key: str) -> List[str]:
spatial = self.form_data.get(key)
if spatial is None:
raise ValueError(_("Bad spatial key"))
if spatial.get("type") == "latlong":
return [spatial.get("lonCol"), spatial.get("latCol")]
elif spatial.get("type") == "delimited":
return [spatial.get("lonlatCol")]
elif spatial.get("type") == "geohash":
return [spatial.get("geohashCol")]
return []
@staticmethod
def parse_coordinates(s: Any) -> Optional[Tuple[float, float]]:
if not s:
return None
try:
p = Point(s)
return (p.latitude, p.longitude)
except Exception:
            raise SpatialException(_("Invalid spatial point encountered: %s") % s)
@staticmethod
def reverse_geohash_decode(geohash_code: str) -> Tuple[str, str]:
lat, lng = geohash.decode(geohash_code)
return (lng, lat)
@staticmethod
def reverse_latlong(df: pd.DataFrame, key: str) -> None:
df[key] = [tuple(reversed(o)) for o in df[key] if isinstance(o, (list, tuple))]
def process_spatial_data_obj(self, key: str, df: pd.DataFrame) -> pd.DataFrame:
spatial = self.form_data.get(key)
if spatial is None:
raise ValueError(_("Bad spatial key"))
if spatial.get("type") == "latlong":
df[key] = list(
zip(
pd.to_numeric(df[spatial.get("lonCol")], errors="coerce"),
pd.to_numeric(df[spatial.get("latCol")], errors="coerce"),
)
)
elif spatial.get("type") == "delimited":
lon_lat_col = spatial.get("lonlatCol")
df[key] = df[lon_lat_col].apply(self.parse_coordinates)
del df[lon_lat_col]
elif spatial.get("type") == "geohash":
df[key] = df[spatial.get("geohashCol")].map(self.reverse_geohash_decode)
del df[spatial.get("geohashCol")]
if spatial.get("reverseCheckbox"):
self.reverse_latlong(df, key)
if df.get(key) is None:
raise NullValueException(
_(
"Encountered invalid NULL spatial entry, \
please consider filtering those out"
)
)
return df
def add_null_filters(self) -> None:
fd = self.form_data
spatial_columns = set()
for key in self.spatial_control_keys:
for column in self.get_spatial_columns(key):
spatial_columns.add(column)
if fd.get("adhoc_filters") is None:
fd["adhoc_filters"] = []
line_column = fd.get("line_column")
if line_column:
spatial_columns.add(line_column)
for column in sorted(spatial_columns):
filter_ = to_adhoc({"col": column, "op": "IS NOT NULL", "val": ""})
fd["adhoc_filters"].append(filter_)
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
# add NULL filters
if fd.get("filter_nulls", True):
self.add_null_filters()
d = super().query_obj()
gb: List[str] = []
for key in self.spatial_control_keys:
self.process_spatial_query_obj(key, gb)
if fd.get("dimension"):
gb += [fd["dimension"]]
if fd.get("js_columns"):
gb += fd.get("js_columns") or []
metrics = self.get_metrics()
gb = list(set(gb))
if metrics:
d["groupby"] = gb
d["metrics"] = metrics
d["columns"] = []
else:
d["columns"] = gb
return d
def get_js_columns(self, d: Dict[str, Any]) -> Dict[str, Any]:
cols = self.form_data.get("js_columns") or []
return {col: d.get(col) for col in cols}
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
# Processing spatial info
for key in self.spatial_control_keys:
df = self.process_spatial_data_obj(key, df)
features = []
for d in df.to_dict(orient="records"):
feature = self.get_properties(d)
extra_props = self.get_js_columns(d)
if extra_props:
feature["extraProps"] = extra_props
features.append(feature)
return {
"features": features,
"mapboxApiKey": config["MAPBOX_API_KEY"],
"metricLabels": self.metric_labels,
}
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
raise NotImplementedError()
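# Illustrative sketch (column names are hypothetical): each key listed in
# spatial_control_keys is configured in form_data with one of the three shapes
# handled by get_spatial_columns / process_spatial_data_obj above:
# >>> {"type": "latlong", "lonCol": "lon", "latCol": "lat"}
# >>> {"type": "delimited", "lonlatCol": "lonlat"}
# >>> {"type": "geohash", "geohashCol": "geohash"}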
class DeckScatterViz(BaseDeckGLViz):
"""deck.gl's ScatterLayer"""
viz_type = "deck_scatter"
verbose_name = _("Deck.gl - Scatter plot")
spatial_control_keys = ["spatial"]
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
self.point_radius_fixed = fd.get("point_radius_fixed") or {
"type": "fix",
"value": 500,
}
return super().query_obj()
def get_metrics(self) -> List[str]:
self.metric = None
if self.point_radius_fixed.get("type") == "metric":
self.metric = self.point_radius_fixed["value"]
return [self.metric]
return []
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
return {
"metric": d.get(self.metric_label) if self.metric_label else None,
"radius": self.fixed_value
if self.fixed_value
else d.get(self.metric_label)
if self.metric_label
else None,
"cat_color": d.get(self.dim) if self.dim else None,
"position": d.get("spatial"),
DTTM_ALIAS: d.get(DTTM_ALIAS),
}
def get_data(self, df: pd.DataFrame) -> VizData:
fd = self.form_data
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
self.point_radius_fixed = fd.get("point_radius_fixed")
self.fixed_value = None
self.dim = self.form_data.get("dimension")
if self.point_radius_fixed and self.point_radius_fixed.get("type") != "metric":
self.fixed_value = self.point_radius_fixed.get("value")
return super().get_data(df)
class DeckScreengrid(BaseDeckGLViz):
"""deck.gl's ScreenGridLayer"""
viz_type = "deck_screengrid"
verbose_name = _("Deck.gl - Screen Grid")
spatial_control_keys = ["spatial"]
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
return super().query_obj()
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
return {
"position": d.get("spatial"),
"weight": (d.get(self.metric_label) if self.metric_label else None) or 1,
"__timestamp": d.get(DTTM_ALIAS) or d.get("__time"),
}
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
return super().get_data(df)
class DeckGrid(BaseDeckGLViz):
"""deck.gl's DeckLayer"""
viz_type = "deck_grid"
verbose_name = _("Deck.gl - 3D Grid")
spatial_control_keys = ["spatial"]
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
return {
"position": d.get("spatial"),
"weight": (d.get(self.metric_label) if self.metric_label else None) or 1,
}
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
return super().get_data(df)
def geohash_to_json(geohash_code: str) -> List[List[float]]:
p = geohash.bbox(geohash_code)
return [
[p.get("w"), p.get("n")],
[p.get("e"), p.get("n")],
[p.get("e"), p.get("s")],
[p.get("w"), p.get("s")],
[p.get("w"), p.get("n")],
]
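# Illustrative usage sketch (the geohash string is arbitrary): geohash_to_json
# returns the bounding box as a closed ring of five [lon, lat] corners
# (NW, NE, SE, SW, NW), ready to feed a deck.gl path or polygon.
# >>> ring = geohash_to_json("9q8yy")
# >>> len(ring)
# 5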
class DeckPathViz(BaseDeckGLViz):
"""deck.gl's PathLayer"""
viz_type = "deck_path"
verbose_name = _("Deck.gl - Paths")
deck_viz_key = "path"
is_timeseries = True
deser_map = {
"json": json.loads,
"polyline": polyline.decode,
"geohash": geohash_to_json,
}
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
d = super().query_obj()
self.metric = fd.get("metric")
line_col = fd.get("line_column")
if d["metrics"]:
self.has_metrics = True
d["groupby"].append(line_col)
else:
self.has_metrics = False
d["columns"].append(line_col)
return d
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
fd = self.form_data
line_type = fd["line_type"]
deser = self.deser_map[line_type]
line_column = fd["line_column"]
path = deser(d[line_column])
if fd.get("reverse_long_lat"):
path = [(o[1], o[0]) for o in path]
d[self.deck_viz_key] = path
if line_type != "geohash":
del d[line_column]
d["__timestamp"] = d.get(DTTM_ALIAS) or d.get("__time")
return d
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
return super().get_data(df)
class DeckPolygon(DeckPathViz):
"""deck.gl's Polygon Layer"""
viz_type = "deck_polygon"
deck_viz_key = "polygon"
verbose_name = _("Deck.gl - Polygon")
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.elevation = fd.get("point_radius_fixed") or {"type": "fix", "value": 500}
return super().query_obj()
def get_metrics(self) -> List[str]:
metrics = [self.form_data.get("metric")]
if self.elevation.get("type") == "metric":
metrics.append(self.elevation.get("value"))
return [metric for metric in metrics if metric]
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
super().get_properties(d)
fd = self.form_data
elevation = fd["point_radius_fixed"]["value"]
type_ = fd["point_radius_fixed"]["type"]
d["elevation"] = (
d.get(utils.get_metric_name(elevation)) if type_ == "metric" else elevation
)
return d
class DeckHex(BaseDeckGLViz):
"""deck.gl's DeckLayer"""
viz_type = "deck_hex"
verbose_name = _("Deck.gl - 3D HEX")
spatial_control_keys = ["spatial"]
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
return {
"position": d.get("spatial"),
"weight": (d.get(self.metric_label) if self.metric_label else None) or 1,
}
def get_data(self, df: pd.DataFrame) -> VizData:
self.metric_label = utils.get_metric_name(self.metric) if self.metric else None
        return super().get_data(df)
class DeckGeoJson(BaseDeckGLViz):
"""deck.gl's GeoJSONLayer"""
viz_type = "deck_geojson"
verbose_name = _("Deck.gl - GeoJSON")
def query_obj(self) -> QueryObjectDict:
d = super().query_obj()
d["columns"] += [self.form_data.get("geojson")]
d["metrics"] = []
d["groupby"] = []
return d
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
geojson = d[self.form_data["geojson"]]
return json.loads(geojson)
class DeckArc(BaseDeckGLViz):
"""deck.gl's Arc Layer"""
viz_type = "deck_arc"
verbose_name = _("Deck.gl - Arc")
spatial_control_keys = ["start_spatial", "end_spatial"]
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
fd = self.form_data
self.is_timeseries = bool(fd.get("time_grain_sqla") or fd.get("granularity"))
return super().query_obj()
def get_properties(self, d: Dict[str, Any]) -> Dict[str, Any]:
dim = self.form_data.get("dimension")
return {
"sourcePosition": d.get("start_spatial"),
"targetPosition": d.get("end_spatial"),
"cat_color": d.get(dim) if dim else None,
DTTM_ALIAS: d.get(DTTM_ALIAS),
}
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
d = super().get_data(df)
return {
"features": d["features"], # type: ignore
"mapboxApiKey": config["MAPBOX_API_KEY"],
}
class EventFlowViz(BaseViz):
"""A visualization to explore patterns in event sequences"""
viz_type = "event_flow"
verbose_name = _("Event flow")
credits = 'from <a href="https://github.com/williaster/data-ui">@data-ui</a>'
is_timeseries = True
def query_obj(self) -> QueryObjectDict:
query = super().query_obj()
form_data = self.form_data
event_key = form_data["all_columns_x"]
entity_key = form_data["entity"]
meta_keys = [
col
for col in form_data["all_columns"]
if col != event_key and col != entity_key
]
query["columns"] = [event_key, entity_key] + meta_keys
if form_data["order_by_entity"]:
query["orderby"] = [(entity_key, True)]
return query
def get_data(self, df: pd.DataFrame) -> VizData:
return df.to_dict(orient="records")
class PairedTTestViz(BaseViz):
"""A table displaying paired t-test values"""
viz_type = "paired_ttest"
verbose_name = _("Time Series - Paired t-test")
sort_series = False
is_timeseries = True
def get_data(self, df: pd.DataFrame) -> VizData:
"""
Transform received data frame into an object of the form:
{
'metric1': [
{
groups: ('groupA', ... ),
values: [ {x, y}, ... ],
}, ...
], ...
}
"""
if df.empty:
return None
fd = self.form_data
groups = fd.get("groupby")
metrics = self.metric_labels
df = df.pivot_table(index=DTTM_ALIAS, columns=groups, values=metrics)
cols = []
        # Get rid of falsy keys
for col in df.columns:
if col == "":
cols.append("N/A")
elif col is None:
cols.append("NULL")
else:
cols.append(col)
df.columns = cols
data: Dict[str, List[Dict[str, Any]]] = {}
series = df.to_dict("series")
for nameSet in df.columns:
# If no groups are defined, nameSet will be the metric name
hasGroup = not isinstance(nameSet, str)
Y = series[nameSet]
d = {
"group": nameSet[1:] if hasGroup else "All",
"values": [{"x": t, "y": Y[t] if t in Y else None} for t in df.index],
}
key = nameSet[0] if hasGroup else nameSet
if key in data:
data[key].append(d)
else:
data[key] = [d]
return data
class RoseViz(NVD3TimeSeriesViz):
viz_type = "rose"
verbose_name = _("Time Series - Nightingale Rose Chart")
sort_series = False
is_timeseries = True
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
data = super().get_data(df)
result: Dict[str, List[Dict[str, str]]] = {}
for datum in data: # type: ignore
key = datum["key"]
for val in datum["values"]:
timestamp = val["x"].value
if not result.get(timestamp):
result[timestamp] = []
value = 0 if math.isnan(val["y"]) else val["y"]
result[timestamp].append(
{
"key": key,
"value": value,
"name": ", ".join(key) if isinstance(key, list) else key,
"time": val["x"],
}
)
return result
class PartitionViz(NVD3TimeSeriesViz):
"""
A hierarchical data visualization with support for time series.
"""
viz_type = "partition"
verbose_name = _("Partition Diagram")
def query_obj(self) -> QueryObjectDict:
query_obj = super().query_obj()
time_op = self.form_data.get("time_series_option", "not_time")
# Return time series data if the user specifies so
query_obj["is_timeseries"] = time_op != "not_time"
return query_obj
def levels_for(
self, time_op: str, groups: List[str], df: pd.DataFrame
) -> Dict[int, pd.Series]:
"""
Compute the partition at each `level` from the dataframe.
"""
levels = {}
for i in range(0, len(groups) + 1):
agg_df = df.groupby(groups[:i]) if i else df
levels[i] = (
agg_df.mean()
if time_op == "agg_mean"
else agg_df.sum(numeric_only=True)
)
return levels
def levels_for_diff(
self, time_op: str, groups: List[str], df: pd.DataFrame
) -> Dict[int, pd.DataFrame]:
# Obtain a unique list of the time grains
times = list(set(df[DTTM_ALIAS]))
times.sort()
until = times[len(times) - 1]
since = times[0]
# Function describing how to calculate the difference
func = {
"point_diff": [pd.Series.sub, lambda a, b, fill_value: a - b],
"point_factor": [pd.Series.div, lambda a, b, fill_value: a / float(b)],
"point_percent": [
lambda a, b, fill_value=0: a.div(b, fill_value=fill_value) - 1,
lambda a, b, fill_value: a / float(b) - 1,
],
}[time_op]
agg_df = df.groupby(DTTM_ALIAS).sum()
levels = {
0: pd.Series(
{
m: func[1](agg_df[m][until], agg_df[m][since], 0)
for m in agg_df.columns
}
)
}
for i in range(1, len(groups) + 1):
agg_df = df.groupby([DTTM_ALIAS] + groups[:i]).sum()
levels[i] = pd.DataFrame(
{
m: func[0](agg_df[m][until], agg_df[m][since], fill_value=0)
for m in agg_df.columns
}
)
return levels
def levels_for_time(
self, groups: List[str], df: pd.DataFrame
) -> Dict[int, VizData]:
procs = {}
for i in range(0, len(groups) + 1):
self.form_data["groupby"] = groups[:i]
            df_drop = df.drop(groups[i:], axis=1)
procs[i] = self.process_data(df_drop, aggregate=True)
self.form_data["groupby"] = groups
return procs
def nest_values(
self,
levels: Dict[int, pd.DataFrame],
level: int = 0,
metric: Optional[str] = None,
dims: Optional[List[str]] = None,
) -> List[Dict[str, Any]]:
"""
Nest values at each level on the back-end with
access and setting, instead of summing from the bottom.
"""
if dims is None:
dims = []
if not level:
return [
{
"name": m,
"val": levels[0][m],
"children": self.nest_values(levels, 1, m),
}
for m in levels[0].index
]
if level == 1:
metric_level = levels[1][metric]
return [
{
"name": i,
"val": metric_level[i],
"children": self.nest_values(levels, 2, metric, [i]),
}
for i in metric_level.index
]
if level >= len(levels):
return []
dim_level = levels[level][metric][[dims[0]]]
return [
{
"name": i,
"val": dim_level[i],
"children": self.nest_values(levels, level + 1, metric, dims + [i]),
}
for i in dim_level.index
]
def nest_procs(
self,
procs: Dict[int, pd.DataFrame],
level: int = -1,
dims: Optional[Tuple[str, ...]] = None,
time: Any = None,
) -> List[Dict[str, Any]]:
if dims is None:
dims = ()
if level == -1:
return [
{"name": m, "children": self.nest_procs(procs, 0, (m,))}
for m in procs[0].columns
]
if not level:
return [
{
"name": t,
"val": procs[0][dims[0]][t],
"children": self.nest_procs(procs, 1, dims, t),
}
for t in procs[0].index
]
if level >= len(procs):
return []
return [
{
"name": i,
"val": procs[level][dims][i][time],
"children": self.nest_procs(procs, level + 1, dims + (i,), time),
}
for i in procs[level][dims].columns
]
def get_data(self, df: pd.DataFrame) -> VizData:
if df.empty:
return None
fd = self.form_data
groups = fd.get("groupby", [])
time_op = fd.get("time_series_option", "not_time")
if not len(groups):
raise ValueError("Please choose at least one groupby")
if time_op == "not_time":
levels = self.levels_for("agg_sum", groups, df)
elif time_op in ["agg_sum", "agg_mean"]:
levels = self.levels_for(time_op, groups, df)
elif time_op in ["point_diff", "point_factor", "point_percent"]:
levels = self.levels_for_diff(time_op, groups, df)
elif time_op == "adv_anal":
procs = self.levels_for_time(groups, df)
return self.nest_procs(procs)
else:
levels = self.levels_for("agg_sum", [DTTM_ALIAS] + groups, df)
return self.nest_values(levels)
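# Illustrative sketch (metric label, group values and numbers are hypothetical):
# nest_values produces one name/val/children tree per metric, e.g. for a single
# metric grouped by one column:
# >>> [{"name": "sum__num", "val": 1234.0,
# ...   "children": [{"name": "CA", "val": 800.0, "children": []},
# ...                {"name": "NY", "val": 434.0, "children": []}]}]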
viz_types = {
o.viz_type: o
for o in globals().values()
if (
inspect.isclass(o)
and issubclass(o, BaseViz)
and o.viz_type not in config["VIZ_TYPE_DENYLIST"]
)
}
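# Illustrative usage sketch (constructor arguments are hypothetical): viz_types
# maps each viz_type string to its class so a visualization can be looked up
# and instantiated by name, e.g.
# >>> viz_cls = viz_types["sankey"]           # -> SankeyViz
# >>> viz = viz_cls(datasource, form_data)    # datasource/form_data from the caller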
|
airbnb/superset
|
superset/viz.py
|
Python
|
apache-2.0
| 104,323
|
import os.path
import warnings
from glob import glob
from io import BytesIO
from numbers import Number
from pathlib import Path
from textwrap import dedent
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Hashable,
Iterable,
Mapping,
Tuple,
Union,
)
import numpy as np
from .. import DataArray, Dataset, auto_combine, backends, coding, conventions
from ..core import indexing
from ..core.combine import (
_infer_concat_order_from_positions,
_nested_combine,
combine_by_coords,
)
from ..core.utils import close_on_error, is_grib_path, is_remote_uri
from .common import AbstractDataStore, ArrayWriter
from .locks import _get_scheduler
if TYPE_CHECKING:
try:
from dask.delayed import Delayed
except ImportError:
Delayed = None
DATAARRAY_NAME = "__xarray_dataarray_name__"
DATAARRAY_VARIABLE = "__xarray_dataarray_variable__"
def _get_default_engine_remote_uri():
try:
import netCDF4 # noqa: F401
engine = "netcdf4"
except ImportError: # pragma: no cover
try:
import pydap # noqa: F401
engine = "pydap"
except ImportError:
raise ValueError(
"netCDF4 or pydap is required for accessing "
"remote datasets via OPeNDAP"
)
return engine
def _get_default_engine_grib():
msgs = []
try:
import Nio # noqa: F401
msgs += ["set engine='pynio' to access GRIB files with PyNIO"]
except ImportError: # pragma: no cover
pass
try:
import cfgrib # noqa: F401
msgs += ["set engine='cfgrib' to access GRIB files with cfgrib"]
except ImportError: # pragma: no cover
pass
if msgs:
raise ValueError(" or\n".join(msgs))
else:
raise ValueError("PyNIO or cfgrib is required for accessing " "GRIB files")
def _get_default_engine_gz():
try:
import scipy # noqa: F401
engine = "scipy"
except ImportError: # pragma: no cover
raise ValueError("scipy is required for accessing .gz files")
return engine
def _get_default_engine_netcdf():
try:
import netCDF4 # noqa: F401
engine = "netcdf4"
except ImportError: # pragma: no cover
try:
import scipy.io.netcdf # noqa: F401
engine = "scipy"
except ImportError:
raise ValueError(
"cannot read or write netCDF files without "
"netCDF4-python or scipy installed"
)
return engine
def _get_engine_from_magic_number(filename_or_obj):
# check byte header to determine file type
if isinstance(filename_or_obj, bytes):
magic_number = filename_or_obj[:8]
else:
if filename_or_obj.tell() != 0:
raise ValueError(
"file-like object read/write pointer not at zero "
"please close and reopen, or use a context "
"manager"
)
magic_number = filename_or_obj.read(8)
filename_or_obj.seek(0)
if magic_number.startswith(b"CDF"):
engine = "scipy"
elif magic_number.startswith(b"\211HDF\r\n\032\n"):
engine = "h5netcdf"
if isinstance(filename_or_obj, bytes):
raise ValueError(
"can't open netCDF4/HDF5 as bytes "
"try passing a path or file-like object"
)
else:
if isinstance(filename_or_obj, bytes) and len(filename_or_obj) > 80:
filename_or_obj = filename_or_obj[:80] + b"..."
raise ValueError(
"{} is not a valid netCDF file "
"did you mean to pass a string for a path instead?".format(filename_or_obj)
)
return engine
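# Illustrative sketch: the two signatures checked above are the classic netCDF3
# magic bytes ("CDF") and the 8-byte HDF5 file signature. Raw netCDF3 bytes
# resolve to the scipy engine, while raw HDF5 bytes raise, since h5netcdf needs
# a path or file-like object rather than a bytes blob.
# >>> _get_engine_from_magic_number(b"CDF\x01" + b"\x00" * 4)
# 'scipy'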
def _get_default_engine(path, allow_remote=False):
if allow_remote and is_remote_uri(path):
engine = _get_default_engine_remote_uri()
elif is_grib_path(path):
engine = _get_default_engine_grib()
elif path.endswith(".gz"):
engine = _get_default_engine_gz()
else:
engine = _get_default_engine_netcdf()
return engine
def _normalize_path(path):
if is_remote_uri(path):
return path
else:
return os.path.abspath(os.path.expanduser(path))
def _validate_dataset_names(dataset):
"""DataArray.name and Dataset keys must be a string or None"""
def check_name(name):
if isinstance(name, str):
if not name:
raise ValueError(
"Invalid name for DataArray or Dataset key: "
"string must be length 1 or greater for "
"serialization to netCDF files"
)
elif name is not None:
raise TypeError(
"DataArray.name or Dataset key must be either a "
"string or None for serialization to netCDF files"
)
for k in dataset.variables:
check_name(k)
def _validate_attrs(dataset):
"""`attrs` must have a string key and a value which is either: a number,
a string, an ndarray or a list/tuple of numbers/strings.
"""
def check_attr(name, value):
if isinstance(name, str):
if not name:
raise ValueError(
"Invalid name for attr: string must be "
"length 1 or greater for serialization to "
"netCDF files"
)
else:
raise TypeError(
"Invalid name for attr: {} must be a string for "
"serialization to netCDF files".format(name)
)
if not isinstance(value, (str, Number, np.ndarray, np.number, list, tuple)):
raise TypeError(
"Invalid value for attr: {} must be a number, "
"a string, an ndarray or a list/tuple of "
"numbers/strings for serialization to netCDF "
"files".format(value)
)
# Check attrs on the dataset itself
for k, v in dataset.attrs.items():
check_attr(k, v)
# Check attrs on each variable within the dataset
for variable in dataset.variables.values():
for k, v in variable.attrs.items():
check_attr(k, v)
def _protect_dataset_variables_inplace(dataset, cache):
for name, variable in dataset.variables.items():
if name not in variable.dims:
# no need to protect IndexVariable objects
data = indexing.CopyOnWriteArray(variable._data)
if cache:
data = indexing.MemoryCachedArray(data)
variable.data = data
def _finalize_store(write, store):
""" Finalize this store by explicitly syncing and closing"""
del write # ensure writing is done first
store.close()
def load_dataset(filename_or_obj, **kwargs):
"""Open, load into memory, and close a Dataset from a file or file-like
object.
This is a thin wrapper around :py:meth:`~xarray.open_dataset`. It differs
from `open_dataset` in that it loads the Dataset into memory, closes the
file, and returns the Dataset. In contrast, `open_dataset` keeps the file
handle open and lazy loads its contents. All parameters are passed directly
to `open_dataset`. See that documentation for further details.
Returns
-------
dataset : Dataset
The newly created Dataset.
See Also
--------
open_dataset
"""
if "cache" in kwargs:
raise TypeError("cache has no effect in this context")
with open_dataset(filename_or_obj, **kwargs) as ds:
return ds.load()
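# Illustrative usage sketch (the path is hypothetical): load_dataset reads the
# whole file into memory and closes it before returning, so no handle is left
# open afterwards.
# >>> ds = load_dataset("observations.nc")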
def load_dataarray(filename_or_obj, **kwargs):
"""Open, load into memory, and close a DataArray from a file or file-like
object containing a single data variable.
This is a thin wrapper around :py:meth:`~xarray.open_dataarray`. It differs
    from `open_dataarray` in that it loads the DataArray into memory, closes the
    file, and returns the DataArray. In contrast, `open_dataarray` keeps the file
handle open and lazy loads its contents. All parameters are passed directly
to `open_dataarray`. See that documentation for further details.
Returns
-------
    dataarray : DataArray
The newly created DataArray.
See Also
--------
open_dataarray
"""
if "cache" in kwargs:
raise TypeError("cache has no effect in this context")
with open_dataarray(filename_or_obj, **kwargs) as da:
return da.load()
def open_dataset(
filename_or_obj,
group=None,
decode_cf=True,
mask_and_scale=None,
decode_times=True,
autoclose=None,
concat_characters=True,
decode_coords=True,
engine=None,
chunks=None,
lock=None,
cache=None,
drop_variables=None,
backend_kwargs=None,
use_cftime=None,
):
"""Open and decode a dataset from a file or file-like object.
Parameters
----------
filename_or_obj : str, Path, file or xarray.backends.*DataStore
Strings and Path objects are interpreted as a path to a netCDF file
or an OpenDAP URL and opened with python-netCDF4, unless the filename
ends with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
netCDF4 files).
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
autoclose : bool, optional
If True, automatically close files to avoid OS Error of too many files
being open. However, this option doesn't work with streams, e.g.,
BytesIO.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib', \
'pseudonetcdf'}, optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
chunks : int or dict, optional
        If chunks is provided, it is used to load the new dataset into dask
arrays. ``chunks={}`` loads the dataset with dask using a single
chunk for all arrays.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
backend_kwargs: dictionary, optional
A dictionary of keyword arguments to pass on to the backend. This
may be useful when backend options would improve performance or
allow user control of dataset processing.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Returns
-------
dataset : Dataset
The newly created dataset.
Notes
-----
``open_dataset`` opens the file with read-only access. When you modify
values of a Dataset, even one linked to files on disk, only the in-memory
copy you are manipulating in xarray is modified: the original file on disk
is never touched.
See Also
--------
open_mfdataset
"""
engines = [
None,
"netcdf4",
"scipy",
"pydap",
"h5netcdf",
"pynio",
"cfgrib",
"pseudonetcdf",
]
if engine not in engines:
raise ValueError(
"unrecognized engine for open_dataset: {}\n"
"must be one of: {}".format(engine, engines)
)
if autoclose is not None:
warnings.warn(
"The autoclose argument is no longer used by "
"xarray.open_dataset() and is now ignored; it will be removed in "
"a future version of xarray. If necessary, you can control the "
"maximum number of simultaneous open files with "
"xarray.set_options(file_cache_maxsize=...).",
FutureWarning,
stacklevel=2,
)
if mask_and_scale is None:
        mask_and_scale = engine != "pseudonetcdf"
if not decode_cf:
mask_and_scale = False
decode_times = False
concat_characters = False
decode_coords = False
if cache is None:
cache = chunks is None
if backend_kwargs is None:
backend_kwargs = {}
def maybe_decode_store(store, lock=False):
ds = conventions.decode_cf(
store,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
concat_characters=concat_characters,
decode_coords=decode_coords,
drop_variables=drop_variables,
use_cftime=use_cftime,
)
_protect_dataset_variables_inplace(ds, cache)
if chunks is not None:
from dask.base import tokenize
# if passed an actual file path, augment the token with
# the file modification time
if isinstance(filename_or_obj, str) and not is_remote_uri(filename_or_obj):
mtime = os.path.getmtime(filename_or_obj)
else:
mtime = None
token = tokenize(
filename_or_obj,
mtime,
group,
decode_cf,
mask_and_scale,
decode_times,
concat_characters,
decode_coords,
engine,
chunks,
drop_variables,
use_cftime,
)
name_prefix = "open_dataset-%s" % token
ds2 = ds.chunk(chunks, name_prefix=name_prefix, token=token)
ds2._file_obj = ds._file_obj
else:
ds2 = ds
return ds2
if isinstance(filename_or_obj, Path):
filename_or_obj = str(filename_or_obj)
if isinstance(filename_or_obj, AbstractDataStore):
store = filename_or_obj
elif isinstance(filename_or_obj, str):
filename_or_obj = _normalize_path(filename_or_obj)
if engine is None:
engine = _get_default_engine(filename_or_obj, allow_remote=True)
if engine == "netcdf4":
store = backends.NetCDF4DataStore.open(
filename_or_obj, group=group, lock=lock, **backend_kwargs
)
elif engine == "scipy":
store = backends.ScipyDataStore(filename_or_obj, **backend_kwargs)
elif engine == "pydap":
store = backends.PydapDataStore.open(filename_or_obj, **backend_kwargs)
elif engine == "h5netcdf":
store = backends.H5NetCDFStore(
filename_or_obj, group=group, lock=lock, **backend_kwargs
)
elif engine == "pynio":
store = backends.NioDataStore(filename_or_obj, lock=lock, **backend_kwargs)
elif engine == "pseudonetcdf":
store = backends.PseudoNetCDFDataStore.open(
filename_or_obj, lock=lock, **backend_kwargs
)
elif engine == "cfgrib":
store = backends.CfGribDataStore(
filename_or_obj, lock=lock, **backend_kwargs
)
else:
if engine not in [None, "scipy", "h5netcdf"]:
raise ValueError(
"can only read bytes or file-like objects "
"with engine='scipy' or 'h5netcdf'"
)
engine = _get_engine_from_magic_number(filename_or_obj)
if engine == "scipy":
store = backends.ScipyDataStore(filename_or_obj, **backend_kwargs)
elif engine == "h5netcdf":
store = backends.H5NetCDFStore(
filename_or_obj, group=group, lock=lock, **backend_kwargs
)
with close_on_error(store):
ds = maybe_decode_store(store)
# Ensure source filename always stored in dataset object (GH issue #2550)
if "source" not in ds.encoding:
if isinstance(filename_or_obj, str):
ds.encoding["source"] = filename_or_obj
return ds
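# Illustrative usage sketch (path and chunk sizes are hypothetical; chunking
# requires dask): open_dataset keeps the file handle open and loads lazily.
# >>> ds = open_dataset("observations.nc", chunks={"time": 365})
# >>> ds.close()  # or: with open_dataset("observations.nc") as ds: ...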
def open_dataarray(
filename_or_obj,
group=None,
decode_cf=True,
mask_and_scale=None,
decode_times=True,
autoclose=None,
concat_characters=True,
decode_coords=True,
engine=None,
chunks=None,
lock=None,
cache=None,
drop_variables=None,
backend_kwargs=None,
use_cftime=None,
):
"""Open an DataArray from a file or file-like object containing a single
data variable.
This is designed to read netCDF files with only one data variable. If
multiple variables are present then a ValueError is raised.
Parameters
----------
filename_or_obj : str, Path, file or xarray.backends.*DataStore
Strings and Paths are interpreted as a path to a netCDF file or an
OpenDAP URL and opened with python-netCDF4, unless the filename ends
with .gz, in which case the file is gunzipped and opened with
scipy.io.netcdf (only netCDF3 supported). Byte-strings or file-like
objects are opened by scipy.io.netcdf (netCDF3) or h5py (netCDF4/HDF).
group : str, optional
Path to the netCDF4 group in the given file to open (only works for
netCDF4 files).
decode_cf : bool, optional
Whether to decode these variables, assuming they were saved according
to CF conventions.
mask_and_scale : bool, optional
If True, replace array values equal to `_FillValue` with NA and scale
values according to the formula `original_values * scale_factor +
add_offset`, where `_FillValue`, `scale_factor` and `add_offset` are
taken from variable attributes (if they exist). If the `_FillValue` or
`missing_value` attribute contains multiple values a warning will be
issued and all array values matching one of the multiple values will
be replaced by NA. mask_and_scale defaults to True except for the
pseudonetcdf backend.
decode_times : bool, optional
If True, decode times encoded in the standard NetCDF datetime format
into datetime objects. Otherwise, leave them encoded as numbers.
concat_characters : bool, optional
If True, concatenate along the last dimension of character arrays to
form string arrays. Dimensions will only be concatenated over (and
removed) if they have no corresponding variable and if they are only
used as the last dimension of character arrays.
decode_coords : bool, optional
If True, decode the 'coordinates' attribute to identify coordinates in
the resulting dataset.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
chunks : int or dict, optional
        If chunks is provided, it is used to load the new dataset into dask
arrays.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False. Does not
change the behavior of coordinates corresponding to dimensions, which
always load their data from disk into a ``pandas.Index``.
drop_variables: string or iterable, optional
A variable or list of variables to exclude from being parsed from the
dataset. This may be useful to drop variables with problems or
inconsistent values.
backend_kwargs: dictionary, optional
A dictionary of keyword arguments to pass on to the backend. This
may be useful when backend options would improve performance or
allow user control of dataset processing.
use_cftime: bool, optional
Only relevant if encoded dates come from a standard calendar
(e.g. 'gregorian', 'proleptic_gregorian', 'standard', or not
specified). If None (default), attempt to decode times to
``np.datetime64[ns]`` objects; if this is not possible, decode times to
``cftime.datetime`` objects. If True, always decode times to
``cftime.datetime`` objects, regardless of whether or not they can be
represented using ``np.datetime64[ns]`` objects. If False, always
decode times to ``np.datetime64[ns]`` objects; if this is not possible
raise an error.
Notes
-----
This is designed to be fully compatible with `DataArray.to_netcdf`. Saving
using `DataArray.to_netcdf` and then loading with this function will
produce an identical result.
All parameters are passed directly to `xarray.open_dataset`. See that
documentation for further details.
See also
--------
open_dataset
"""
dataset = open_dataset(
filename_or_obj,
group=group,
decode_cf=decode_cf,
mask_and_scale=mask_and_scale,
decode_times=decode_times,
autoclose=autoclose,
concat_characters=concat_characters,
decode_coords=decode_coords,
engine=engine,
chunks=chunks,
lock=lock,
cache=cache,
drop_variables=drop_variables,
backend_kwargs=backend_kwargs,
use_cftime=use_cftime,
)
if len(dataset.data_vars) != 1:
raise ValueError(
"Given file dataset contains more than one data "
"variable. Please read with xarray.open_dataset and "
"then select the variable you want."
)
else:
data_array, = dataset.data_vars.values()
data_array._file_obj = dataset._file_obj
# Reset names if they were changed during saving
# to ensure that we can 'roundtrip' perfectly
if DATAARRAY_NAME in dataset.attrs:
data_array.name = dataset.attrs[DATAARRAY_NAME]
del dataset.attrs[DATAARRAY_NAME]
if data_array.name == DATAARRAY_VARIABLE:
data_array.name = None
return data_array
class _MultiFileCloser:
__slots__ = ("file_objs",)
def __init__(self, file_objs):
self.file_objs = file_objs
def close(self):
for f in self.file_objs:
f.close()
def open_mfdataset(
paths,
chunks=None,
concat_dim="_not_supplied",
compat="no_conflicts",
preprocess=None,
engine=None,
lock=None,
data_vars="all",
coords="different",
combine="_old_auto",
autoclose=None,
parallel=False,
join="outer",
**kwargs,
):
"""Open multiple files as a single dataset.
If combine='by_coords' then the function ``combine_by_coords`` is used to combine
the datasets into one before returning the result, and if combine='nested' then
``combine_nested`` is used. The filepaths must be structured according to which
combining function is used, the details of which are given in the documentation for
``combine_by_coords`` and ``combine_nested``. By default the old (now deprecated)
    ``auto_combine`` will be used; please specify either ``combine='by_coords'`` or
``combine='nested'`` in future. Requires dask to be installed. See documentation for
details on dask [1]. Attributes from the first dataset file are used for the
combined dataset.
Parameters
----------
paths : str or sequence
Either a string glob in the form "path/to/my/files/*.nc" or an explicit list of
files to open. Paths can be given as strings or as pathlib Paths. If
concatenation along more than one dimension is desired, then ``paths`` must be a
        nested list-of-lists (see ``combine_nested`` for details). (A string glob will
be expanded to a 1-dimensional list.)
chunks : int or dict, optional
Dictionary with keys given by dimension names and values given by chunk sizes.
In general, these should divide the dimensions of each dataset. If int, chunk
each dimension by ``chunks``. By default, chunks will be chosen to load entire
input files into memory at once. This has a major impact on performance: please
see the full documentation for more details [2].
concat_dim : str, or list of str, DataArray, Index or None, optional
Dimensions to concatenate files along. You only need to provide this argument
if any of the dimensions along which you want to concatenate is not a dimension
in the original datasets, e.g., if you want to stack a collection of 2D arrays
along a third dimension. Set ``concat_dim=[..., None, ...]`` explicitly to
disable concatenation along a particular dimension.
combine : {'by_coords', 'nested'}, optional
Whether ``xarray.combine_by_coords`` or ``xarray.combine_nested`` is used to
combine all the data. If this argument is not provided, `xarray.auto_combine` is
used, but in the future this behavior will switch to use
`xarray.combine_by_coords` by default.
compat : {'identical', 'equals', 'broadcast_equals',
'no_conflicts', 'override'}, optional
String indicating how to compare variables of the same name for
potential conflicts when merging:
* 'broadcast_equals': all values must be equal when variables are
broadcast against each other to ensure common dimensions.
* 'equals': all values and dimensions must be the same.
* 'identical': all values, dimensions and attributes must be the
same.
* 'no_conflicts': only values which are not null in both datasets
must be equal. The returned dataset then contains the combination
of all non-null values.
* 'override': skip comparing and pick variable from first dataset
preprocess : callable, optional
If provided, call this function on each dataset prior to concatenation.
You can find the file-name from which each dataset was loaded in
``ds.encoding['source']``.
engine : {'netcdf4', 'scipy', 'pydap', 'h5netcdf', 'pynio', 'cfgrib'}, \
optional
Engine to use when reading files. If not provided, the default engine
is chosen based on available dependencies, with a preference for
'netcdf4'.
lock : False or duck threading.Lock, optional
Resource lock to use when reading data from disk. Only relevant when
using dask or another form of parallelism. By default, appropriate
locks are chosen to safely read and write files with the currently
active dask scheduler.
data_vars : {'minimal', 'different', 'all' or list of str}, optional
These data variables will be concatenated together:
* 'minimal': Only data variables in which the dimension already
appears are included.
* 'different': Data variables which are not equal (ignoring
attributes) across all datasets are also concatenated (as well as
all for which dimension already appears). Beware: this option may
load the data payload of data variables into memory if they are not
already loaded.
* 'all': All data variables will be concatenated.
* list of str: The listed data variables will be concatenated, in
addition to the 'minimal' data variables.
coords : {'minimal', 'different', 'all' or list of str}, optional
These coordinate variables will be concatenated together:
* 'minimal': Only coordinates in which the dimension already appears
are included.
* 'different': Coordinates which are not equal (ignoring attributes)
across all datasets are also concatenated (as well as all for which
dimension already appears). Beware: this option may load the data
payload of coordinate variables into memory if they are not already
loaded.
* 'all': All coordinate variables will be concatenated, except
those corresponding to other dimensions.
* list of str: The listed coordinate variables will be concatenated,
          in addition to the 'minimal' coordinates.
parallel : bool, optional
If True, the open and preprocess steps of this function will be
performed in parallel using ``dask.delayed``. Default is False.
    join : {'outer', 'inner', 'left', 'right', 'exact', 'override'}, optional
String indicating how to combine differing indexes
(excluding concat_dim) in objects
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': instead of aligning, raise `ValueError` when indexes to be
aligned are not equal
- 'override': if indexes are of same size, rewrite indexes to be
those of the first object with that dimension. Indexes for the same
dimension must have the same size in all objects.
**kwargs : optional
Additional arguments passed on to :py:func:`xarray.open_dataset`.
Returns
-------
xarray.Dataset
Notes
-----
``open_mfdataset`` opens files with read-only access. When you modify values
of a Dataset, even one linked to files on disk, only the in-memory copy you
are manipulating in xarray is modified: the original file on disk is never
touched.
See Also
--------
combine_by_coords
combine_nested
auto_combine
open_dataset
References
----------
.. [1] http://xarray.pydata.org/en/stable/dask.html
.. [2] http://xarray.pydata.org/en/stable/dask.html#chunking-and-performance
"""
if isinstance(paths, str):
if is_remote_uri(paths):
raise ValueError(
"cannot do wild-card matching for paths that are remote URLs: "
"{!r}. Instead, supply paths as an explicit list of strings.".format(
paths
)
)
paths = sorted(glob(paths))
else:
paths = [str(p) if isinstance(p, Path) else p for p in paths]
if not paths:
raise OSError("no files to open")
# If combine='by_coords' then this is unnecessary, but quick.
# If combine='nested' then this creates a flat list which is easier to
# iterate over, while saving the originally-supplied structure as "ids"
if combine == "nested":
if str(concat_dim) == "_not_supplied":
raise ValueError("Must supply concat_dim when using " "combine='nested'")
else:
if isinstance(concat_dim, (str, DataArray)) or concat_dim is None:
concat_dim = [concat_dim]
combined_ids_paths = _infer_concat_order_from_positions(paths)
ids, paths = (list(combined_ids_paths.keys()), list(combined_ids_paths.values()))
open_kwargs = dict(
engine=engine, chunks=chunks or {}, lock=lock, autoclose=autoclose, **kwargs
)
if parallel:
import dask
# wrap the open_dataset, getattr, and preprocess with delayed
open_ = dask.delayed(open_dataset)
getattr_ = dask.delayed(getattr)
if preprocess is not None:
preprocess = dask.delayed(preprocess)
else:
open_ = open_dataset
getattr_ = getattr
datasets = [open_(p, **open_kwargs) for p in paths]
file_objs = [getattr_(ds, "_file_obj") for ds in datasets]
if preprocess is not None:
datasets = [preprocess(ds) for ds in datasets]
if parallel:
# calling compute here will return the datasets/file_objs lists,
# the underlying datasets will still be stored as dask arrays
datasets, file_objs = dask.compute(datasets, file_objs)
# Combine all datasets, closing them in case of a ValueError
try:
if combine == "_old_auto":
# Use the old auto_combine for now
# Remove this after deprecation cycle from #2616 is complete
basic_msg = dedent(
"""\
In xarray version 0.15 the default behaviour of `open_mfdataset`
will change. To retain the existing behavior, pass
combine='nested'. To use future default behavior, pass
combine='by_coords'. See
http://xarray.pydata.org/en/stable/combining.html#combining-multi
"""
)
warnings.warn(basic_msg, FutureWarning, stacklevel=2)
combined = auto_combine(
datasets,
concat_dim=concat_dim,
compat=compat,
data_vars=data_vars,
coords=coords,
join=join,
from_openmfds=True,
)
elif combine == "nested":
# Combined nested list by successive concat and merge operations
# along each dimension, using structure given by "ids"
combined = _nested_combine(
datasets,
concat_dims=concat_dim,
compat=compat,
data_vars=data_vars,
coords=coords,
ids=ids,
join=join,
)
elif combine == "by_coords":
# Redo ordering from coordinates, ignoring how they were ordered
# previously
combined = combine_by_coords(
datasets, compat=compat, data_vars=data_vars, coords=coords, join=join
)
else:
raise ValueError(
"{} is an invalid option for the keyword argument"
" ``combine``".format(combine)
)
except ValueError:
for ds in datasets:
ds.close()
raise
combined._file_obj = _MultiFileCloser(file_objs)
combined.attrs = datasets[0].attrs
return combined
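# --- Illustrative usage sketch (editor's addition, not part of the original
# xarray source). Shows how ``open_mfdataset`` above is typically called; the
# "data_*.nc" glob, the part files and the "time" dimension are hypothetical.
def _example_open_mfdataset():
    # Lazily open many files and combine them along their shared coordinates.
    ds_by_coords = open_mfdataset("data_*.nc", combine="by_coords", chunks={"time": 100})
    # Or combine an explicit (possibly nested) list of files along one dimension.
    ds_nested = open_mfdataset(
        ["part0.nc", "part1.nc"], combine="nested", concat_dim="time"
    )
    return ds_by_coords, ds_nested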
WRITEABLE_STORES: Dict[str, Callable] = {
"netcdf4": backends.NetCDF4DataStore.open,
"scipy": backends.ScipyDataStore,
"h5netcdf": backends.H5NetCDFStore,
}
def to_netcdf(
dataset: Dataset,
path_or_file=None,
mode: str = "w",
format: str = None,
group: str = None,
engine: str = None,
encoding: Mapping = None,
unlimited_dims: Iterable[Hashable] = None,
compute: bool = True,
multifile: bool = False,
invalid_netcdf: bool = False,
) -> Union[Tuple[ArrayWriter, AbstractDataStore], bytes, "Delayed", None]:
"""This function creates an appropriate datastore for writing a dataset to
disk as a netCDF file
See `Dataset.to_netcdf` for full API docs.
The ``multifile`` argument is only for the private use of save_mfdataset.
"""
if isinstance(path_or_file, Path):
path_or_file = str(path_or_file)
if encoding is None:
encoding = {}
if path_or_file is None:
if engine is None:
engine = "scipy"
elif engine != "scipy":
raise ValueError(
"invalid engine for creating bytes with "
"to_netcdf: %r. Only the default engine "
"or engine='scipy' is supported" % engine
)
if not compute:
raise NotImplementedError(
"to_netcdf() with compute=False is not yet implemented when "
"returning bytes"
)
elif isinstance(path_or_file, str):
if engine is None:
engine = _get_default_engine(path_or_file)
path_or_file = _normalize_path(path_or_file)
else: # file-like object
engine = "scipy"
# validate Dataset keys, DataArray names, and attr keys/values
_validate_dataset_names(dataset)
_validate_attrs(dataset)
try:
store_open = WRITEABLE_STORES[engine]
except KeyError:
raise ValueError("unrecognized engine for to_netcdf: %r" % engine)
if format is not None:
format = format.upper()
# handle scheduler specific logic
scheduler = _get_scheduler()
have_chunks = any(v.chunks for v in dataset.variables.values())
autoclose = have_chunks and scheduler in ["distributed", "multiprocessing"]
if autoclose and engine == "scipy":
raise NotImplementedError(
"Writing netCDF files with the %s backend "
"is not currently supported with dask's %s "
"scheduler" % (engine, scheduler)
)
target = path_or_file if path_or_file is not None else BytesIO()
kwargs = dict(autoclose=True) if autoclose else {}
if invalid_netcdf:
if engine == "h5netcdf":
kwargs["invalid_netcdf"] = invalid_netcdf
else:
raise ValueError(
"unrecognized option 'invalid_netcdf' for engine %s" % engine
)
store = store_open(target, mode, format, group, **kwargs)
if unlimited_dims is None:
unlimited_dims = dataset.encoding.get("unlimited_dims", None)
if unlimited_dims is not None:
if isinstance(unlimited_dims, str) or not isinstance(unlimited_dims, Iterable):
unlimited_dims = [unlimited_dims]
else:
unlimited_dims = list(unlimited_dims)
writer = ArrayWriter()
# TODO: figure out how to refactor this logic (here and in save_mfdataset)
# to avoid this mess of conditionals
try:
# TODO: allow this work (setting up the file for writing array data)
# to be parallelized with dask
dump_to_store(
dataset, store, writer, encoding=encoding, unlimited_dims=unlimited_dims
)
if autoclose:
store.close()
if multifile:
return writer, store
writes = writer.sync(compute=compute)
if path_or_file is None:
store.sync()
return target.getvalue()
finally:
if not multifile and compute:
store.close()
if not compute:
import dask
return dask.delayed(_finalize_store)(writes, store)
return None
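# --- Illustrative usage sketch (editor's addition, not part of the original
# xarray source). Drives the module-level ``to_netcdf`` above directly; ``ds``
# and "output.nc" are hypothetical, and the in-memory form assumes scipy is
# installed.
def _example_to_netcdf(ds: Dataset):
    # Write to disk; the engine is inferred from the path and installed backends.
    to_netcdf(ds, "output.nc", mode="w")
    # With no target, the dataset is serialized to bytes via the scipy engine.
    return to_netcdf(ds)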
def dump_to_store(
dataset, store, writer=None, encoder=None, encoding=None, unlimited_dims=None
):
"""Store dataset contents to a backends.*DataStore object."""
if writer is None:
writer = ArrayWriter()
if encoding is None:
encoding = {}
variables, attrs = conventions.encode_dataset_coordinates(dataset)
check_encoding = set()
for k, enc in encoding.items():
# no need to shallow copy the variable again; that already happened
# in encode_dataset_coordinates
variables[k].encoding = enc
check_encoding.add(k)
if encoder:
variables, attrs = encoder(variables, attrs)
store.store(variables, attrs, check_encoding, writer, unlimited_dims=unlimited_dims)
def save_mfdataset(
datasets, paths, mode="w", format=None, groups=None, engine=None, compute=True
):
"""Write multiple datasets to disk as netCDF files simultaneously.
This function is intended for use with datasets consisting of dask.array
objects, in which case it can write the multiple datasets to disk
simultaneously using a shared thread pool.
When not using dask, it is no different than calling ``to_netcdf``
repeatedly.
Parameters
----------
datasets : list of xarray.Dataset
List of datasets to save.
paths : list of str or list of Paths
List of paths to which to save each corresponding dataset.
mode : {'w', 'a'}, optional
Write ('w') or append ('a') mode. If mode='w', any existing file at
these locations will be overwritten.
format : {'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_64BIT',
'NETCDF3_CLASSIC'}, optional
File format for the resulting netCDF file:
* NETCDF4: Data is stored in an HDF5 file, using netCDF4 API
features.
* NETCDF4_CLASSIC: Data is stored in an HDF5 file, using only
netCDF 3 compatible API features.
* NETCDF3_64BIT: 64-bit offset version of the netCDF 3 file format,
which fully supports 2+ GB files, but is only compatible with
clients linked against netCDF version 3.6.0 or later.
* NETCDF3_CLASSIC: The classic netCDF 3 file format. It does not
handle 2+ GB files very well.
All formats are supported by the netCDF4-python library.
scipy.io.netcdf only supports the last two formats.
The default format is NETCDF4 if you are saving a file to disk and
have the netCDF4-python library available. Otherwise, xarray falls
back to using scipy to write netCDF files and defaults to the
NETCDF3_64BIT format (scipy does not support netCDF4).
groups : list of str, optional
Paths to the netCDF4 group in each corresponding file to which to save
datasets (only works for format='NETCDF4'). The groups will be created
if necessary.
engine : {'netcdf4', 'scipy', 'h5netcdf'}, optional
Engine to use when writing netCDF files. If not provided, the
default engine is chosen based on available dependencies, with a
preference for 'netcdf4' if writing to a file on disk.
See `Dataset.to_netcdf` for additional information.
compute: boolean
If true compute immediately, otherwise return a
``dask.delayed.Delayed`` object that can be computed later.
Examples
--------
Save a dataset into one netCDF per year of data:
>>> years, datasets = zip(*ds.groupby('time.year'))
>>> paths = ['%s.nc' % y for y in years]
>>> xr.save_mfdataset(datasets, paths)
"""
if mode == "w" and len(set(paths)) < len(paths):
raise ValueError(
"cannot use mode='w' when writing multiple " "datasets to the same path"
)
for obj in datasets:
if not isinstance(obj, Dataset):
raise TypeError(
"save_mfdataset only supports writing Dataset "
"objects, received type %s" % type(obj)
)
if groups is None:
groups = [None] * len(datasets)
if len({len(datasets), len(paths), len(groups)}) > 1:
raise ValueError(
"must supply lists of the same length for the "
"datasets, paths and groups arguments to "
"save_mfdataset"
)
writers, stores = zip(
*[
to_netcdf(
ds, path, mode, format, group, engine, compute=compute, multifile=True
)
for ds, path, group in zip(datasets, paths, groups)
]
)
try:
writes = [w.sync(compute=compute) for w in writers]
finally:
if compute:
for store in stores:
store.close()
if not compute:
import dask
return dask.delayed(
[dask.delayed(_finalize_store)(w, s) for w, s in zip(writes, stores)]
)
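# --- Illustrative usage sketch (editor's addition, not part of the original
# xarray source). Mirrors the docstring example above but defers the writes;
# ``ds`` is a hypothetical dataset with a "time" coordinate and dask needs to
# be installed for compute=False.
def _example_save_mfdataset_delayed(ds: Dataset):
    years, datasets = zip(*ds.groupby("time.year"))
    paths = ["%s.nc" % y for y in years]
    # compute=False returns a dask.delayed object; nothing is written until
    # .compute() is called.
    delayed_writes = save_mfdataset(list(datasets), paths, compute=False)
    delayed_writes.compute()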
def _validate_datatypes_for_zarr_append(dataset):
"""DataArray.name and Dataset keys must be a string or None"""
def check_dtype(var):
if (
not np.issubdtype(var.dtype, np.number)
and not coding.strings.is_unicode_dtype(var.dtype)
and not var.dtype == object
):
# and not re.match('^bytes[1-9]+$', var.dtype.name)):
raise ValueError(
"Invalid dtype for data variable: {} "
"dtype must be a subtype of number, "
"a fixed sized string, a fixed size "
"unicode string or an object".format(var)
)
for k in dataset.data_vars.values():
check_dtype(k)
def _validate_append_dim_and_encoding(
ds_to_append, store, append_dim, encoding, **open_kwargs
):
try:
ds = backends.zarr.open_zarr(store, **open_kwargs)
except ValueError: # store empty
return
if append_dim:
if append_dim not in ds.dims:
raise ValueError(f"{append_dim} not a valid dimension in the Dataset")
for data_var in ds_to_append:
if data_var in ds:
if append_dim is None:
raise ValueError(
"variable '{}' already exists, but append_dim "
"was not set".format(data_var)
)
if data_var in encoding.keys():
raise ValueError(
"variable '{}' already exists, but encoding was"
"provided".format(data_var)
)
def to_zarr(
dataset,
store=None,
mode=None,
synchronizer=None,
group=None,
encoding=None,
compute=True,
consolidated=False,
append_dim=None,
):
"""This function creates an appropriate datastore for writing a dataset to
    a zarr store
See `Dataset.to_zarr` for full API docs.
"""
if isinstance(store, Path):
store = str(store)
if encoding is None:
encoding = {}
# validate Dataset keys, DataArray names, and attr keys/values
_validate_dataset_names(dataset)
_validate_attrs(dataset)
if mode == "a":
_validate_datatypes_for_zarr_append(dataset)
_validate_append_dim_and_encoding(
dataset,
store,
append_dim,
group=group,
consolidated=consolidated,
encoding=encoding,
)
zstore = backends.ZarrStore.open_group(
store=store,
mode=mode,
synchronizer=synchronizer,
group=group,
consolidate_on_close=consolidated,
)
zstore.append_dim = append_dim
writer = ArrayWriter()
# TODO: figure out how to properly handle unlimited_dims
dump_to_store(dataset, zstore, writer, encoding=encoding)
writes = writer.sync(compute=compute)
if compute:
_finalize_store(writes, zstore)
else:
import dask
return dask.delayed(_finalize_store)(writes, zstore)
return zstore
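# --- Illustrative usage sketch (editor's addition, not part of the original
# xarray source). Shows an initial zarr write followed by an append along an
# existing dimension; the store path and "time" dimension are hypothetical.
def _example_to_zarr(ds: Dataset, more: Dataset):
    # Initial write, consolidating metadata for faster subsequent opens.
    to_zarr(ds, store="example.zarr", mode="w", consolidated=True)
    # Append further data along an existing dimension of the same store.
    to_zarr(more, store="example.zarr", mode="a", append_dim="time")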
|
jhamman/xarray
|
xarray/backends/api.py
|
Python
|
apache-2.0
| 48,993
|
from gifi.command import CommandException
from git import Repo, InvalidGitRepositoryError
def get_current_branch(repo):
current_branch = repo.git.rev_parse('--abbrev-ref', 'HEAD')
return current_branch
def check_repo_is_clean(repo):
if repo.is_dirty():
raise CommandException('Please commit all untracked files.')
def get_repo(repo=None):
"""
:rtype : git.Repo
"""
if repo is None:
try:
repo = Repo('.')
except InvalidGitRepositoryError:
raise CommandException('To run this command you need to be in git source code directory.')
return repo
def get_remote_url(remote, repo=None):
repo = get_repo(repo)
config_reader = repo.config_reader()
remote_url = config_reader.get_value('remote "%s"' % remote, "url")
config_reader.release()
return remote_url
def get_from_last_commit_message(repo, item_header):
item_header = item_header + ":"
commit_message_lines = repo.head.commit.message.split('\n')
lines_with_item = [e for e in commit_message_lines if e.lower().startswith(item_header.lower())]
items = map(lambda e: e[len(item_header):].split(','), lines_with_item)
items = [item.strip() for sub_list in items for item in sub_list]
return items
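# --- Illustrative usage sketch (editor's addition, not part of the original
# gifi source). Assumes the working directory is a clean git repository with an
# "origin" remote and that the last commit message may carry a "Reviewers:" line.
def _example_usage():
    repo = get_repo()
    check_repo_is_clean(repo)
    branch = get_current_branch(repo)
    origin_url = get_remote_url('origin', repo)
    reviewers = get_from_last_commit_message(repo, 'Reviewers')
    return branch, origin_url, reviewers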
|
kokosing/git-gifi
|
gifi/utils/git_utils.py
|
Python
|
apache-2.0
| 1,277
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import warnings
from datetime import datetime
from functools import reduce
from itertools import filterfalse, tee
from typing import TYPE_CHECKING, Any, Callable, Dict, Generator, Iterable, List, Optional, Tuple, TypeVar
from urllib import parse
from flask import url_for
from jinja2 import Template
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.utils.module_loading import import_string
if TYPE_CHECKING:
from airflow.models import TaskInstance
KEY_REGEX = re.compile(r'^[\w.-]+$')
CAMELCASE_TO_SNAKE_CASE_REGEX = re.compile(r'(?!^)([A-Z]+)')
T = TypeVar('T')
S = TypeVar('S')
def validate_key(k: str, max_length: int = 250) -> bool:
"""Validates value used as a key."""
if not isinstance(k, str):
raise TypeError("The key has to be a string")
elif len(k) > max_length:
raise AirflowException(f"The key has to be less than {max_length} characters")
elif not KEY_REGEX.match(k):
raise AirflowException(
"The key ({k}) has to be made of alphanumeric characters, dashes, "
"dots and underscores exclusively".format(k=k)
)
else:
return True
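# --- Illustrative usage sketch (editor's addition, not part of the original
# Airflow source): what ``validate_key`` above accepts and rejects. The key
# values are arbitrary examples.
def _example_validate_key() -> None:
    validate_key("my_dag.task-1")  # accepted: word characters, dots and dashes
    try:
        validate_key("spaces are not allowed")  # rejected by KEY_REGEX
    except AirflowException:
        pass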
def alchemy_to_dict(obj: Any) -> Optional[Dict]:
"""Transforms a SQLAlchemy model instance into a dictionary"""
if not obj:
return None
output = {}
for col in obj.__table__.columns:
value = getattr(obj, col.name)
if isinstance(value, datetime):
value = value.isoformat()
output[col.name] = value
return output
def ask_yesno(question: str) -> bool:
"""Helper to get yes / no answer from user."""
yes = {'yes', 'y'}
no = {'no', 'n'}
done = False
print(question)
while not done:
choice = input().lower()
if choice in yes:
return True
elif choice in no:
return False
else:
print("Please respond by yes or no.")
def is_container(obj: Any) -> bool:
"""Test if an object is a container (iterable) but not a string"""
return hasattr(obj, '__iter__') and not isinstance(obj, str)
def as_tuple(obj: Any) -> tuple:
"""
If obj is a container, returns obj as a tuple.
Otherwise, returns a tuple containing obj.
"""
if is_container(obj):
return tuple(obj)
else:
return tuple([obj])
def chunks(items: List[T], chunk_size: int) -> Generator[List[T], None, None]:
"""Yield successive chunks of a given size from a list of items"""
if chunk_size <= 0:
raise ValueError('Chunk size must be a positive integer')
for i in range(0, len(items), chunk_size):
yield items[i : i + chunk_size]
def reduce_in_chunks(fn: Callable[[S, List[T]], S], iterable: List[T], initializer: S, chunk_size: int = 0):
"""
Reduce the given list of items by splitting it into chunks
of the given size and passing each chunk through the reducer
"""
if len(iterable) == 0:
return initializer
if chunk_size == 0:
chunk_size = len(iterable)
return reduce(fn, chunks(iterable, chunk_size), initializer)
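# --- Illustrative usage sketch (editor's addition, not part of the original
# Airflow source): composing ``chunks`` and ``reduce_in_chunks`` above on an
# arbitrary list of numbers.
def _example_reduce_in_chunks() -> int:
    # [1..6] is split into [1, 2], [3, 4], [5, 6]; each chunk's sum is added to
    # the running total: 0 + 3 + 7 + 11 == 21.
    return reduce_in_chunks(lambda acc, chunk: acc + sum(chunk), list(range(1, 7)), 0, 2)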
def as_flattened_list(iterable: Iterable[Iterable[T]]) -> List[T]:
"""
Return an iterable with one level flattened
>>> as_flattened_list((('blue', 'red'), ('green', 'yellow', 'pink')))
['blue', 'red', 'green', 'yellow', 'pink']
"""
return [e for i in iterable for e in i]
def parse_template_string(template_string):
"""Parses Jinja template string."""
if "{{" in template_string: # jinja mode
return None, Template(template_string)
else:
return template_string, None
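# --- Illustrative usage sketch (editor's addition, not part of the original
# Airflow source): the two return shapes of ``parse_template_string`` above.
def _example_parse_template_string():
    # A Jinja-style template returns (None, jinja2.Template).
    _, jinja_template = parse_template_string("{{ ti.dag_id }}/{{ try_number }}.log")
    # A plain python format string is returned unchanged as (str, None).
    plain, _ = parse_template_string("{dag_id}/{try_number}.log")
    return jinja_template, plain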
def render_log_filename(ti: "TaskInstance", try_number, filename_template) -> str:
"""
Given task instance, try_number, filename_template, return the rendered log
filename
:param ti: task instance
:param try_number: try_number of the task
:param filename_template: filename template, which can be jinja template or
python string template
"""
filename_template, filename_jinja_template = parse_template_string(filename_template)
if filename_jinja_template:
jinja_context = ti.get_template_context()
jinja_context['try_number'] = try_number
return filename_jinja_template.render(**jinja_context)
return filename_template.format(
dag_id=ti.dag_id,
task_id=ti.task_id,
execution_date=ti.execution_date.isoformat(),
try_number=try_number,
)
def convert_camel_to_snake(camel_str: str) -> str:
"""Converts CamelCase to snake_case."""
return CAMELCASE_TO_SNAKE_CASE_REGEX.sub(r'_\1', camel_str).lower()
def merge_dicts(dict1: Dict, dict2: Dict) -> Dict:
"""
Merge two dicts recursively, returning new dict (input dict is not mutated).
Lists are not concatenated. Items in dict2 overwrite those also found in dict1.
"""
merged = dict1.copy()
for k, v in dict2.items():
if k in merged and isinstance(v, dict):
merged[k] = merge_dicts(merged.get(k, {}), v)
else:
merged[k] = v
return merged
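# --- Illustrative usage sketch (editor's addition, not part of the original
# Airflow source): the recursive semantics of ``merge_dicts`` above on a small
# nested example.
def _example_merge_dicts() -> Dict:
    base = {"retries": 1, "executor_config": {"cpus": 1, "ram": "1G"}}
    override = {"executor_config": {"ram": "4G"}, "queue": "default"}
    # Nested dicts are merged key by key and values from ``override`` win:
    # {"retries": 1, "executor_config": {"cpus": 1, "ram": "4G"}, "queue": "default"}
    return merge_dicts(base, override)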
def partition(pred: Callable[[T], bool], iterable: Iterable[T]) -> Tuple[Iterable[T], Iterable[T]]:
"""Use a predicate to partition entries into false entries and true entries"""
iter_1, iter_2 = tee(iterable)
return filterfalse(pred, iter_1), filter(pred, iter_2)
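# --- Illustrative usage sketch (editor's addition, not part of the original
# Airflow source): splitting a range with ``partition`` above; false entries
# come first in the returned pair.
def _example_partition() -> Tuple[List[int], List[int]]:
    odds, evens = partition(lambda n: n % 2 == 0, range(10))
    # list(odds) == [1, 3, 5, 7, 9], list(evens) == [0, 2, 4, 6, 8]
    return list(odds), list(evens)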
def chain(*args, **kwargs):
"""This function is deprecated. Please use `airflow.models.baseoperator.chain`."""
warnings.warn(
"This function is deprecated. Please use `airflow.models.baseoperator.chain`.",
DeprecationWarning,
stacklevel=2,
)
return import_string('airflow.models.baseoperator.chain')(*args, **kwargs)
def cross_downstream(*args, **kwargs):
"""This function is deprecated. Please use `airflow.models.baseoperator.cross_downstream`."""
warnings.warn(
"This function is deprecated. Please use `airflow.models.baseoperator.cross_downstream`.",
DeprecationWarning,
stacklevel=2,
)
return import_string('airflow.models.baseoperator.cross_downstream')(*args, **kwargs)
def build_airflow_url_with_query(query: Dict[str, Any]) -> str:
"""
Build airflow url using base_url and default_view and provided query
For example:
'http://0.0.0.0:8000/base/graph?dag_id=my-task&root=&execution_date=2020-10-27T10%3A59%3A25.615587
"""
view = conf.get('webserver', 'dag_default_view').lower()
url = url_for(f"Airflow.{view}")
return f"{url}?{parse.urlencode(query)}"
|
dhuang/incubator-airflow
|
airflow/utils/helpers.py
|
Python
|
apache-2.0
| 7,385
|
#!/usr/bin/env python
# Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x,
# and 6x, contain the same digits.
# NOTE:
# In order for x and 6x to have same digits, they must have same
# num of digits
# 10^(k - 1) <= x < 10^k has k digits
# Also need 10^(k - 1) <= 6x < 10^k
# Combining 10^(k - 1) <= x <= (10^k/6) <--- integer division
from python.decorators import euler_timer
def same_digs(n, multiplier):
candidates = [n * mult for mult in range(1, multiplier + 1)]
cand_digs = [sorted(int(dig) for dig in str(element))
for element in candidates]
# we sort the digits so only the content of the digit list matters
return (cand_digs.count(cand_digs[0]) == len(cand_digs))
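# Editor's note (not part of the original solution): same_digs(142857, 6) is
# True, since 142857, 285714, 428571, 571428, 714285 and 857142 are all digit
# permutations of one another; that is the value main() eventually returns.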
def find_sequence_same_digs(digs, multiplier):
for n in range(10 ** (digs - 1), 10 ** digs / multiplier + 1):
if same_digs(n, multiplier):
return (True, n)
return (False, -1)
def find_sequence_same(multiplier):
digits = 1
found = False
while not found:
found, val = find_sequence_same_digs(digits, multiplier)
digits += 1
return val
def main(verbose=False):
return find_sequence_same(6)
if __name__ == '__main__':
print euler_timer(52)(main)(verbose=True)
|
dhermes/project-euler
|
python/complete/no052.py
|
Python
|
apache-2.0
| 1,258
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""mock data for testing"""
# data taken from google-cloud-dataflow
DEP_INFO = (
"""{
"PyVCF":
{
"installed_version": "0.6.8",
"installed_version_time": "2016-03-18 16:19:25+00:00",
"latest_version": "0.6.8",
"latest_version_time": "2016-03-18 16:19:25+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:02.362146+00:00"
},
"PyYAML":
{
"installed_version": "3.13",
"installed_version_time": "2018-07-05 22:53:15+00:00",
"latest_version": "3.13",
"latest_version_time": "2018-07-05 22:53:15+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:02.434491+00:00"
},
"apache-beam":
{
"installed_version": "2.7.0",
"installed_version_time": "2018-06-26 05:28:08+00:00",
"latest_version": "2.6.0",
"latest_version_time": "2018-08-08 18:15:31+00:00",
"is_latest": false,
"current_time": "2018-08-16 01:08:58.798679+00:00"
},
"avro":
{
"installed_version": "1.8.2",
"installed_version_time": "2017-05-20 15:56:15+00:00",
"latest_version": "1.8.2",
"latest_version_time": "2017-05-20 15:56:15+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:58.872872+00:00"
},
"cachetools":
{
"installed_version": "2.1.0",
"installed_version_time": "2018-05-12 16:26:31+00:00",
"latest_version": "2.1.0",
"latest_version_time": "2018-05-12 16:26:31+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:58.935337+00:00"
},
"certifi":
{
"installed_version": "2018.8.13",
"installed_version_time": "2018-08-13 07:10:37+00:00",
"latest_version": "2018.8.13",
"latest_version_time": "2018-08-13 07:10:37+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:58.999951+00:00"
},
"chardet":
{
"installed_version": "3.0.4",
"installed_version_time": "2017-06-08 14:34:33+00:00",
"latest_version": "3.0.4",
"latest_version_time": "2017-06-08 14:34:33+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:59.059407+00:00"
},
"crcmod":
{
"installed_version": "1.7",
"installed_version_time": "2010-06-27 14:35:29+00:00",
"latest_version": "1.7",
"latest_version_time": "2010-06-27 14:35:29+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:59.132582+00:00"
},
"dill":
{
"installed_version": "0.2.6",
"installed_version_time": "2017-02-01 19:15:09+00:00",
"latest_version": "0.2.8.2",
"latest_version_time": "2018-06-22 22:12:44+00:00",
"is_latest": false,
"current_time": "2018-08-16 01:08:59.193692+00:00"
},
"docopt":
{
"installed_version": "0.6.2",
"installed_version_time": "2014-06-16 11:18:57+00:00",
"latest_version": "0.6.2",
"latest_version_time": "2014-06-16 11:18:57+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:59.277869+00:00"
},
"enum34":
{
"installed_version": "1.1.6",
"installed_version_time": "2016-05-16 03:31:13+00:00",
"latest_version": "1.1.6",
"latest_version_time": "2016-05-16 03:31:13+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:59.342615+00:00"
},
"fasteners":
{
"installed_version": "0.14.1",
"installed_version_time": "2015-11-13 06:47:45+00:00",
"latest_version": "0.14.1",
"latest_version_time": "2015-11-13 06:47:45+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:59.407208+00:00"
},
"funcsigs":
{
"installed_version": "1.0.2",
"installed_version_time": "2016-04-25 22:22:05+00:00",
"latest_version": "1.0.2",
"latest_version_time": "2016-04-25 22:22:05+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:59.490506+00:00"
},
"future":
{
"installed_version": "0.16.0",
"installed_version_time": "2016-10-27 20:07:22+00:00",
"latest_version": "0.16.0",
"latest_version_time": "2016-10-27 20:07:22+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:59.554068+00:00"
},
"futures":
{
"installed_version": "3.2.0",
"installed_version_time": "2017-11-30 23:22:35+00:00",
"latest_version": "3.2.0",
"latest_version_time": "2017-11-30 23:22:35+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:59.615168+00:00"
},
"gapic-google-cloud-pubsub-v1":
{
"installed_version": "0.15.4",
"installed_version_time": "2017-04-14 17:47:55+00:00",
"latest_version": "0.15.4",
"latest_version_time": "2017-04-14 17:47:55+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:59.733030+00:00"
},
"google-apitools":
{
"installed_version": "0.5.20",
"installed_version_time": "2017-12-18 22:52:40+00:00",
"latest_version": "0.5.23",
"latest_version_time": "2018-04-24 15:57:55+00:00",
"is_latest": false,
"current_time": "2018-08-16 01:08:59.804865+00:00"
},
"google-auth":
{
"installed_version": "1.5.1",
"installed_version_time": "2018-07-31 23:24:08+00:00",
"latest_version": "1.5.1",
"latest_version_time": "2018-07-31 23:24:08+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:59.868660+00:00"
},
"google-auth-httplib2":
{
"installed_version": "0.0.3",
"installed_version_time": "2017-11-14 17:37:59+00:00",
"latest_version": "0.0.3",
"latest_version_time": "2017-11-14 17:37:59+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:08:59.939852+00:00"
},
"google-cloud-bigquery":
{
"installed_version": "0.25.0",
"installed_version_time": "2017-06-26 23:46:02+00:00",
"latest_version": "1.5.0",
"latest_version_time": "2018-08-02 22:48:21+00:00",
"is_latest": false,
"current_time": "2018-08-16 01:09:00.035277+00:00"
},
"google-cloud-core":
{
"installed_version": "0.25.0",
"installed_version_time": "2017-06-26 22:03:32+00:00",
"latest_version": "0.28.1",
"latest_version_time": "2018-02-28 20:01:50+00:00",
"is_latest": false,
"current_time": "2018-08-16 01:09:00.110586+00:00"
},
"google-cloud-dataflow":
{
"installed_version": "2.5.0",
"installed_version_time": "2018-06-27 17:22:15+00:00",
"latest_version": "2.5.0",
"latest_version_time": "2018-06-27 17:22:15+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:00.303731+00:00"
},
"google-cloud-pubsub":
{
"installed_version": "0.26.0",
"installed_version_time": "2017-06-26 23:46:11+00:00",
"latest_version": "0.37.0",
"latest_version_time": "2018-08-14 17:47:22+00:00",
"is_latest": false,
"current_time": "2018-08-16 01:09:00.375273+00:00"
},
"google-gax":
{
"installed_version": "0.15.16",
"installed_version_time": "2017-11-10 21:25:36+00:00",
"latest_version": "0.16.0",
"latest_version_time": "2018-02-28 21:10:07+00:00",
"is_latest": false,
"current_time": "2018-08-16 01:09:00.437938+00:00"
},
"googleapis-common-protos":
{
"installed_version": "1.5.3",
"installed_version_time": "2017-09-26 21:16:44+00:00",
"latest_version": "1.5.3",
"latest_version_time": "2017-09-26 21:16:44+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:00.497815+00:00"
},
"googledatastore":
{
"installed_version": "7.0.1",
"installed_version_time": "2017-04-10 16:32:21+00:00",
"latest_version": "7.0.1",
"latest_version_time": "2017-04-10 16:32:21+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:00.653841+00:00"
},
"grpc-google-iam-v1":
{
"installed_version": "0.11.4",
"installed_version_time": "2017-09-22 15:23:23+00:00",
"latest_version": "0.11.4",
"latest_version_time": "2017-09-22 15:23:23+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:00.708233+00:00"
},
"grpcio":
{
"installed_version": "1.14.1",
"installed_version_time": "2018-08-08 19:31:37+00:00",
"latest_version": "1.14.1",
"latest_version_time": "2018-08-08 19:31:37+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:00.850325+00:00"
},
"hdfs":
{
"installed_version": "2.1.0",
"installed_version_time": "2017-09-08 03:57:21+00:00",
"latest_version": "2.1.0",
"latest_version_time": "2017-09-08 03:57:21+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:00.941405+00:00"
},
"httplib2":
{
"installed_version": "0.9.2",
"installed_version_time": "2015-09-28 13:55:48+00:00",
"latest_version": "0.11.3",
"latest_version_time": "2018-03-30 02:29:15+00:00",
"is_latest": false,
"current_time": "2018-08-16 01:09:01.090439+00:00"
},
"idna":
{
"installed_version": "2.7",
"installed_version_time": "2018-06-11 02:52:19+00:00",
"latest_version": "2.7",
"latest_version_time": "2018-06-11 02:52:19+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:01.159959+00:00"
},
"mock":
{
"installed_version": "2.0.0",
"installed_version_time": "2016-04-06 01:38:18+00:00",
"latest_version": "2.0.0",
"latest_version_time": "2016-04-06 01:38:18+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:01.222772+00:00"
},
"monotonic":
{
"installed_version": "1.5",
"installed_version_time": "2018-05-03 20:55:31+00:00",
"latest_version": "1.5",
"latest_version_time": "2018-05-03 20:55:31+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:01.311531+00:00"
},
"oauth2client":
{
"installed_version": "4.1.2",
"installed_version_time": "2017-06-29 22:06:33+00:00",
"latest_version": "4.1.2",
"latest_version_time": "2017-06-29 22:06:33+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:01.384495+00:00"
},
"pbr":
{
"installed_version": "4.2.0",
"installed_version_time": "2018-07-23 22:26:49+00:00",
"latest_version": "4.2.0",
"latest_version_time": "2018-07-23 22:26:49+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:01.483006+00:00"
},
"pip":
{
"installed_version": "10.0.1",
"installed_version_time": "2018-04-19 18:56:05+00:00",
"latest_version": "18.0",
"latest_version_time": "2018-07-22 07:53:50+00:00",
"is_latest": false,
"current_time": "2018-08-16 01:09:01.559689+00:00"
},
"ply":
{
"installed_version": "3.8",
"installed_version_time": "2015-10-02 18:15:50+00:00",
"latest_version": "3.11",
"latest_version_time": "2018-02-15 19:01:27+00:00",
"is_latest": false,
"current_time": "2018-08-16 01:09:01.609361+00:00"
},
"proto-google-cloud-datastore-v1":
{
"installed_version": "0.90.4",
"installed_version_time": "2017-04-28 21:22:56+00:00",
"latest_version": "0.90.4",
"latest_version_time": "2017-04-28 21:22:56+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:01.727899+00:00"
},
"proto-google-cloud-pubsub-v1":
{
"installed_version": "0.15.4",
"installed_version_time": "2017-04-14 17:47:42+00:00",
"latest_version": "0.15.4",
"latest_version_time": "2017-04-14 17:47:42+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:01.922813+00:00"
},
"protobuf":
{
"installed_version": "3.6.1",
"installed_version_time": "2018-08-13 22:47:09+00:00",
"latest_version": "3.6.1",
"latest_version_time": "2018-08-13 22:47:09+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:02.005171+00:00"
},
"pyasn1":
{
"installed_version": "0.4.4",
"installed_version_time": "2018-07-26 07:43:55+00:00",
"latest_version": "0.4.4",
"latest_version_time": "2018-07-26 07:43:55+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:02.091667+00:00"
},
"pyasn1-modules":
{
"installed_version": "0.2.2",
"installed_version_time": "2018-06-28 08:01:55+00:00",
"latest_version": "0.2.2",
"latest_version_time": "2018-06-28 08:01:55+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:02.172666+00:00"
},
"pytz":
{
"installed_version": "2018.5",
"installed_version_time": "2018-06-29 06:53:04+00:00",
"latest_version": "2018.5",
"latest_version_time": "2018-06-29 06:53:04+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:02.276061+00:00"
},
"requests":
{
"installed_version": "2.19.1",
"installed_version_time": "2018-06-14 13:40:38+00:00",
"latest_version": "2.19.1",
"latest_version_time": "2018-06-14 13:40:38+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:02.525582+00:00"
},
"rsa":
{
"installed_version": "3.4.2",
"installed_version_time": "2016-03-29 13:16:23+00:00",
"latest_version": "3.4.2",
"latest_version_time": "2016-03-29 13:16:23+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:02.589273+00:00"
},
"setuptools":
{
"installed_version": "40.0.0",
"installed_version_time": "2018-07-09 04:23:03+00:00",
"latest_version": "40.0.0",
"latest_version_time": "2018-07-09 04:23:03+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:02.705405+00:00"
},
"six":
{
"installed_version": "1.11.0",
"installed_version_time": "2017-09-17 18:46:53+00:00",
"latest_version": "1.11.0",
"latest_version_time": "2017-09-17 18:46:53+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:02.767901+00:00"
},
"typing":
{
"installed_version": "3.6.4",
"installed_version_time": "2018-01-25 00:54:56+00:00",
"latest_version": "3.6.4",
"latest_version_time": "2018-01-25 00:54:56+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:02.820013+00:00"
},
"urllib3":
{
"installed_version": "1.23",
"installed_version_time": "2018-06-05 03:25:49+00:00",
"latest_version": "1.23",
"latest_version_time": "2018-06-05 03:25:49+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:02.894411+00:00"
},
"wheel":
{
"installed_version": "0.31.1",
"installed_version_time": "2018-05-13 17:28:23+00:00",
"latest_version": "0.31.1",
"latest_version_time": "2018-05-13 17:28:23+00:00",
"is_latest": true,
"current_time": "2018-08-16 01:09:02.966368+00:00"
}
}"""
)
|
GoogleCloudPlatform/cloud-opensource-python
|
compatibility_lib/compatibility_lib/testdata/mock_depinfo_data.py
|
Python
|
apache-2.0
| 16,866
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
from tvm import relay
from tvm.relay.backend.contrib.ethosu.tir.compiler import extract_constants, lower_to_te
def create_te_graph(func):
func, consts = extract_constants(func)
mod = tvm.IRModule.from_expr(func)
func = relay.transform.InferType()(mod)["main"]
te_graph = lower_to_te(func)
return te_graph, consts
|
Laurawly/tvm-1
|
tests/python/contrib/test_ethosu/cascader/infra.py
|
Python
|
apache-2.0
| 1,131
|
#!/usr/bin/env python
#
# Copyright 2011-2015 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Create a verilog ROM that contains estimates for 1/x in floating point.
# The input and output will be a normalized significand with an implicit leading one.
#
import math
import sys
if len(sys.argv) != 2:
print('enter number of entries')
sys.exit(1)
NUM_ENTRIES = int(sys.argv[1])
if (NUM_ENTRIES & (NUM_ENTRIES - 1)) != 0:
# Must be power of two
print('number of entries must be power of two')
sys.exit(1)
WIDTH = int(math.log(NUM_ENTRIES, 2))
print('''
//
// This file is autogenerated by make_reciprocal_rom.py
//
module reciprocal_rom(
input [''' + str(WIDTH - 1) + ''':0] significand,
output logic[''' + str(WIDTH - 1) + ''':0] reciprocal_estimate);
always_comb
begin
case (significand)''')
for x in range(0, NUM_ENTRIES):
significand = NUM_ENTRIES | x
reciprocal = int((NUM_ENTRIES * NUM_ENTRIES * 2) / significand)
print('\t\t\t%d\'h%x: reciprocal_estimate = %d\'h%x;' % (WIDTH, x, WIDTH, reciprocal & (NUM_ENTRIES - 1)))
# Width of the default value follows WIDTH rather than being hard-coded.
print('\t\t\tdefault: reciprocal_estimate = ' + str(WIDTH) + '\'h0;')
print('''\t\tendcase
\t\tendcase
\tend
endmodule
''')
|
FulcronZ/NyuziProcessor
|
tools/misc/make_reciprocal_rom.py
|
Python
|
apache-2.0
| 1,649
|
from locust import HttpLocust, TaskSet, task
class WebsiteTasks(TaskSet):
def on_start(self):
self.client.post("/login", {
"username": "test_user",
"password": ""
})
@task
def index(self):
self.client.get("/")
@task
def about(self):
self.client.get("/about/")
class WebsiteUser(HttpLocust):
task_set = WebsiteTasks
min_wait = 100
max_wait = 1500
|
itaymendel/taurus
|
tests/locust/simple.py
|
Python
|
apache-2.0
| 440
|
# -*- coding: utf-8 -*-
#
# Copyright 2014-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Options for BigMLer dataset transformations processing
"""
def get_dataset_trans_options(defaults=None):
"""Dataset transformations-related options
"""
if defaults is None:
defaults = {}
options = {
# String describing the datasets to be used as origin and their attrs.
'--datasets-json': {
'action': 'store',
'dest': 'datasets_json',
'default': defaults.get('datasets_json', None),
'help': ("JSON string describing the datasets used as origin to"
" merge or join "
" (e.g., [\"{\\\"id\\\": "
"\\\"dataset/50a20697035d0706da0004a4\\\"}\", "
" {\\\"id\\\": "
"\\\"dataset/50a20697035d0706da0004b5\\\"}]\""
"]).")},
# File. The path to the data file
'--file': {
'action': 'store',
'dest': 'training_set',
'default': defaults.get('training_set', None),
'help': ("Path to the file containing the data.")},
# Merge. Alias for multi-dataset
'--merge': {
'action': 'store_true',
'dest': 'multi_dataset',
'default': defaults.get('multi_dataset', False),
'help': ("Generate a new dataset by adding existing"
" datasets (alias for --multi-dataset).")},
# Juxtapose: join the rows in each dataset according to row index
'--juxtapose': {
'action': 'store_true',
'dest': 'juxtapose',
'default': defaults.get('juxtapose', False),
'help': ("Generate a new dataset by joining the rows"
"in several datasets according to row index.")},
# String describing the sql query to be executed.
'--sql-query': {
'action': 'store',
'dest': 'sql_query',
'default': defaults.get('sql_query', None),
'help': ("SQL query to be executed"
" (e.g., \"select A.`000000` as x, A.`00000a` as z, "
"A.`00000c` from A, B where A.id = B.id\").")},
# Path to the JSON file describing the SQL output fields and types
'--sql-output-fields': {
'action': 'store',
'dest': 'sql_output_fields',
'default': defaults.get('sql_output_fields', None),
'help': ("Path to a JSON file describing the structure and "
"types of the output fields.")},
# Path to the JSON file describing the query to be executed
'--json-query': {
'action': 'store',
'dest': 'json_query',
'default': defaults.get('json_query', None),
'help': ("Path to the JSON file describing the SQL query to "
"be executed.")}}
return options
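# --- Illustrative usage sketch (editor's addition, not part of the original
# BigMLer source). The option dicts above look like keyword arguments for
# argparse's ``add_argument``; wiring them up that way is an assumption made
# only for illustration.
def _example_build_parser():
    import argparse
    parser = argparse.ArgumentParser(description="BigMLer dataset transformations")
    for flag, kwargs in get_dataset_trans_options().items():
        parser.add_argument(flag, **kwargs)
    return parser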
|
bigmlcom/bigmler
|
bigmler/options/dataset_trans.py
|
Python
|
apache-2.0
| 3,499
|
__source__ = 'https://leetcode.com/problems/minimum-ascii-delete-sum-for-two-strings/'
# Time: O(m * n)
# Space: O(m * n)
#
# DP: dp[i][s2.length()] = dp[i+1][s2.length()] + s1.codePointAt(i).
#
# Description: Leetcode # 712. Minimum ASCII Delete Sum for Two Strings
#
# Given two strings s1, s2, find the lowest ASCII sum of deleted characters to make two strings equal.
#
# Example 1:
# Input: s1 = "sea", s2 = "eat"
# Output: 231
# Explanation: Deleting "s" from "sea" adds the ASCII value of "s" (115) to the sum.
# Deleting "t" from "eat" adds 116 to the sum.
# At the end, both strings are equal, and 115 + 116 = 231 is the minimum sum possible to achieve this.
# Example 2:
# Input: s1 = "delete", s2 = "leet"
# Output: 403
# Explanation: Deleting "dee" from "delete" to turn the string into "let",
# adds 100[d]+101[e]+101[e] to the sum. Deleting "e" from "leet" adds 101[e] to the sum.
# At the end, both strings are equal to "let", and the answer is 100+101+101+101 = 403.
# If instead we turned both strings into "lee" or "eet", we would get answers of 433 or 417, which are higher.
# Note:
#
# 0 < s1.length, s2.length <= 1000.
# All elements of each string will have an ASCII value in [97, 122].
#
import unittest
#85.47% 464ms
class Solution(object):
def minimumDeleteSum(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: int
"""
dp = [[0] * (len(s2) + 1) for _ in xrange(len(s1) + 1)]
for i in xrange(len(s1) -1, -1, -1):
dp[i][len(s2)] = dp[i+1][len(s2)] + ord(s1[i])
for j in xrange(len(s2) -1, -1, -1):
dp[len(s1)][j] = dp[len(s1)][j+1] + ord(s2[j])
for i in xrange(len(s1) -1, -1, -1):
for j in xrange(len(s2) -1, -1, -1):
if s1[i] == s2[j]:
dp[i][j] = dp[i+1][j+1]
else:
dp[i][j] = min(dp[i+1][j] + ord(s1[i]), dp[i][j+1] + ord(s2[j]))
return dp[0][0]
# 380ms 91.45%
# total ascii sum - max(dp sum)
class Solution2(object):
def minimumDeleteSum(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: int
"""
n1,n2 = len(s1), len(s2)
dp = [[0] * (n2 + 1) for _ in range(n1 + 1)]
for i in xrange(n1):
for j in xrange(n2):
if s1[i] == s2[j]:
dp[i+1][j+1] = dp[i][j] + 2 * ord(s1[i])
else:
dp[i+1][j+1] = max(dp[i+1][j], dp[i][j+1])
ttl = 0
for c in s1:
ttl += ord(c)
for c in s2:
ttl += ord(c)
return ttl - dp[n1][n2]
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/minimum-ascii-delete-sum-for-two-strings/solution/
# https://leetcode.com/problems/minimum-ascii-delete-sum-for-two-strings/discuss/108811/JavaDP(With-Explanation)
#
# LC 72 Edit Distance and LC 524 Longest Word through Deleting share the same idea as this one.
#
# Complexity Analysis
# Time Complexity: O(M*N), where M,N are the lengths of the given strings.
# We use nested for loops: each loop is O(M) and O(N) respectively.
# Space Complexity: O(M*N), the space used by dp.
#
# One dimension DP
# 12ms 100%
class Solution {
public int minimumDeleteSum(String s1, String s2) {
int m = s1.length(), n = s2.length();
char[] sa1 = s1.toCharArray();
char[] sa2 = s2.toCharArray();
int[] dp = new int[n+1];
for (int j = 1; j <= n; j++) {
dp[j] = dp[j - 1] + sa2[j-1];
}
for (int i = 1; i<= m; i++) {
int t1 = dp[0];
dp[0] += sa1[i-1];
for (int j = 1; j <= n; j++) {
int t2 = dp[j];
if (sa1[i - 1] == sa2[j-1]) dp[j] = t1;
else {
dp[j] = Math.min(dp[j] + sa1[i-1], dp[j-1] + sa2[j-1]);
}
t1 = t2;
}
}
return dp[n];
}
}
# 33ms 44.06%
class Solution {
public int minimumDeleteSum(String s1, String s2) {
int[][] dp = new int[s1.length() + 1][s2.length() + 1];
for (int i = s1.length() - 1; i >= 0; i--) {
dp[i][s2.length()] = dp[i + 1][s2.length()] + s1.codePointAt(i);
}
for (int j = s2.length() - 1; j >= 0; j--) {
dp[s1.length()][j] = dp[s1.length()][j+1] + s2.codePointAt(j);
}
for (int i = s1.length() - 1; i >= 0; i--) {
for (int j = s2.length() - 1; j >= 0; j--) {
if (s1.charAt(i) == s2.charAt(j)) {
dp[i][j] = dp[i+1][j+1];
} else {
dp[i][j] = Math.min(dp[i+1][j] + s1.codePointAt(i),
dp[i][j+1] + s2.codePointAt(j));
}
}
}
return dp[0][0];
}
}
'''
|
JulyKikuAkita/PythonPrac
|
cs15211/MinimumASCIIDeleteSumforTwoStrings.py
|
Python
|
apache-2.0
| 4,992
|
from __future__ import unicode_literals
from moto.core.exceptions import JsonRESTError
class DataSyncClientError(JsonRESTError):
code = 400
class InvalidRequestException(DataSyncClientError):
def __init__(self, msg=None):
self.code = 400
super(InvalidRequestException, self).__init__(
"InvalidRequestException", msg or "The request is not valid."
)
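# --- Illustrative usage sketch (editor's addition, not part of the original
# moto source): raising the error above from a hypothetical request validator.
def _example_validate_task_arn(task_arn):
    if not task_arn or not task_arn.startswith("arn:"):
        raise InvalidRequestException("Invalid TaskArn: %s" % task_arn)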
|
william-richard/moto
|
moto/datasync/exceptions.py
|
Python
|
apache-2.0
| 398
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'MainWindow.ui'
#
# Created: Tue May 28 11:30:41 2013
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(977, 747)
MainWindow.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
self.centralWidget = QtGui.QWidget(MainWindow)
self.centralWidget.setObjectName(_fromUtf8("centralWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.centralWidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.verticalLayoutMain = QtGui.QVBoxLayout()
self.verticalLayoutMain.setObjectName(_fromUtf8("verticalLayoutMain"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.verticalLayoutUtterances = QtGui.QVBoxLayout()
self.verticalLayoutUtterances.setObjectName(_fromUtf8("verticalLayoutUtterances"))
self.labelUtterances = QtGui.QLabel(self.centralWidget)
self.labelUtterances.setObjectName(_fromUtf8("labelUtterances"))
self.verticalLayoutUtterances.addWidget(self.labelUtterances)
self.listwidgetUtterances = QtGui.QListWidget(self.centralWidget)
self.listwidgetUtterances.setMaximumSize(QtCore.QSize(16777215, 150))
self.listwidgetUtterances.setObjectName(_fromUtf8("listwidgetUtterances"))
self.verticalLayoutUtterances.addWidget(self.listwidgetUtterances)
self.pushbuttonNewTierUtterances = QtGui.QPushButton(self.centralWidget)
self.pushbuttonNewTierUtterances.setObjectName(_fromUtf8("pushbuttonNewTierUtterances"))
self.verticalLayoutUtterances.addWidget(self.pushbuttonNewTierUtterances)
self.horizontalLayout.addLayout(self.verticalLayoutUtterances)
self.verticalLayoutWords = QtGui.QVBoxLayout()
self.verticalLayoutWords.setObjectName(_fromUtf8("verticalLayoutWords"))
self.labelWords = QtGui.QLabel(self.centralWidget)
self.labelWords.setObjectName(_fromUtf8("labelWords"))
self.verticalLayoutWords.addWidget(self.labelWords)
self.listwidgetWords = QtGui.QListWidget(self.centralWidget)
self.listwidgetWords.setEnabled(True)
self.listwidgetWords.setMaximumSize(QtCore.QSize(16777215, 150))
self.listwidgetWords.setObjectName(_fromUtf8("listwidgetWords"))
self.verticalLayoutWords.addWidget(self.listwidgetWords)
self.pushbuttonNewTierWords = QtGui.QPushButton(self.centralWidget)
self.pushbuttonNewTierWords.setObjectName(_fromUtf8("pushbuttonNewTierWords"))
self.verticalLayoutWords.addWidget(self.pushbuttonNewTierWords)
self.horizontalLayout.addLayout(self.verticalLayoutWords)
self.verticalLayoutMorphemes = QtGui.QVBoxLayout()
self.verticalLayoutMorphemes.setObjectName(_fromUtf8("verticalLayoutMorphemes"))
self.labelMorphemes = QtGui.QLabel(self.centralWidget)
self.labelMorphemes.setObjectName(_fromUtf8("labelMorphemes"))
self.verticalLayoutMorphemes.addWidget(self.labelMorphemes)
self.listwidgetMorphemes = QtGui.QListWidget(self.centralWidget)
self.listwidgetMorphemes.setMaximumSize(QtCore.QSize(16777215, 150))
self.listwidgetMorphemes.setBaseSize(QtCore.QSize(0, 0))
self.listwidgetMorphemes.setObjectName(_fromUtf8("listwidgetMorphemes"))
self.verticalLayoutMorphemes.addWidget(self.listwidgetMorphemes)
self.pushbuttonNewTierMorphemes = QtGui.QPushButton(self.centralWidget)
self.pushbuttonNewTierMorphemes.setObjectName(_fromUtf8("pushbuttonNewTierMorphemes"))
self.verticalLayoutMorphemes.addWidget(self.pushbuttonNewTierMorphemes)
self.horizontalLayout.addLayout(self.verticalLayoutMorphemes)
self.verticalLayoutFunctions = QtGui.QVBoxLayout()
self.verticalLayoutFunctions.setObjectName(_fromUtf8("verticalLayoutFunctions"))
self.labelFunctions = QtGui.QLabel(self.centralWidget)
self.labelFunctions.setObjectName(_fromUtf8("labelFunctions"))
self.verticalLayoutFunctions.addWidget(self.labelFunctions)
self.listwidgetFunctions = QtGui.QListWidget(self.centralWidget)
self.listwidgetFunctions.setMaximumSize(QtCore.QSize(16777215, 150))
self.listwidgetFunctions.setObjectName(_fromUtf8("listwidgetFunctions"))
self.verticalLayoutFunctions.addWidget(self.listwidgetFunctions)
self.pushbuttonNewTierFunctions = QtGui.QPushButton(self.centralWidget)
self.pushbuttonNewTierFunctions.setObjectName(_fromUtf8("pushbuttonNewTierFunctions"))
self.verticalLayoutFunctions.addWidget(self.pushbuttonNewTierFunctions)
self.horizontalLayout.addLayout(self.verticalLayoutFunctions)
self.verticalLayoutTranslations = QtGui.QVBoxLayout()
self.verticalLayoutTranslations.setObjectName(_fromUtf8("verticalLayoutTranslations"))
self.labelTranslations = QtGui.QLabel(self.centralWidget)
self.labelTranslations.setObjectName(_fromUtf8("labelTranslations"))
self.verticalLayoutTranslations.addWidget(self.labelTranslations)
self.listwidgetTranslations = QtGui.QListWidget(self.centralWidget)
self.listwidgetTranslations.setMaximumSize(QtCore.QSize(16777215, 150))
self.listwidgetTranslations.setObjectName(_fromUtf8("listwidgetTranslations"))
self.verticalLayoutTranslations.addWidget(self.listwidgetTranslations)
self.pushbuttonNewTierTranslations = QtGui.QPushButton(self.centralWidget)
self.pushbuttonNewTierTranslations.setObjectName(_fromUtf8("pushbuttonNewTierTranslations"))
self.verticalLayoutTranslations.addWidget(self.pushbuttonNewTierTranslations)
self.horizontalLayout.addLayout(self.verticalLayoutTranslations)
self.verticalLayoutMain.addLayout(self.horizontalLayout)
self.line = QtGui.QFrame(self.centralWidget)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.verticalLayoutMain.addWidget(self.line)
self.horizontalLayoutEditArea = QtGui.QHBoxLayout()
self.horizontalLayoutEditArea.setObjectName(_fromUtf8("horizontalLayoutEditArea"))
self.verticalLayoutProjectFiles = QtGui.QVBoxLayout()
self.verticalLayoutProjectFiles.setObjectName(_fromUtf8("verticalLayoutProjectFiles"))
self.label_6 = QtGui.QLabel(self.centralWidget)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.verticalLayoutProjectFiles.addWidget(self.label_6)
self.listwidgetFiles = QtGui.QListWidget(self.centralWidget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.listwidgetFiles.sizePolicy().hasHeightForWidth())
self.listwidgetFiles.setSizePolicy(sizePolicy)
self.listwidgetFiles.setObjectName(_fromUtf8("listwidgetFiles"))
self.verticalLayoutProjectFiles.addWidget(self.listwidgetFiles)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.toolbuttonAddFile = QtGui.QToolButton(self.centralWidget)
self.toolbuttonAddFile.setEnabled(True)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/fileopen.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolbuttonAddFile.setIcon(icon)
self.toolbuttonAddFile.setAutoRaise(False)
self.toolbuttonAddFile.setObjectName(_fromUtf8("toolbuttonAddFile"))
self.horizontalLayout_4.addWidget(self.toolbuttonAddFile)
self.toolbuttonNewFile = QtGui.QToolButton(self.centralWidget)
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/filenew.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolbuttonNewFile.setIcon(icon1)
self.toolbuttonNewFile.setAutoRaise(False)
self.toolbuttonNewFile.setObjectName(_fromUtf8("toolbuttonNewFile"))
self.horizontalLayout_4.addWidget(self.toolbuttonNewFile)
self.toolbuttonExportFile = QtGui.QToolButton(self.centralWidget)
icon2 = QtGui.QIcon()
icon2.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/fileexport.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolbuttonExportFile.setIcon(icon2)
self.toolbuttonExportFile.setAutoRaise(False)
self.toolbuttonExportFile.setObjectName(_fromUtf8("toolbuttonExportFile"))
self.horizontalLayout_4.addWidget(self.toolbuttonExportFile)
self.toolbuttonRemoveFile = QtGui.QToolButton(self.centralWidget)
icon3 = QtGui.QIcon()
icon3.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/fileclose.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.toolbuttonRemoveFile.setIcon(icon3)
self.toolbuttonRemoveFile.setAutoRaise(False)
self.toolbuttonRemoveFile.setObjectName(_fromUtf8("toolbuttonRemoveFile"))
self.horizontalLayout_4.addWidget(self.toolbuttonRemoveFile)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem)
self.verticalLayoutProjectFiles.addLayout(self.horizontalLayout_4)
self.horizontalLayoutEditArea.addLayout(self.verticalLayoutProjectFiles)
self.texteditInterlinear = PoioIlTextEdit(self.centralWidget)
self.texteditInterlinear.setObjectName(_fromUtf8("texteditInterlinear"))
self.horizontalLayoutEditArea.addWidget(self.texteditInterlinear)
self.verticalLayoutMain.addLayout(self.horizontalLayoutEditArea)
self.verticalLayoutMain.setStretch(2, 1)
self.verticalLayout.addLayout(self.verticalLayoutMain)
MainWindow.setCentralWidget(self.centralWidget)
self.statusBar = QtGui.QStatusBar(MainWindow)
self.statusBar.setObjectName(_fromUtf8("statusBar"))
MainWindow.setStatusBar(self.statusBar)
self.menuBar = QtGui.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 977, 21))
self.menuBar.setObjectName(_fromUtf8("menuBar"))
self.menuFile = QtGui.QMenu(self.menuBar)
self.menuFile.setObjectName(_fromUtf8("menuFile"))
self.menuEdit = QtGui.QMenu(self.menuBar)
self.menuEdit.setObjectName(_fromUtf8("menuEdit"))
self.menuAbout = QtGui.QMenu(self.menuBar)
self.menuAbout.setObjectName(_fromUtf8("menuAbout"))
MainWindow.setMenuBar(self.menuBar)
self.toolBar = QtGui.QToolBar(MainWindow)
self.toolBar.setObjectName(_fromUtf8("toolBar"))
MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
self.actionOpen = QtGui.QAction(MainWindow)
icon4 = QtGui.QIcon()
icon4.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/projectopen.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionOpen.setIcon(icon4)
self.actionOpen.setObjectName(_fromUtf8("actionOpen"))
self.actionSave = QtGui.QAction(MainWindow)
icon5 = QtGui.QIcon()
icon5.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/filesave.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSave.setIcon(icon5)
self.actionSave.setObjectName(_fromUtf8("actionSave"))
self.actionSaveAs = QtGui.QAction(MainWindow)
icon6 = QtGui.QIcon()
icon6.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/filesaveas.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionSaveAs.setIcon(icon6)
self.actionSaveAs.setObjectName(_fromUtf8("actionSaveAs"))
self.actionQuit = QtGui.QAction(MainWindow)
self.actionQuit.setObjectName(_fromUtf8("actionQuit"))
self.actionNew = QtGui.QAction(MainWindow)
icon7 = QtGui.QIcon()
icon7.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/projectnew.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionNew.setIcon(icon7)
self.actionNew.setObjectName(_fromUtf8("actionNew"))
self.actionDeleteUtterance = QtGui.QAction(MainWindow)
self.actionDeleteUtterance.setEnabled(True)
icon8 = QtGui.QIcon()
icon8.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/removeutterance.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionDeleteUtterance.setIcon(icon8)
self.actionDeleteUtterance.setObjectName(_fromUtf8("actionDeleteUtterance"))
self.actionInsertUtterance = QtGui.QAction(MainWindow)
self.actionInsertUtterance.setEnabled(True)
icon9 = QtGui.QIcon()
icon9.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/insertutterance.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionInsertUtterance.setIcon(icon9)
self.actionInsertUtterance.setObjectName(_fromUtf8("actionInsertUtterance"))
self.actionCopyUtterance = QtGui.QAction(MainWindow)
self.actionCopyUtterance.setObjectName(_fromUtf8("actionCopyUtterance"))
self.actionInsertWord = QtGui.QAction(MainWindow)
self.actionInsertWord.setEnabled(True)
icon10 = QtGui.QIcon()
icon10.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/insertword.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionInsertWord.setIcon(icon10)
self.actionInsertWord.setObjectName(_fromUtf8("actionInsertWord"))
self.actionDeleteWord = QtGui.QAction(MainWindow)
self.actionDeleteWord.setEnabled(True)
icon11 = QtGui.QIcon()
icon11.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/pixmaps/removeword.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.actionDeleteWord.setIcon(icon11)
self.actionDeleteWord.setObjectName(_fromUtf8("actionDeleteWord"))
self.actionAboutPoioILE = QtGui.QAction(MainWindow)
self.actionAboutPoioILE.setObjectName(_fromUtf8("actionAboutPoioILE"))
self.actionOptions = QtGui.QAction(MainWindow)
self.actionOptions.setObjectName(_fromUtf8("actionOptions"))
self.actionAddFile = QtGui.QAction(MainWindow)
self.actionAddFile.setEnabled(True)
self.actionAddFile.setIcon(icon)
self.actionAddFile.setObjectName(_fromUtf8("actionAddFile"))
self.actionExportFile = QtGui.QAction(MainWindow)
self.actionExportFile.setEnabled(True)
self.actionExportFile.setIcon(icon2)
self.actionExportFile.setObjectName(_fromUtf8("actionExportFile"))
self.actionRemoveFile = QtGui.QAction(MainWindow)
self.actionRemoveFile.setEnabled(True)
self.actionRemoveFile.setIcon(icon3)
self.actionRemoveFile.setObjectName(_fromUtf8("actionRemoveFile"))
self.actionNewFile = QtGui.QAction(MainWindow)
self.actionNewFile.setEnabled(True)
self.actionNewFile.setIcon(icon1)
self.actionNewFile.setObjectName(_fromUtf8("actionNewFile"))
self.menuFile.addAction(self.actionNew)
self.menuFile.addAction(self.actionOpen)
self.menuFile.addAction(self.actionSave)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionAddFile)
self.menuFile.addAction(self.actionNewFile)
self.menuFile.addAction(self.actionExportFile)
self.menuFile.addAction(self.actionRemoveFile)
self.menuFile.addSeparator()
self.menuFile.addAction(self.actionQuit)
self.menuEdit.addAction(self.actionCopyUtterance)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionInsertUtterance)
self.menuEdit.addAction(self.actionDeleteUtterance)
self.menuEdit.addAction(self.actionInsertWord)
self.menuEdit.addAction(self.actionDeleteWord)
self.menuEdit.addSeparator()
self.menuEdit.addAction(self.actionOptions)
self.menuAbout.addAction(self.actionAboutPoioILE)
self.menuBar.addAction(self.menuFile.menuAction())
self.menuBar.addAction(self.menuEdit.menuAction())
self.menuBar.addAction(self.menuAbout.menuAction())
self.toolBar.addAction(self.actionNew)
self.toolBar.addAction(self.actionOpen)
self.toolBar.addAction(self.actionSave)
self.toolBar.addSeparator()
self.toolBar.addAction(self.actionInsertUtterance)
self.toolBar.addAction(self.actionDeleteUtterance)
self.toolBar.addAction(self.actionInsertWord)
self.toolBar.addAction(self.actionDeleteWord)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "PoioILE", None))
self.labelUtterances.setText(_translate("MainWindow", "Utterance tiers", None))
self.pushbuttonNewTierUtterances.setText(_translate("MainWindow", "New", None))
self.labelWords.setText(_translate("MainWindow", "Word tiers", None))
self.pushbuttonNewTierWords.setText(_translate("MainWindow", "New", None))
self.labelMorphemes.setText(_translate("MainWindow", "Morpheme tiers", None))
self.pushbuttonNewTierMorphemes.setText(_translate("MainWindow", "New", None))
self.labelFunctions.setText(_translate("MainWindow", "Gloss tiers", None))
self.pushbuttonNewTierFunctions.setText(_translate("MainWindow", "New", None))
self.labelTranslations.setText(_translate("MainWindow", "Translation tiers", None))
self.pushbuttonNewTierTranslations.setText(_translate("MainWindow", "New", None))
self.label_6.setText(_translate("MainWindow", "Project Files:", None))
self.toolbuttonAddFile.setToolTip(_translate("MainWindow", "Add File...", None))
self.toolbuttonAddFile.setText(_translate("MainWindow", "...", None))
self.toolbuttonNewFile.setToolTip(_translate("MainWindow", "New File", None))
self.toolbuttonNewFile.setText(_translate("MainWindow", "...", None))
self.toolbuttonExportFile.setToolTip(_translate("MainWindow", "Export File...", None))
self.toolbuttonExportFile.setText(_translate("MainWindow", "...", None))
self.toolbuttonRemoveFile.setToolTip(_translate("MainWindow", "Remove File", None))
self.toolbuttonRemoveFile.setText(_translate("MainWindow", "...", None))
self.menuFile.setTitle(_translate("MainWindow", "File", None))
self.menuEdit.setTitle(_translate("MainWindow", "Edit", None))
self.menuAbout.setTitle(_translate("MainWindow", "About", None))
self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar", None))
self.actionOpen.setText(_translate("MainWindow", "Open Project...", None))
self.actionOpen.setShortcut(_translate("MainWindow", "Ctrl+O", None))
self.actionSave.setText(_translate("MainWindow", "Save Project", None))
self.actionSave.setShortcut(_translate("MainWindow", "Ctrl+S", None))
self.actionSaveAs.setText(_translate("MainWindow", "Save File as...", None))
self.actionSaveAs.setShortcut(_translate("MainWindow", "Ctrl+Alt+S", None))
self.actionQuit.setText(_translate("MainWindow", "Quit", None))
self.actionQuit.setShortcut(_translate("MainWindow", "Ctrl+Q", None))
self.actionNew.setText(_translate("MainWindow", "New Project...", None))
self.actionNew.setToolTip(_translate("MainWindow", "Create a new annotation project", None))
self.actionNew.setShortcut(_translate("MainWindow", "Ctrl+N", None))
self.actionDeleteUtterance.setText(_translate("MainWindow", "Delete utterance", None))
self.actionInsertUtterance.setText(_translate("MainWindow", "Insert new utterance", None))
self.actionInsertUtterance.setToolTip(_translate("MainWindow", "Insert new utterance", None))
self.actionCopyUtterance.setText(_translate("MainWindow", "Copy utterance", None))
self.actionCopyUtterance.setShortcut(_translate("MainWindow", "Ctrl+C", None))
self.actionInsertWord.setText(_translate("MainWindow", "Insert new word", None))
self.actionDeleteWord.setText(_translate("MainWindow", "Delete word", None))
self.actionAboutPoioILE.setText(_translate("MainWindow", "About PoioILE...", None))
self.actionOptions.setText(_translate("MainWindow", "Options...", None))
self.actionAddFile.setText(_translate("MainWindow", "Add File...", None))
self.actionExportFile.setText(_translate("MainWindow", "Export File...", None))
self.actionRemoveFile.setText(_translate("MainWindow", "Remove File", None))
        self.actionNewFile.setText(_translate("MainWindow", "New File...", None))
from PoioIlTextEdit import PoioIlTextEdit
import poio_rc
|
cidles/poio-analyzer
|
src/poio/ui/Ui_MainWindow.py
|
Python
|
apache-2.0
| 21,515
|
from __future__ import unicode_literals
import threading
import mock
from mopidy import backend as backend_api
import spotify
from mopidy_spotify import backend, library, playback, playlists
def get_backend(config, session_mock=None):
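    # Test helper: build a SpotifyBackend with a mocked session (or the one
    # provided) and a mocked event loop, so no real Spotify connection is made.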
obj = backend.SpotifyBackend(config=config, audio=None)
if session_mock:
obj._session = session_mock
else:
obj._session = mock.Mock()
obj._session.playlist_container = None
obj._event_loop = mock.Mock()
return obj
def test_uri_schemes(spotify_mock, config):
backend = get_backend(config)
assert 'spotify' in backend.uri_schemes
def test_init_sets_up_the_providers(spotify_mock, config):
backend = get_backend(config)
assert isinstance(backend.library, library.SpotifyLibraryProvider)
assert isinstance(backend.library, backend_api.LibraryProvider)
assert isinstance(backend.playback, playback.SpotifyPlaybackProvider)
assert isinstance(backend.playback, backend_api.PlaybackProvider)
assert isinstance(backend.playlists, playlists.SpotifyPlaylistsProvider)
assert isinstance(backend.playlists, backend_api.PlaylistsProvider)
def test_init_disables_playlists_provider_if_not_allowed(spotify_mock, config):
config['spotify']['allow_playlists'] = False
backend = get_backend(config)
assert backend.playlists is None
def test_on_start_creates_configured_session(tmpdir, spotify_mock, config):
cache_location_mock = mock.PropertyMock()
settings_location_mock = mock.PropertyMock()
config_mock = spotify_mock.Config.return_value
type(config_mock).cache_location = cache_location_mock
type(config_mock).settings_location = settings_location_mock
get_backend(config).on_start()
spotify_mock.Config.assert_called_once_with()
config_mock.load_application_key_file.assert_called_once_with(mock.ANY)
cache_location_mock.assert_called_once_with(
'%s' % tmpdir.join('cache', 'spotify'))
settings_location_mock.assert_called_once_with(
'%s' % tmpdir.join('data', 'spotify'))
spotify_mock.Session.assert_called_once_with(config_mock)
def test_on_start_disallows_network_if_config_is_set(spotify_mock, config):
session = spotify_mock.Session.return_value
allow_network_mock = mock.PropertyMock()
type(session.connection).allow_network = allow_network_mock
config['spotify']['allow_network'] = False
get_backend(config).on_start()
allow_network_mock.assert_called_once_with(False)
def test_on_start_configures_preferred_bitrate(spotify_mock, config):
session = spotify_mock.Session.return_value
preferred_bitrate_mock = mock.PropertyMock()
type(session).preferred_bitrate = preferred_bitrate_mock
config['spotify']['bitrate'] = 320
get_backend(config).on_start()
preferred_bitrate_mock.assert_called_once_with(
spotify.Bitrate.BITRATE_320k)
def test_on_start_configures_volume_normalization(spotify_mock, config):
session = spotify_mock.Session.return_value
volume_normalization_mock = mock.PropertyMock()
type(session).volume_normalization = volume_normalization_mock
config['spotify']['volume_normalization'] = False
get_backend(config).on_start()
volume_normalization_mock.assert_called_once_with(False)
def test_on_start_configures_proxy(spotify_mock, config):
config['proxy'] = {
'scheme': 'https',
'hostname': 'my-proxy.example.com',
'port': 8080,
'username': 'alice',
'password': 's3cret',
}
spotify_config = spotify_mock.Config.return_value
get_backend(config).on_start()
assert spotify_config.proxy == 'https://my-proxy.example.com:8080'
assert spotify_config.proxy_username == 'alice'
assert spotify_config.proxy_password == 's3cret'
def test_on_start_adds_connection_state_changed_handler_to_session(
spotify_mock, config):
session = spotify_mock.Session.return_value
get_backend(config).on_start()
assert (mock.call(
spotify_mock.SessionEvent.CONNECTION_STATE_UPDATED,
backend.on_connection_state_changed,
backend.SpotifyBackend._logged_in,
backend.SpotifyBackend._logged_out,
mock.ANY)
in session.on.call_args_list)
def test_on_start_adds_play_token_lost_handler_to_session(
spotify_mock, config):
session = spotify_mock.Session.return_value
obj = get_backend(config)
obj.on_start()
assert (mock.call(
spotify_mock.SessionEvent.PLAY_TOKEN_LOST,
backend.on_play_token_lost, mock.ANY)
in session.on.call_args_list)
def test_on_start_starts_the_pyspotify_event_loop(spotify_mock, config):
backend = get_backend(config)
backend.on_start()
spotify_mock.EventLoop.assert_called_once_with(backend._session)
spotify_mock.EventLoop.return_value.start.assert_called_once_with()
def test_on_start_logs_in(spotify_mock, config):
backend = get_backend(config)
backend.on_start()
spotify_mock.Session.return_value.login.assert_called_once_with(
'alice', 'password')
def test_on_stop_logs_out_and_waits_for_logout_to_complete(
spotify_mock, config, caplog):
backend = get_backend(config)
backend._logged_out = mock.Mock()
backend.on_stop()
assert 'Logging out of Spotify' in caplog.text()
backend._session.logout.assert_called_once_with()
backend._logged_out.wait.assert_called_once_with()
backend._event_loop.stop.assert_called_once_with()
def test_on_connection_state_changed_when_logged_out(spotify_mock, caplog):
session_mock = spotify_mock.Session.return_value
session_mock.connection.state = spotify_mock.ConnectionState.LOGGED_OUT
logged_in_event = threading.Event()
logged_out_event = threading.Event()
actor_mock = mock.Mock(spec=backend.SpotifyBackend)
backend.on_connection_state_changed(
session_mock, logged_in_event, logged_out_event, actor_mock)
assert 'Logged out of Spotify' in caplog.text()
assert not logged_in_event.is_set()
assert logged_out_event.is_set()
def test_on_connection_state_changed_when_logged_in(spotify_mock, caplog):
session_mock = spotify_mock.Session.return_value
session_mock.connection.state = spotify_mock.ConnectionState.LOGGED_IN
logged_in_event = threading.Event()
logged_out_event = threading.Event()
actor_mock = mock.Mock(spec=backend.SpotifyBackend)
backend.on_connection_state_changed(
session_mock, logged_in_event, logged_out_event, actor_mock)
assert 'Logged in to Spotify in online mode' in caplog.text()
assert logged_in_event.is_set()
assert not logged_out_event.is_set()
actor_mock.on_logged_in.assert_called_once_with()
def test_on_connection_state_changed_when_disconnected(spotify_mock, caplog):
session_mock = spotify_mock.Session.return_value
session_mock.connection.state = spotify_mock.ConnectionState.DISCONNECTED
logged_in_event = threading.Event()
logged_out_event = threading.Event()
actor_mock = mock.Mock(spec=backend.SpotifyBackend)
backend.on_connection_state_changed(
session_mock, logged_in_event, logged_out_event, actor_mock)
assert 'Disconnected from Spotify' in caplog.text()
def test_on_connection_state_changed_when_offline(spotify_mock, caplog):
session_mock = spotify_mock.Session.return_value
session_mock.connection.state = spotify_mock.ConnectionState.OFFLINE
logged_in_event = threading.Event()
logged_out_event = threading.Event()
actor_mock = mock.Mock(spec=backend.SpotifyBackend)
backend.on_connection_state_changed(
session_mock, logged_in_event, logged_out_event, actor_mock)
assert 'Logged in to Spotify in offline mode' in caplog.text()
assert logged_in_event.is_set()
assert not logged_out_event.is_set()
def test_on_logged_in_event_activates_private_session(
spotify_mock, config, caplog):
session_mock = spotify_mock.Session.return_value
private_session_mock = mock.PropertyMock()
type(session_mock.social).private_session = private_session_mock
config['spotify']['private_session'] = True
backend = get_backend(config, session_mock)
backend.on_logged_in()
assert 'Spotify private session activated' in caplog.text()
private_session_mock.assert_called_once_with(True)
def test_on_logged_in_event_adds_playlist_container_loaded_handler(
spotify_mock, config):
session_mock = spotify_mock.Session.return_value
backend = get_backend(config, session_mock)
backend.on_logged_in()
assert (mock.call(
spotify_mock.PlaylistContainerEvent.CONTAINER_LOADED,
playlists.on_container_loaded)
in session_mock.playlist_container.on.call_args_list)
def test_on_logged_in_event_adds_playlist_added_handler(
spotify_mock, config):
session_mock = spotify_mock.Session.return_value
backend = get_backend(config, session_mock)
backend.on_logged_in()
assert (mock.call(
spotify_mock.PlaylistContainerEvent.PLAYLIST_ADDED,
playlists.on_playlist_added)
in session_mock.playlist_container.on.call_args_list)
def test_on_logged_in_event_adds_playlist_removed_handler(
spotify_mock, config):
session_mock = spotify_mock.Session.return_value
backend = get_backend(config, session_mock)
backend.on_logged_in()
assert (mock.call(
spotify_mock.PlaylistContainerEvent.PLAYLIST_REMOVED,
playlists.on_playlist_removed)
in session_mock.playlist_container.on.call_args_list)
def test_on_logged_in_event_adds_playlist_moved_handler(
spotify_mock, config):
session_mock = spotify_mock.Session.return_value
backend = get_backend(config, session_mock)
backend.on_logged_in()
assert (mock.call(
spotify_mock.PlaylistContainerEvent.PLAYLIST_MOVED,
playlists.on_playlist_moved)
in session_mock.playlist_container.on.call_args_list)
def test_on_play_token_lost_messages_the_actor(spotify_mock, caplog):
session_mock = spotify_mock.Session.return_value
actor_mock = mock.Mock(spec=backend.SpotifyBackend)
backend.on_play_token_lost(session_mock, actor_mock)
assert 'Spotify play token lost' in caplog.text()
actor_mock.on_play_token_lost.assert_called_once_with()
def test_on_play_token_lost_event_when_playing(spotify_mock, config, caplog):
session_mock = spotify_mock.Session.return_value
session_mock.player.state = spotify_mock.PlayerState.PLAYING
backend = get_backend(config, session_mock)
backend.playback = mock.Mock(spec=playback.SpotifyPlaybackProvider)
backend.on_play_token_lost()
assert (
'Spotify has been paused because your account is '
'being used somewhere else.' in caplog.text())
backend.playback.pause.assert_called_once_with()
def test_on_play_token_lost_event_when_not_playing(
spotify_mock, config, caplog):
session_mock = spotify_mock.Session.return_value
session_mock.player.state = spotify_mock.PlayerState.UNLOADED
backend = get_backend(config, session_mock)
backend.playback = mock.Mock(spec=playback.SpotifyPlaybackProvider)
backend.on_play_token_lost()
assert 'Spotify has been paused' not in caplog.text()
assert backend.playback.pause.call_count == 0
|
jodal/mopidy-spotify
|
tests/test_backend.py
|
Python
|
apache-2.0
| 11,347
|
from cart import models
from django.test import TestCase, RequestFactory, Client
from models import Cart, Item
from django.contrib.auth.models import User, AnonymousUser
import datetime
from decimal import Decimal
from cart import Cart
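# Note: the session-backed Cart imported here shadows the model Cart imported
# above; the model class remains available as models.Cart.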
class CartAndItemModelsTestCase(TestCase):
def setUp(self):
self.client = Client()
self.request = RequestFactory()
self.request.user = AnonymousUser()
self.request.session = {}
    def _create_cart_in_database(self, creation_date=None, checked_out=False):
        """
        Helper function so I don't repeat myself
        """
        cart = models.Cart()
        # Default to "now" at call time rather than at class definition time.
        cart.creation_date = creation_date or datetime.datetime.now()
        cart.checked_out = checked_out
        cart.save()
        return cart
def _create_item_in_database(self, cart, product, quantity=1,
unit_price=Decimal("100")):
"""
Helper function so I don't repeat myself
"""
item = Item()
item.cart = cart
item.product = product
item.quantity = quantity
item.unit_price = unit_price
item.save()
return item
def _create_user_in_database(self):
"""
Helper function so I don't repeat myself
"""
user = User(username="user_for_sell", password="sold",
email="example@example.com")
user.save()
return user
def test_cart_creation(self):
creation_date = datetime.datetime.now()
cart = self._create_cart_in_database(creation_date)
id = cart.id
cart_from_database = models.Cart.objects.get(pk=id)
self.assertEquals(cart, cart_from_database)
def test_item_creation_and_association_with_cart(self):
"""
This test is a little bit tricky since the Item tracks
any model via django's content type framework. This was
made in order to enable you to associate an item in the
cart with your product model.
        As I won't make a product model here, I will assume my test
store sells django users (django.contrib.auth.models.User)
(lol) so I can test that this is working.
So if you are reading this test to understand the API,
you just need to change the user for your product model
in your code and you're good to go.
"""
user = self._create_user_in_database()
cart = self._create_cart_in_database()
item = self._create_item_in_database(cart, user, quantity=1, unit_price=Decimal("100"))
# get the first item in the cart
item_in_cart = cart.item_set.all()[0]
self.assertEquals(item_in_cart, item,
"First item in cart should be equal the item we created")
self.assertEquals(item_in_cart.product, user,
"Product associated with the first item in cart should equal the user we're selling")
self.assertEquals(item_in_cart.unit_price, Decimal("100"),
"Unit price of the first item stored in the cart should equal 100")
self.assertEquals(item_in_cart.quantity, 1,
"The first item in cart should have 1 in it's quantity")
def test_total_item_price(self):
"""
        Since the unit price is a Decimal field, prefer to pass unit
        prices as instances of decimal.Decimal rather than as plain
        numbers.
"""
user = self._create_user_in_database()
cart = self._create_cart_in_database()
        # Not safe: the field is a Decimal type. Passing an integer happens to
        # work, but passing a float does not.
item_with_unit_price_as_integer = self._create_item_in_database(cart, product=user, quantity=3, unit_price=100)
self.assertEquals(item_with_unit_price_as_integer.total_price, 300)
# this is the right way to associate unit prices
item_with_unit_price_as_decimal = self._create_item_in_database(cart,
product=user, quantity=4, unit_price=Decimal("3.20"))
self.assertEquals(item_with_unit_price_as_decimal.total_price, Decimal("12.80"))
def test_update_cart(self):
user = self._create_user_in_database()
cart = Cart(self.request)
cart.new(self.request)
cart.add(product=user, quantity=3, unit_price=100)
cart.update(product=user, quantity=2, unit_price=200)
self.assertEquals(cart.summary(), 400)
self.assertEquals(cart.count(), 2)
def test_item_unicode(self):
user = self._create_user_in_database()
cart = self._create_cart_in_database()
item = self._create_item_in_database(cart, product=user, quantity=3, unit_price=Decimal("100"))
self.assertEquals(item.__unicode__(), "3 units of User")
|
thodoris/djangoPharma
|
djangoPharma/env/Lib/site-packages/cart/tests.py
|
Python
|
apache-2.0
| 4,851
|
from django.db import models
from django_peeringdb.models.concrete import NetworkIXLan
class Peering(models.Model):
netixlan = models.OneToOneField(
NetworkIXLan, db_index=True, on_delete=models.CASCADE
)
router = models.CharField(max_length=255, db_index=True)
def __repr__(self):
peer_name = self.netixlan.net.name
ixp_name = self.netixlan.ixlan.ix.name
return f"Peering with {peer_name} at {ixp_name}"
|
paravoid/peerassist
|
peercollect/models.py
|
Python
|
apache-2.0
| 457
|
from ..cli import *
import click
import sys
import logging
import context
from ..api import packs as packs_api
from ..api import templates as templates_api
logger = logging.getLogger(__name__)
@cli.group('install')
def install():
"""installs things"""
@click.command(short_help="Install template")
@click.argument('name')
@click.option('--yes', is_flag=True)
def template(name, yes):
try:
if not yes:
click.confirm('This will uninstall and re-install the template as a pack. Are you sure?', abort=True)
pack_list = []
for pack in packs_api.get_packs(**context.settings):
pack_list.append(pack['name'])
if name in pack_list:
resp = packs_api.delete_pack(name=name, **context.settings)
if resp.status_code == 204:
click.echo('Deleted pack ' + name)
else:
click.echo('Error deleting ' + name + '. Status Code: ' + click.style(
str(resp.status_code),
fg='red'))
resp = templates_api.install_template(name=name, **context.settings)
if resp.status_code == 200:
click.echo('Installed pack ' + name)
else:
click.echo('Error installing ' + name + '. Status Code: ' + click.style(
str(resp.status_code),
fg='red'))
except Exception, e:
print 'Install template failed. %s' % e
sys.exit(1)
install.add_command(template)
|
dataloop/dlcli
|
dlcli/cli/install.py
|
Python
|
apache-2.0
| 1,491
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
import time
import logging
import string
import urlparse
from urllib import quote_plus
from lxml import html
from django.http import HttpResponseRedirect, HttpResponse
from django.utils.functional import wraps
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from desktop.log.access import access_warn, access_log_level
from desktop.lib.rest.http_client import RestException
from desktop.lib.rest.resource import Resource
from desktop.lib.django_util import render_json, render, copy_query_dict, encode_json_for_js
from desktop.lib.exceptions import MessageException
from desktop.lib.exceptions_renderable import PopupException
from desktop.views import register_status_bar_view
from hadoop import cluster
from hadoop.api.jobtracker.ttypes import ThriftJobPriority, TaskTrackerNotFoundException, ThriftJobState
from hadoop.yarn.clients import get_log_client
from jobbrowser import conf
from jobbrowser.api import get_api, ApplicationNotRunning
from jobbrowser.models import Job, JobLinkage, Tracker, Cluster, can_view_job, can_modify_job
import urllib2
def check_job_permission(view_func):
"""
Ensure that the user has access to the job.
Assumes that the wrapped function takes a 'jobid' param named 'job'.
"""
def decorate(request, *args, **kwargs):
jobid = kwargs['job']
try:
job = get_api(request.user, request.jt).get_job(jobid=jobid)
except ApplicationNotRunning, e:
# reverse() seems broken, using request.path but beware, it discards GET and POST info
return job_not_assigned(request, jobid, request.path)
except Exception, e:
raise PopupException(_('Could not find job %s.') % jobid, detail=e)
if not conf.SHARE_JOBS.get() and not request.user.is_superuser \
and job.user != request.user.username and not can_view_job(request.user.username, job):
raise PopupException(_("You don't have permission to access job %(id)s.") % {'id': jobid})
kwargs['job'] = job
return view_func(request, *args, **kwargs)
return wraps(view_func)(decorate)
def job_not_assigned(request, jobid, path):
if request.GET.get('format') == 'json':
result = {'status': -1, 'message': ''}
try:
get_api(request.user, request.jt).get_job(jobid=jobid)
result['status'] = 0
except ApplicationNotRunning, e:
result['status'] = 1
except Exception, e:
result['message'] = _('Error polling job %s: %s') % (jobid, e)
return HttpResponse(encode_json_for_js(result), mimetype="application/json")
else:
return render('job_not_assigned.mako', request, {'jobid': jobid, 'path': path})
def jobs(request):
user = request.GET.get('user', request.user.username)
state = request.GET.get('state')
text = request.GET.get('text')
retired = request.GET.get('retired')
if request.GET.get('format') == 'json':
jobs = get_api(request.user, request.jt).get_jobs(user=request.user, username=user, state=state, text=text, retired=retired)
json_jobs = [massage_job_for_json(job, request) for job in jobs]
return HttpResponse(encode_json_for_js(json_jobs), mimetype="application/json")
return render('jobs.mako', request, {
'request': request,
'state_filter': state,
'user_filter': user,
'text_filter': text,
'retired': retired,
'filtered': not (state == 'all' and user == '' and text == ''),
'is_yarn': cluster.is_yarn()
})
def massage_job_for_json(job, request):
job = {
'id': job.jobId,
'shortId': job.jobId_short,
'name': hasattr(job, 'jobName') and job.jobName or '',
'status': job.status,
'url': job.jobId and reverse('jobbrowser.views.single_job', kwargs={'job': job.jobId}) or '',
'logs': job.jobId and reverse('jobbrowser.views.job_single_logs', kwargs={'job': job.jobId}) or '',
'queueName': hasattr(job, 'queueName') and job.queueName or _('N/A'),
'priority': hasattr(job, 'priority') and job.priority.lower() or _('N/A'),
'user': job.user,
'isRetired': job.is_retired,
'isMR2': job.is_mr2,
'mapProgress': hasattr(job, 'mapProgress') and job.mapProgress or '',
'reduceProgress': hasattr(job, 'reduceProgress') and job.reduceProgress or '',
'setupProgress': hasattr(job, 'setupProgress') and job.setupProgress or '',
'cleanupProgress': hasattr(job, 'cleanupProgress') and job.cleanupProgress or '',
'desiredMaps': job.desiredMaps,
'desiredReduces': job.desiredReduces,
'applicationType': hasattr(job, 'applicationType') and job.applicationType or None,
'mapsPercentComplete': int(job.maps_percent_complete) if job.maps_percent_complete else '',
'finishedMaps': job.finishedMaps,
'finishedReduces': job.finishedReduces,
'reducesPercentComplete': int(job.reduces_percent_complete) if job.reduces_percent_complete else '',
'jobFile': hasattr(job, 'jobFile') and job.jobFile or '',
'launchTimeMs': hasattr(job, 'launchTimeMs') and job.launchTimeMs or '',
'launchTimeFormatted': hasattr(job, 'launchTimeFormatted') and job.launchTimeFormatted or '',
'startTimeMs': hasattr(job, 'startTimeMs') and job.startTimeMs or '',
'startTimeFormatted': hasattr(job, 'startTimeFormatted') and job.startTimeFormatted or '',
'finishTimeMs': hasattr(job, 'finishTimeMs') and job.finishTimeMs or '',
'finishTimeFormatted': hasattr(job, 'finishTimeFormatted') and job.finishTimeFormatted or '',
'durationFormatted': hasattr(job, 'durationFormatted') and job.durationFormatted or '',
'durationMs': hasattr(job, 'durationInMillis') and job.durationInMillis or '',
'canKill': job.status.lower() in ('running', 'pending') and (request.user.is_superuser or request.user.username == job.user or can_modify_job(request.user.username, job)),
'killUrl': job.jobId and reverse('jobbrowser.views.kill_job', kwargs={'job': job.jobId}) or ''
}
return job
def massage_task_for_json(task):
task = {
'id': task.taskId,
'shortId': task.taskId_short,
'url': task.taskId and reverse('jobbrowser.views.single_task', kwargs={'job': task.jobId, 'taskid': task.taskId}) or '',
'logs': task.taskAttemptIds and reverse('jobbrowser.views.single_task_attempt_logs', kwargs={'job': task.jobId, 'taskid': task.taskId, 'attemptid': task.taskAttemptIds[-1]}) or '',
'type': task.taskType
}
return task
def single_spark_job(request, job):
if request.REQUEST.get('format') == 'json':
json_job = {
'job': massage_job_for_json(job, request)
}
return HttpResponse(encode_json_for_js(json_job), mimetype="application/json")
else:
return render('job.mako', request, {
'request': request,
'job': job
})
@check_job_permission
def single_job(request, job):
def cmp_exec_time(task1, task2):
return cmp(task1.execStartTimeMs, task2.execStartTimeMs)
if job.applicationType == 'SPARK':
return single_spark_job(request, job)
failed_tasks = job.filter_tasks(task_states=('failed',))
failed_tasks.sort(cmp_exec_time)
recent_tasks = job.filter_tasks(task_states=('running', 'succeeded',))
recent_tasks.sort(cmp_exec_time, reverse=True)
if request.REQUEST.get('format') == 'json':
json_failed_tasks = [massage_task_for_json(task) for task in failed_tasks]
json_recent_tasks = [massage_task_for_json(task) for task in recent_tasks]
json_job = {
'job': massage_job_for_json(job, request),
'failedTasks': json_failed_tasks,
'recentTasks': json_recent_tasks
}
return HttpResponse(encode_json_for_js(json_job), mimetype="application/json")
return render('job.mako', request, {
'request': request,
'job': job,
'failed_tasks': failed_tasks and failed_tasks[:5] or [],
'recent_tasks': recent_tasks and recent_tasks[:5] or [],
})
@check_job_permission
def job_counters(request, job):
return render("counters.html", request, {"counters": job.counters})
@access_log_level(logging.WARN)
@check_job_permission
def kill_job(request, job):
if request.method != "POST":
raise Exception(_("kill_job may only be invoked with a POST (got a %(method)s).") % {'method': request.method})
if job.user != request.user.username and not request.user.is_superuser:
access_warn(request, _('Insufficient permission'))
raise MessageException(_("Permission denied. User %(username)s cannot delete user %(user)s's job.") % {'username': request.user.username, 'user': job.user})
job.kill()
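  # Poll for up to 15 seconds until the job leaves the RUNNING/QUEUED states
  # before responding; give up with an error if it never does.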
cur_time = time.time()
api = get_api(request.user, request.jt)
while time.time() - cur_time < 15:
job = api.get_job(jobid=job.jobId)
if job.status not in ["RUNNING", "QUEUED"]:
if request.REQUEST.get("next"):
return HttpResponseRedirect(request.REQUEST.get("next"))
elif request.REQUEST.get("format") == "json":
return HttpResponse(encode_json_for_js({'status': 0}), mimetype="application/json")
else:
raise MessageException("Job Killed")
time.sleep(1)
raise Exception(_("Job did not appear as killed within 15 seconds."))
@check_job_permission
def job_attempt_logs(request, job, attempt_index=0):
return render("job_attempt_logs.mako", request, {
"attempt_index": attempt_index,
"job": job,
})
@check_job_permission
def job_attempt_logs_json(request, job, attempt_index=0, name='syslog', offset=0):
"""For async log retrieval as Yarn servers are very slow"""
try:
attempt_index = int(attempt_index)
attempt = job.job_attempts['jobAttempt'][attempt_index]
log_link = attempt['logsLink']
except (KeyError, RestException), e:
raise KeyError(_("Cannot find job attempt '%(id)s'.") % {'id': job.jobId}, e)
link = '/%s/' % name
params = {}
if offset and int(offset) >= 0:
params['start'] = offset
root = Resource(get_log_client(log_link), urlparse.urlsplit(log_link)[2], urlencode=False)
try:
response = root.get(link, params=params)
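    # The log endpoint returns an HTML page; the log text itself sits in the
    # second table cell of the page body.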
log = html.fromstring(response).xpath('/html/body/table/tbody/tr/td[2]')[0].text_content()
except Exception, e:
log = _('Failed to retrieve log: %s') % e
response = {'log': log}
return HttpResponse(json.dumps(response), mimetype="application/json")
@check_job_permission
def job_single_logs(request, job):
"""
Try to smartly detect the most useful task attempt (e.g. Oozie launcher, failed task) and get its MR logs.
"""
def cmp_exec_time(task1, task2):
return cmp(task1.execStartTimeMs, task2.execStartTimeMs)
task = None
failed_tasks = job.filter_tasks(task_states=('failed',))
failed_tasks.sort(cmp_exec_time)
if failed_tasks:
task = failed_tasks[0]
else:
task_states = ['running', 'succeeded']
if job.is_mr2:
task_states.append('scheduled')
recent_tasks = job.filter_tasks(task_states=task_states, task_types=('map', 'reduce',))
recent_tasks.sort(cmp_exec_time, reverse=True)
if recent_tasks:
task = recent_tasks[0]
if task is None or not task.taskAttemptIds:
raise PopupException(_("No tasks found for job %(id)s.") % {'id': job.jobId})
return single_task_attempt_logs(request, **{'job': job.jobId, 'taskid': task.taskId, 'attemptid': task.taskAttemptIds[-1]})
@check_job_permission
def tasks(request, job):
"""
We get here from /jobs/job/tasks?filterargs, with the options being:
page=<n> - Controls pagination. Defaults to 1.
tasktype=<type> - Type can be one of hadoop.job_tracker.VALID_TASK_TYPES
("map", "reduce", "job_cleanup", "job_setup")
taskstate=<state> - State can be one of hadoop.job_tracker.VALID_TASK_STATES
("succeeded", "failed", "running", "pending", "killed")
tasktext=<text> - Where <text> is a string matching info on the task
"""
ttypes = request.GET.get('tasktype')
tstates = request.GET.get('taskstate')
ttext = request.GET.get('tasktext')
pagenum = int(request.GET.get('page', 1))
pagenum = pagenum > 0 and pagenum or 1
filters = {
'task_types': ttypes and set(ttypes.split(',')) or None,
'task_states': tstates and set(tstates.split(',')) or None,
'task_text': ttext,
'pagenum': pagenum,
}
jt = get_api(request.user, request.jt)
task_list = jt.get_tasks(job.jobId, **filters)
page = jt.paginate_task(task_list, pagenum)
filter_params = copy_query_dict(request.GET, ('tasktype', 'taskstate', 'tasktext')).urlencode()
return render("tasks.mako", request, {
'request': request,
'filter_params': filter_params,
'job': job,
'page': page,
'tasktype': ttypes,
'taskstate': tstates,
'tasktext': ttext
})
@check_job_permission
def single_task(request, job, taskid):
jt = get_api(request.user, request.jt)
job_link = jt.get_job_link(job.jobId)
task = job_link.get_task(taskid)
return render("task.mako", request, {
'task': task,
'joblnk': job_link
})
@check_job_permission
def single_task_attempt(request, job, taskid, attemptid):
jt = get_api(request.user, request.jt)
job_link = jt.get_job_link(job.jobId)
task = job_link.get_task(taskid)
try:
attempt = task.get_attempt(attemptid)
except (KeyError, RestException), e:
raise PopupException(_("Cannot find attempt '%(id)s' in task") % {'id': attemptid}, e)
return render("attempt.mako", request, {
"attempt": attempt,
"taskid": taskid,
"joblnk": job_link,
"task": task
})
@check_job_permission
def single_task_attempt_logs(request, job, taskid, attemptid):
jt = get_api(request.user, request.jt)
job_link = jt.get_job_link(job.jobId)
task = job_link.get_task(taskid)
try:
attempt = task.get_attempt(attemptid)
except (KeyError, RestException), e:
raise KeyError(_("Cannot find attempt '%(id)s' in task") % {'id': attemptid}, e)
first_log_tab = 0
try:
# Add a diagnostic log
if job_link.is_mr2:
diagnostic_log = attempt.diagnostics
else:
diagnostic_log = ", ".join(task.diagnosticMap[attempt.attemptId])
logs = [diagnostic_log]
# Add remaining logs
logs += [section.strip() for section in attempt.get_task_log()]
log_tab = [i for i, log in enumerate(logs) if log]
if log_tab:
first_log_tab = log_tab[0]
except TaskTrackerNotFoundException:
# Four entries,
# for diagnostic, stdout, stderr and syslog
logs = [_("Failed to retrieve log. TaskTracker not found.")] * 4
except urllib2.URLError:
logs = [_("Failed to retrieve log. TaskTracker not ready.")] * 4
context = {
"attempt": attempt,
"taskid": taskid,
"joblnk": job_link,
"task": task,
"logs": logs,
"first_log_tab": first_log_tab,
}
if request.GET.get('format') == 'python':
return context
elif request.GET.get('format') == 'json':
response = {
"logs": logs,
"isRunning": job.status.lower() in ('running', 'pending', 'prep')
}
return HttpResponse(json.dumps(response), mimetype="application/json")
else:
return render("attempt_logs.mako", request, context)
@check_job_permission
def task_attempt_counters(request, job, taskid, attemptid):
"""
We get here from /jobs/jobid/tasks/taskid/attempts/attemptid/counters
(phew!)
"""
job_link = JobLinkage(request.jt, job.jobId)
task = job_link.get_task(taskid)
attempt = task.get_attempt(attemptid)
counters = {}
if attempt:
counters = attempt.counters
return render("counters.html", request, {'counters':counters})
@access_log_level(logging.WARN)
def kill_task_attempt(request, attemptid):
"""
We get here from /jobs/jobid/tasks/taskid/attempts/attemptid/kill
TODO: security
"""
ret = request.jt.kill_task_attempt(request.jt.thriftattemptid_from_string(attemptid))
return render_json({})
def trackers(request):
"""
We get here from /trackers
"""
trackers = get_tasktrackers(request)
return render("tasktrackers.mako", request, {'trackers':trackers})
def single_tracker(request, trackerid):
jt = get_api(request.user, request.jt)
try:
tracker = jt.get_tracker(trackerid)
except Exception, e:
raise PopupException(_('The tracker could not be contacted.'), detail=e)
return render("tasktracker.mako", request, {'tracker':tracker})
def container(request, node_manager_http_address, containerid):
jt = get_api(request.user, request.jt)
try:
tracker = jt.get_tracker(node_manager_http_address, containerid)
except Exception, e:
# TODO: add a redirect of some kind
raise PopupException(_('The container disappears as soon as the job finishes.'), detail=e)
return render("container.mako", request, {'tracker':tracker})
def clusterstatus(request):
"""
We get here from /clusterstatus
"""
return render("clusterstatus.html", request, Cluster(request.jt))
def queues(request):
"""
We get here from /queues
"""
return render("queues.html", request, { "queuelist" : request.jt.queues()})
@check_job_permission
def set_job_priority(request, job):
"""
We get here from /jobs/job/setpriority?priority=PRIORITY
"""
priority = request.GET.get("priority")
jid = request.jt.thriftjobid_from_string(job.jobId)
request.jt.set_job_priority(jid, ThriftJobPriority._NAMES_TO_VALUES[priority])
return render_json({})
CONF_VARIABLE_REGEX = r"\$\{(.+)\}"
def make_substitutions(conf):
"""
  Substitute occurrences of ${foo} with conf[foo], recursively, in all the
  values of the conf dict.
  Note that the Java code may also substitute Java properties, which this
  code does not do.
"""
r = re.compile(CONF_VARIABLE_REGEX)
def sub(s, depth=0):
    # Malformed / malicious confs could make this recurse indefinitely
if depth > 100:
logging.warn("Max recursion depth exceeded when substituting jobconf value: %s" % s)
return s
m = r.search(s)
if m:
for g in [g for g in m.groups() if g in conf]:
substr = "${%s}" % g
s = s.replace(substr, sub(conf[g], depth+1))
return s
for k, v in conf.items():
conf[k] = sub(v)
return conf
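# For example, make_substitutions({'a': '${b}/x', 'b': 'base'}) rewrites the
# value of 'a' to 'base/x' and leaves 'b' untouched.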
##################################
## Helper functions
def get_shorter_id(hadoop_job_id):
return "_".join(hadoop_job_id.split("_")[-2:])
def format_counter_name(s):
"""
Makes counter/config names human readable:
FOOBAR_BAZ -> "Foobar Baz"
foo_barBaz -> "Foo Bar Baz"
"""
def splitCamels(s):
""" Convert "fooBar" to "foo bar" """
return re.sub(r'[a-z][A-Z]',
lambda x: x.group(0)[0] + " " + x.group(0)[1].lower(),
s)
return string.capwords(re.sub('_', ' ', splitCamels(s)).lower())
def get_state_link(request, option=None, val='', VALID_OPTIONS = ("state", "user", "text", "taskstate")):
"""
  Construct the query string representing the current filter state of the jobs page.
  Pass in the request and an optional option/value pair; these are used to create
  links that turn a filter on while preserving the other current settings.
"""
states = []
val = quote_plus(val)
assert option is None or option in VALID_OPTIONS
states = dict()
for o in VALID_OPTIONS:
if o in request.GET:
states[o] = request.GET[o]
if option is not None:
states[option] = val
return "&".join([ "%s=%s" % (key, quote_plus(value)) for key, value in states.iteritems() ])
## All Unused below
# DEAD?
def dock_jobs(request):
username = request.user.username
matching_jobs = get_job_count_by_state(request, username)
return render("jobs_dock_info.mako", request, {
'jobs': matching_jobs
}, force_template=True)
register_status_bar_view(dock_jobs)
def get_tasktrackers(request):
"""
  Return a list of Tracker objects wrapping all task trackers
"""
return [ Tracker(tracker) for tracker in request.jt.all_task_trackers().trackers]
def get_single_job(request, jobid):
"""
Returns the job which matches jobid.
"""
return Job.from_id(jt=request.jt, jobid=jobid)
def get_job_count_by_state(request, username):
"""
  Returns the number of completed, running, and failed jobs for a user.
"""
res = {
'completed': 0,
'running': 0,
'failed': 0,
'killed': 0,
'all': 0
}
jobcounts = request.jt.get_job_count_by_user(username)
res['completed'] = jobcounts.nSucceeded
res['running'] = jobcounts.nPrep + jobcounts.nRunning
res['failed'] = jobcounts.nFailed
res['killed'] = jobcounts.nKilled
res['all'] = res['completed'] + res['running'] + res['failed'] + res['killed']
return res
def jobbrowser(request):
"""
  A jobbrowser.jsp-alike view.
"""
# TODO(bc): Is this view even reachable?
def check_job_state(state):
return lambda job: job.status == state
status = request.jt.cluster_status()
alljobs = [] #get_matching_jobs(request)
runningjobs = filter(check_job_state('RUNNING'), alljobs)
completedjobs = filter(check_job_state('COMPLETED'), alljobs)
failedjobs = filter(check_job_state('FAILED'), alljobs)
killedjobs = filter(check_job_state('KILLED'), alljobs)
jobqueues = request.jt.queues()
return render("jobbrowser.html", request, {
"clusterstatus" : status,
"queues" : jobqueues,
"alljobs" : alljobs,
"runningjobs" : runningjobs,
"failedjobs" : failedjobs,
"killedjobs" : killedjobs,
"completedjobs" : completedjobs
})
|
yongshengwang/builthue
|
apps/jobbrowser/src/jobbrowser/views.py
|
Python
|
apache-2.0
| 22,012
|
#!/usr/bin/env python
# Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import h5py
import json
import os
import imageio
import sys
import cityscapesscripts.evaluation.instances2dict_with_polygons as cs
import detectron.utils.segms as segms_util
import detectron.utils.boxes as bboxs_util
def parse_args():
parser = argparse.ArgumentParser(description='Convert dataset')
parser.add_argument(
'--dataset', help="cocostuff, cityscapes", default=None, type=str)
parser.add_argument(
'--outdir', help="output dir for json files", default=None, type=str)
parser.add_argument(
'--datadir', help="data dir for annotations to be converted",
default=None, type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def convert_coco_stuff_mat(data_dir, out_dir):
"""Convert to png and save json with path. This currently only contains
the segmentation labels for objects+stuff in cocostuff - if we need to
combine with other labels from original COCO that will be a TODO."""
sets = ['train', 'val']
categories = []
json_name = 'coco_stuff_%s.json'
ann_dict = {}
for data_set in sets:
file_list = os.path.join(data_dir, '%s.txt')
images = []
with open(file_list % data_set) as f:
for img_id, img_name in enumerate(f):
img_name = img_name.replace('coco', 'COCO').strip('\n')
image = {}
mat_file = os.path.join(
data_dir, 'annotations/%s.mat' % img_name)
data = h5py.File(mat_file, 'r')
labelMap = data.get('S')
if len(categories) == 0:
labelNames = data.get('names')
for idx, n in enumerate(labelNames):
categories.append(
{"id": idx, "name": ''.join(chr(i) for i in data[
n[0]])})
ann_dict['categories'] = categories
imageio.imsave(
os.path.join(data_dir, img_name + '.png'), labelMap)
image['width'] = labelMap.shape[0]
image['height'] = labelMap.shape[1]
image['file_name'] = img_name
image['seg_file_name'] = img_name
image['id'] = img_id
images.append(image)
ann_dict['images'] = images
print("Num images: %s" % len(images))
with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile:
outfile.write(json.dumps(ann_dict))
# for Cityscapes
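# Cityscapes encodes instances as labelID * 1000 + instance index, while plain
# semantic labels stay below 1000; getLabelID recovers the label ID from either.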
def getLabelID(self, instID):
if (instID < 1000):
return instID
else:
return int(instID / 1000)
def convert_cityscapes_instance_only(
data_dir, out_dir):
"""Convert from cityscapes format to COCO instance seg format - polygons"""
sets = [
'gtFine_val',
# 'gtFine_train',
# 'gtFine_test',
# 'gtCoarse_train',
# 'gtCoarse_val',
# 'gtCoarse_train_extra'
]
ann_dirs = [
'gtFine_trainvaltest/gtFine/val',
# 'gtFine_trainvaltest/gtFine/train',
# 'gtFine_trainvaltest/gtFine/test',
# 'gtCoarse/train',
# 'gtCoarse/train_extra',
# 'gtCoarse/val'
]
json_name = 'instancesonly_filtered_%s.json'
ends_in = '%s_polygons.json'
img_id = 0
ann_id = 0
cat_id = 1
category_dict = {}
category_instancesonly = [
'person',
'rider',
'car',
'truck',
'bus',
'train',
'motorcycle',
'bicycle',
]
for data_set, ann_dir in zip(sets, ann_dirs):
print('Starting %s' % data_set)
ann_dict = {}
images = []
annotations = []
ann_dir = os.path.join(data_dir, ann_dir)
for root, _, files in os.walk(ann_dir):
for filename in files:
if filename.endswith(ends_in % data_set.split('_')[0]):
if len(images) % 50 == 0:
print("Processed %s images, %s annotations" % (
len(images), len(annotations)))
json_ann = json.load(open(os.path.join(root, filename)))
image = {}
image['id'] = img_id
img_id += 1
image['width'] = json_ann['imgWidth']
image['height'] = json_ann['imgHeight']
image['file_name'] = filename[:-len(
ends_in % data_set.split('_')[0])] + 'leftImg8bit.png'
image['seg_file_name'] = filename[:-len(
ends_in % data_set.split('_')[0])] + \
'%s_instanceIds.png' % data_set.split('_')[0]
images.append(image)
fullname = os.path.join(root, image['seg_file_name'])
objects = cs.instances2dict_with_polygons(
[fullname], verbose=False)[fullname]
for object_cls in objects:
if object_cls not in category_instancesonly:
continue # skip non-instance categories
for obj in objects[object_cls]:
if obj['contours'] == []:
print('Warning: empty contours.')
                                continue  # skip objects with empty contours
len_p = [len(p) for p in obj['contours']]
if min(len_p) <= 4:
print('Warning: invalid contours.')
                                continue  # skip objects with invalid contours
ann = {}
ann['id'] = ann_id
ann_id += 1
ann['image_id'] = image['id']
ann['segmentation'] = obj['contours']
if object_cls not in category_dict:
category_dict[object_cls] = cat_id
cat_id += 1
ann['category_id'] = category_dict[object_cls]
ann['iscrowd'] = 0
ann['area'] = obj['pixelCount']
ann['bbox'] = bboxs_util.xyxy_to_xywh(
segms_util.polys_to_boxes(
[ann['segmentation']])).tolist()[0]
annotations.append(ann)
ann_dict['images'] = images
categories = [{"id": category_dict[name], "name": name} for name in
category_dict]
ann_dict['categories'] = categories
ann_dict['annotations'] = annotations
print("Num categories: %s" % len(categories))
print("Num images: %s" % len(images))
print("Num annotations: %s" % len(annotations))
with open(os.path.join(out_dir, json_name % data_set), 'wb') as outfile:
outfile.write(json.dumps(ann_dict))
if __name__ == '__main__':
args = parse_args()
if args.dataset == "cityscapes_instance_only":
convert_cityscapes_instance_only(args.datadir, args.outdir)
elif args.dataset == "cocostuff":
convert_coco_stuff_mat(args.datadir, args.outdir)
else:
print("Dataset not supported: %s" % args.dataset)
|
facebookresearch/Detectron
|
tools/convert_cityscapes_to_coco.py
|
Python
|
apache-2.0
| 8,287
|
# -*- coding: utf-8 -*-
from openprocurement.api.utils import (
get_file,
upload_file,
update_file_content_type,
json_view,
context_unpack,
APIResource,
)
from openprocurement.api.validation import (
validate_file_update, validate_file_upload, validate_patch_document_data,
)
from openprocurement.tender.core.validation import (
validate_complaint_document_update_not_by_author,
validate_status_and_role_for_complaint_document_operation,
validate_award_complaint_document_operation_only_for_active_lots,
validate_award_complaint_document_operation_not_in_allowed_status
)
from openprocurement.tender.core.utils import (
save_tender, optendersresource, apply_patch,
)
from openprocurement.tender.belowthreshold.validation import (
validate_role_and_status_for_add_complaint_document
)
@optendersresource(name='belowThreshold:Tender Award Complaint Documents',
collection_path='/tenders/{tender_id}/awards/{award_id}/complaints/{complaint_id}/documents',
path='/tenders/{tender_id}/awards/{award_id}/complaints/{complaint_id}/documents/{document_id}',
procurementMethodType='belowThreshold',
description="Tender award complaint documents")
class TenderAwardComplaintDocumentResource(APIResource):
@json_view(permission='view_tender')
def collection_get(self):
"""Tender Award Complaint Documents List"""
if self.request.params.get('all', ''):
collection_data = [i.serialize("view") for i in self.context.documents]
else:
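            # The dict keyed by document id keeps only the last entry per id
            # (i.e. the latest version of each document) before sorting by
            # dateModified.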
collection_data = sorted(dict([
(i.id, i.serialize("view"))
for i in self.context.documents
]).values(), key=lambda i: i['dateModified'])
return {'data': collection_data}
@json_view(permission='edit_complaint', validators=(validate_file_upload, validate_award_complaint_document_operation_not_in_allowed_status,
validate_award_complaint_document_operation_only_for_active_lots, validate_role_and_status_for_add_complaint_document))
def collection_post(self):
"""Tender Award Complaint Document Upload
"""
document = upload_file(self.request)
document.author = self.request.authenticated_role
self.context.documents.append(document)
if save_tender(self.request):
self.LOGGER.info('Created tender award complaint document {}'.format(document.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_create'}, {'document_id': document.id}))
self.request.response.status = 201
document_route = self.request.matched_route.name.replace("collection_", "")
self.request.response.headers['Location'] = self.request.current_route_url(_route_name=document_route, document_id=document.id, _query={})
return {'data': document.serialize("view")}
@json_view(permission='view_tender')
def get(self):
"""Tender Award Complaint Document Read"""
if self.request.params.get('download'):
return get_file(self.request)
document = self.request.validated['document']
document_data = document.serialize("view")
document_data['previousVersions'] = [
i.serialize("view")
for i in self.request.validated['documents']
if i.url != document.url
]
return {'data': document_data}
@json_view(validators=(validate_file_update, validate_complaint_document_update_not_by_author, validate_award_complaint_document_operation_not_in_allowed_status, validate_award_complaint_document_operation_only_for_active_lots,
validate_status_and_role_for_complaint_document_operation), permission='edit_complaint')
def put(self):
"""Tender Award Complaint Document Update"""
document = upload_file(self.request)
document.author = self.request.authenticated_role
self.request.validated['complaint'].documents.append(document)
if save_tender(self.request):
self.LOGGER.info('Updated tender award complaint document {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_put'}))
return {'data': document.serialize("view")}
@json_view(content_type="application/json", validators=(validate_patch_document_data, validate_complaint_document_update_not_by_author, validate_award_complaint_document_operation_not_in_allowed_status,
validate_award_complaint_document_operation_only_for_active_lots, validate_status_and_role_for_complaint_document_operation), permission='edit_complaint')
def patch(self):
"""Tender Award Complaint Document Update"""
if apply_patch(self.request, src=self.request.context.serialize()):
update_file_content_type(self.request)
self.LOGGER.info('Updated tender award complaint document {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_award_complaint_document_patch'}))
return {'data': self.request.context.serialize("view")}
|
openprocurement/openprocurement.tender.belowthreshold
|
openprocurement/tender/belowthreshold/views/award_complaint_document.py
|
Python
|
apache-2.0
| 5,279
|
# python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_namespace_packages
from setuptools import setup
DESCRIPTION = """Orchestrate resources directly from the command-line."""
REQUIREMENTS = """
grpcio
grpcio-tools
requests
""".strip().split()
setup(
name='orchestratecli',
setup_requires=['setuptools_scm'],
use_scm_version=dict(root='..', relative_to=__file__),
description=DESCRIPTION,
long_description=DESCRIPTION,
author='Luis Artola',
author_email='luisartola@google.com',
url='https://github.com/GoogleCloudPlatform/solutions-cloud-orchestrate',
package_dir={'': 'src'},
packages=find_namespace_packages(where='src'),
entry_points=dict(
console_scripts=[
'orchestrate = orchestrate.main:main',
],
),
install_requires=REQUIREMENTS,
include_package_data=True,
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python :: 3',
'Operating System :: OS Independent',
'License :: Other/Proprietary License',
'Natural Language :: English',
'Topic :: System :: Systems Administration',
],
)
|
GoogleCloudPlatform/solutions-cloud-orchestrate
|
cli/setup.py
|
Python
|
apache-2.0
| 1,724
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import (
completion_stats as gca_completion_stats,
)
from google.cloud.aiplatform_v1beta1.types import encryption_spec as gca_encryption_spec
from google.cloud.aiplatform_v1beta1.types import explanation
from google.cloud.aiplatform_v1beta1.types import io
from google.cloud.aiplatform_v1beta1.types import job_state
from google.cloud.aiplatform_v1beta1.types import machine_resources
from google.cloud.aiplatform_v1beta1.types import (
manual_batch_tuning_parameters as gca_manual_batch_tuning_parameters,
)
from google.cloud.aiplatform_v1beta1.types import (
unmanaged_container_model as gca_unmanaged_container_model,
)
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1", manifest={"BatchPredictionJob",},
)
class BatchPredictionJob(proto.Message):
r"""A job that uses a
[Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to
produce predictions on multiple [input
instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
    If predictions for a significant portion of the instances fail, the
job may finish without attempting predictions for all remaining
instances.
Attributes:
name (str):
Output only. Resource name of the
BatchPredictionJob.
display_name (str):
Required. The user-defined name of this
BatchPredictionJob.
model (str):
            The name of the Model resource that produces the predictions
            via this job; it must share the same ancestor Location.
Starting this job has no impact on any existing deployments
of the Model and their resources. Exactly one of model and
unmanaged_container_model must be set.
unmanaged_container_model (google.cloud.aiplatform_v1beta1.types.UnmanagedContainerModel):
Contains model information necessary to perform batch
prediction without requiring uploading to model registry.
Exactly one of model and unmanaged_container_model must be
set.
input_config (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.InputConfig):
Required. Input configuration of the instances on which
predictions are performed. The schema of any single instance
may be specified via the
[Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
[instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri].
model_parameters (google.protobuf.struct_pb2.Value):
The parameters that govern the predictions. The schema of
the parameters may be specified via the
[Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
[parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri].
output_config (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.OutputConfig):
Required. The Configuration specifying where output
predictions should be written. The schema of any single
prediction may be specified as a concatenation of
[Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
[PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
[instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
and
[prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri].
dedicated_resources (google.cloud.aiplatform_v1beta1.types.BatchDedicatedResources):
The config of resources used by the Model during the batch
prediction. If the Model
[supports][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types]
            DEDICATED_RESOURCES, this config may be provided (and the job
            will use these resources); if the Model doesn't support
            AUTOMATIC_RESOURCES, this config must be provided.
manual_batch_tuning_parameters (google.cloud.aiplatform_v1beta1.types.ManualBatchTuningParameters):
Immutable. Parameters configuring the batch behavior.
Currently only applicable when
[dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources]
are used (in other cases Vertex AI does the tuning itself).
generate_explanation (bool):
Generate explanation with the batch prediction results.
When set to ``true``, the batch prediction output changes
based on the ``predictions_format`` field of the
[BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]
object:
- ``bigquery``: output includes a column named
``explanation``. The value is a struct that conforms to
the
[Explanation][google.cloud.aiplatform.v1beta1.Explanation]
object.
- ``jsonl``: The JSON objects on each line include an
additional entry keyed ``explanation``. The value of the
entry is a JSON object that conforms to the
[Explanation][google.cloud.aiplatform.v1beta1.Explanation]
object.
- ``csv``: Generating explanations for CSV format is not
supported.
If this field is set to true, either the
[Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]
or
[explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec]
must be populated.
explanation_spec (google.cloud.aiplatform_v1beta1.types.ExplanationSpec):
Explanation configuration for this BatchPredictionJob. Can
be specified only if
[generate_explanation][google.cloud.aiplatform.v1beta1.BatchPredictionJob.generate_explanation]
is set to ``true``.
This value overrides the value of
[Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec].
All fields of
[explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec]
are optional in the request. If a field of the
[explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec]
object is not populated, the corresponding field of the
[Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]
object is inherited.
output_info (google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.OutputInfo):
Output only. Information further describing
the output of this job.
state (google.cloud.aiplatform_v1beta1.types.JobState):
Output only. The detailed state of the job.
error (google.rpc.status_pb2.Status):
Output only. Only populated when the job's state is
JOB_STATE_FAILED or JOB_STATE_CANCELLED.
partial_failures (Sequence[google.rpc.status_pb2.Status]):
Output only. Partial failures encountered.
For example, single files that can't be read.
This field never exceeds 20 entries.
Status details fields contain standard GCP error
details.
resources_consumed (google.cloud.aiplatform_v1beta1.types.ResourcesConsumed):
Output only. Information about resources that
had been consumed by this job. Provided in real
time at best effort basis, as well as a final
value once the job completes.
            Note: This field currently may not be populated for batch
            predictions that use AutoML Models.
completion_stats (google.cloud.aiplatform_v1beta1.types.CompletionStats):
Output only. Statistics on completed and
failed prediction instances.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when the BatchPredictionJob
was created.
start_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when the BatchPredictionJob for the first
time entered the ``JOB_STATE_RUNNING`` state.
end_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when the BatchPredictionJob entered any of
the following states: ``JOB_STATE_SUCCEEDED``,
``JOB_STATE_FAILED``, ``JOB_STATE_CANCELLED``.
update_time (google.protobuf.timestamp_pb2.Timestamp):
Output only. Time when the BatchPredictionJob
was most recently updated.
labels (Sequence[google.cloud.aiplatform_v1beta1.types.BatchPredictionJob.LabelsEntry]):
The labels with user-defined metadata to
organize BatchPredictionJobs.
Label keys and values can be no longer than 64
characters (Unicode codepoints), can only
contain lowercase letters, numeric characters,
underscores and dashes. International characters
are allowed.
See https://goo.gl/xmQnxf for more information
and examples of labels.
encryption_spec (google.cloud.aiplatform_v1beta1.types.EncryptionSpec):
Customer-managed encryption key options for a
BatchPredictionJob. If this is set, then all
resources created by the BatchPredictionJob will
be encrypted with the provided encryption key.
"""
class InputConfig(proto.Message):
r"""Configures the input to
[BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
See
[Model.supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats]
for Model's supported input formats, and how instances should be
expressed via any of them.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_source (google.cloud.aiplatform_v1beta1.types.GcsSource):
The Cloud Storage location for the input
instances.
This field is a member of `oneof`_ ``source``.
bigquery_source (google.cloud.aiplatform_v1beta1.types.BigQuerySource):
The BigQuery location of the input table.
The schema of the table should be in the format
described by the given context OpenAPI Schema,
if one is provided. The table may contain
additional columns that are not described by the
schema, and they will be ignored.
This field is a member of `oneof`_ ``source``.
instances_format (str):
Required. The format in which instances are given, must be
one of the
[Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
[supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats].
"""
gcs_source = proto.Field(
proto.MESSAGE, number=2, oneof="source", message=io.GcsSource,
)
bigquery_source = proto.Field(
proto.MESSAGE, number=3, oneof="source", message=io.BigQuerySource,
)
instances_format = proto.Field(proto.STRING, number=1,)
class OutputConfig(proto.Message):
r"""Configures the output of
[BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
See
[Model.supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]
for supported output formats, and how predictions are expressed via
any of them.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_destination (google.cloud.aiplatform_v1beta1.types.GcsDestination):
The Cloud Storage location of the directory where the output
is to be written to. In the given directory a new directory
is created. Its name is
``prediction-<model-display-name>-<job-create-time>``, where
timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
Inside of it files ``predictions_0001.<extension>``,
``predictions_0002.<extension>``, ...,
``predictions_N.<extension>`` are created where
``<extension>`` depends on chosen
[predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format],
and N may equal 0001 and depends on the total number of
successfully predicted instances. If the Model has both
[instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
and
[prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]
schemata defined then each such file contains predictions as
per the
[predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format].
If prediction for any instance failed (partially or
completely), then an additional ``errors_0001.<extension>``,
``errors_0002.<extension>``,..., ``errors_N.<extension>``
files are created (N depends on total number of failed
predictions). These files contain the failed instances, as
per their schema, followed by an additional ``error`` field
which as value has [google.rpc.Status][google.rpc.Status]
containing only ``code`` and ``message`` fields.
This field is a member of `oneof`_ ``destination``.
bigquery_destination (google.cloud.aiplatform_v1beta1.types.BigQueryDestination):
The BigQuery project or dataset location where the output is
to be written to. If project is provided, a new dataset is
created with name
                ``prediction_<model-display-name>_<job-create-time>``, where
                the model display name is made BigQuery-dataset-name
                compatible (for example, most special characters become
                underscores), and timestamp is in
YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the
dataset two tables will be created, ``predictions``, and
``errors``. If the Model has both
[instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
and
[prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]
schemata defined then the tables have columns as follows:
The ``predictions`` table contains instances for which the
prediction succeeded, it has columns as per a concatenation
of the Model's instance and prediction schemata. The
``errors`` table contains rows for which the prediction has
failed, it has instance columns, as per the instance schema,
followed by a single "errors" column, which as values has
[google.rpc.Status][google.rpc.Status] represented as a
STRUCT, and containing only ``code`` and ``message``.
This field is a member of `oneof`_ ``destination``.
predictions_format (str):
Required. The format in which Vertex AI gives the
predictions, must be one of the
[Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
[supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats].
"""
gcs_destination = proto.Field(
proto.MESSAGE, number=2, oneof="destination", message=io.GcsDestination,
)
bigquery_destination = proto.Field(
proto.MESSAGE,
number=3,
oneof="destination",
message=io.BigQueryDestination,
)
predictions_format = proto.Field(proto.STRING, number=1,)
class OutputInfo(proto.Message):
r"""Further describes this job's output. Supplements
[output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config].
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
gcs_output_directory (str):
Output only. The full path of the Cloud
Storage directory created, into which the
prediction output is written.
This field is a member of `oneof`_ ``output_location``.
bigquery_output_dataset (str):
Output only. The path of the BigQuery dataset created, in
``bq://projectId.bqDatasetId`` format, into which the
prediction output is written.
This field is a member of `oneof`_ ``output_location``.
bigquery_output_table (str):
Output only. The name of the BigQuery table created, in
``predictions_<timestamp>`` format, into which the
prediction output is written. Can be used by UI to generate
the BigQuery output path, for example.
"""
gcs_output_directory = proto.Field(
proto.STRING, number=1, oneof="output_location",
)
bigquery_output_dataset = proto.Field(
proto.STRING, number=2, oneof="output_location",
)
bigquery_output_table = proto.Field(proto.STRING, number=4,)
name = proto.Field(proto.STRING, number=1,)
display_name = proto.Field(proto.STRING, number=2,)
model = proto.Field(proto.STRING, number=3,)
unmanaged_container_model = proto.Field(
proto.MESSAGE,
number=28,
message=gca_unmanaged_container_model.UnmanagedContainerModel,
)
input_config = proto.Field(proto.MESSAGE, number=4, message=InputConfig,)
model_parameters = proto.Field(proto.MESSAGE, number=5, message=struct_pb2.Value,)
output_config = proto.Field(proto.MESSAGE, number=6, message=OutputConfig,)
dedicated_resources = proto.Field(
proto.MESSAGE, number=7, message=machine_resources.BatchDedicatedResources,
)
manual_batch_tuning_parameters = proto.Field(
proto.MESSAGE,
number=8,
message=gca_manual_batch_tuning_parameters.ManualBatchTuningParameters,
)
generate_explanation = proto.Field(proto.BOOL, number=23,)
explanation_spec = proto.Field(
proto.MESSAGE, number=25, message=explanation.ExplanationSpec,
)
output_info = proto.Field(proto.MESSAGE, number=9, message=OutputInfo,)
state = proto.Field(proto.ENUM, number=10, enum=job_state.JobState,)
error = proto.Field(proto.MESSAGE, number=11, message=status_pb2.Status,)
partial_failures = proto.RepeatedField(
proto.MESSAGE, number=12, message=status_pb2.Status,
)
resources_consumed = proto.Field(
proto.MESSAGE, number=13, message=machine_resources.ResourcesConsumed,
)
completion_stats = proto.Field(
proto.MESSAGE, number=14, message=gca_completion_stats.CompletionStats,
)
create_time = proto.Field(
proto.MESSAGE, number=15, message=timestamp_pb2.Timestamp,
)
start_time = proto.Field(proto.MESSAGE, number=16, message=timestamp_pb2.Timestamp,)
end_time = proto.Field(proto.MESSAGE, number=17, message=timestamp_pb2.Timestamp,)
update_time = proto.Field(
proto.MESSAGE, number=18, message=timestamp_pb2.Timestamp,
)
labels = proto.MapField(proto.STRING, proto.STRING, number=19,)
encryption_spec = proto.Field(
proto.MESSAGE, number=24, message=gca_encryption_spec.EncryptionSpec,
)
__all__ = tuple(sorted(__protobuf__.manifest))
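# A minimal construction sketch for the message defined above. The display
# name, model resource name and Cloud Storage URIs are illustrative
# assumptions only, not values used anywhere in this module:
#
#     job = BatchPredictionJob(
#         display_name="example-batch-job",
#         model="projects/123/locations/us-central1/models/456",
#         input_config=BatchPredictionJob.InputConfig(
#             instances_format="jsonl",
#             gcs_source=io.GcsSource(uris=["gs://example-bucket/instances.jsonl"]),
#         ),
#         output_config=BatchPredictionJob.OutputConfig(
#             predictions_format="jsonl",
#             gcs_destination=io.GcsDestination(
#                 output_uri_prefix="gs://example-bucket/output/"),
#         ),
#     )
#
# Setting ``gcs_source`` populates the ``source`` oneof, so assigning
# ``bigquery_source`` afterwards would clear it, as described in the
# ``InputConfig`` docstring above.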
|
googleapis/python-aiplatform
|
google/cloud/aiplatform_v1beta1/types/batch_prediction_job.py
|
Python
|
apache-2.0
| 22,107
|
# coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.history_response import HistoryResponse # noqa: E501
from wavefront_api_client.rest import ApiException
class TestHistoryResponse(unittest.TestCase):
"""HistoryResponse unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHistoryResponse(self):
"""Test HistoryResponse"""
# FIXME: construct object with mandatory attributes with example values
# model = wavefront_api_client.models.history_response.HistoryResponse() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
wavefrontHQ/python-client
|
test/test_history_response.py
|
Python
|
apache-2.0
| 1,290
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from hyperledger.client import Client
# import base64
import json
import sys
import time
API_URL = 'http://127.0.0.1:5000'
def query_value(chaincode_name, arg_list):
"""
Query a list of values.
:param chaincode_name: The name of the chaincode.
    :param arg_list: List of keys to query.
    :return: A list of the queried values.
"""
result, resp = [], {}
print("Query value will try at most 20 times.")
for arg in arg_list:
for i in range(20):
try:
resp = c.chaincode_query(chaincode_name=chaincode_name,
function="query",
args=[arg])
if resp['result']['status'] == 'OK':
result.append(resp['result']['message'])
break
except KeyError:
print("Wait 1 seconds for the {0} query".format(i))
time.sleep(1)
return result
# Usage:
# * python function_test.py [API_URL=http://127.0.0.1:5000] will deploy first
# * python function_test.py [API_URL=http://127.0.0.1:5000] [chaincode_name]
# E.g.,
# "f389486d91f54d1f8775940f24b1d3bd9f8a8e75d364e158ac92328ddacad629607a3c42be156fc4a7da7173adca2ac7d7eef29afc59c6f07f3ad14abee34f68"
if __name__ == '__main__':
if len(sys.argv) not in [2, 3]:
print("Usage: python function_test.py ["
"API_URL=http://127.0.0.1:5000] [chaincode_name]")
exit()
API_URL = sys.argv[1]
chaincode_name = ""
if len(sys.argv) == 3:
chaincode_name = sys.argv[2]
c = Client(base_url=API_URL)
print("Checking cluster at {}".format(API_URL))
if not chaincode_name:
print(">>>Test: deploy the default chaincode")
res = c.chaincode_deploy(args=["a", "10000", "b", "20000"])
chaincode_name = res['result']['message']
assert res['result']['status'] == 'OK'
print("Successfully deploy chaincode with returned name = " +
chaincode_name)
print("Wait 15 seconds to make sure deployment is done.")
time.sleep(15)
print(">>>Check the initial value: a, b")
values = query_value(chaincode_name, ["a", "b"])
print(values)
assert values == ['10000', '20000']
print(">>>Test: invoke a chaincode: a-->b 1")
res = c.chaincode_invoke(chaincode_name=chaincode_name, function="invoke",
args=["a", "b", "1"])
assert res["result"]["status"] == "OK"
transaction_uuid = res["result"]["message"]
print("Transaction id = {0}".format(transaction_uuid))
# TODO: sleep 3 seconds till invoke done.
print("Wait 5 seconds to make sure invoke is done.")
time.sleep(5)
print(">>>Check the after value: a, b")
values = query_value(chaincode_name, ["a", "b"])
print(values)
assert values == ['9999', '20001']
time.sleep(1)
print(">>>Test: Check the transaction content")
res = c.transaction_get(transaction_uuid)
# res["chaincodeID"] = base64.b64decode(res["chaincodeID"])
print(json.dumps(res, sort_keys=True, indent=4))
assert res["uuid"] == transaction_uuid
print(">>>Test: list the peers")
res = c.peer_list()
print(json.dumps(res, sort_keys=True, indent=4))
assert len(res['peers']) > 0
print(">>>Test: list the chain")
res = c.chain_list()
print(json.dumps(res, sort_keys=True, indent=4))
assert res['height'] > 0
print("Existing block number = {0}".format(res["height"]))
print(">>>Test: get the content of block 1")
res = c.block_get(block='1')
print(json.dumps(res, sort_keys=True, indent=4))
print(">>>Test: get the content of block 2")
res = c.block_get(block='2')
print(json.dumps(res, sort_keys=True, indent=4))
|
alvanieto/hyperledger-py
|
tests/function_test.py
|
Python
|
apache-2.0
| 4,375
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
class Config:
DB_USER = os.getenv("DB_USER", 'postgres')
DB_PASSWORD = os.getenv("DB_PASSWORD", None)
DB_NAME = os.getenv("DB_NAME", "postgres")
DB_HOST = os.getenv("DB_HOST", "db")
DB_PORT = os.getenv("DB_PORT", 5432)
DB_ENGINE = os.getenv("DB_ENGINE", "postgresql")
    # Build the SQLAlchemy URI from the settings above; DB_PORT is included so
    # that overriding it via the environment actually takes effect.
    SQLALCHEMY_DATABASE_URI = \
        '{db_engine}://{user_name}:{password}@{hostname}:{port}/{database}'.format_map({
            'db_engine': DB_ENGINE,
            'user_name': DB_USER,
            'password': DB_PASSWORD,
            'hostname': DB_HOST,
            'port': DB_PORT,
            'database': DB_NAME
        })
@staticmethod
def init_app(app):
pass
class DevelopmentConfig(Config):
DEBUG = True
class TestingConfig(Config):
TESTING = True
class ProductionConfig(Config):
pass
config = {
"development": DevelopmentConfig,
"testing": TestingConfig,
"production": ProductionConfig,
"default": DevelopmentConfig
}
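# A minimal sketch of how an application factory might consume this mapping.
# The `create_app` function and the surrounding module are assumptions for
# illustration, not part of this file:
#
#     from flask import Flask
#     from config import config
#
#     def create_app(config_name="default"):
#         app = Flask(__name__)
#         app.config.from_object(config[config_name])
#         config[config_name].init_app(app)
#         return app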
|
ee-book/api
|
config.py
|
Python
|
apache-2.0
| 1,156
|
# Copyright 2015 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This module contains definitions for the base classes of all resource
translation Exceptions which may be raised.
"""
class MissingFieldException(Exception):
""" MissingFieldException is raised whenever a given Heat/CFN resource is
missing a field which is mandatory for the translation process.
"""
pass
class InvalidFieldException(Exception):
""" InvalidFieldException is raised whenever a given Heat/CFN resource
field contains an invalid value.
"""
pass
class HeatResourceNotFoundException(Exception):
""" HeatResourceNotFoundException is raised whenever a required resource is
not found amongst the resources defined within the provided Heat template.
"""
pass
class ARMResourceNotFoundException(Exception):
""" ARMResourceNotFoundException is raised whenever a required ARM resource
is not present post-initial translation.
"""
pass
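# A minimal usage sketch. The helper below is hypothetical translator code
# (the function name and resource dict layout are illustrative only), showing
# how the exceptions above are meant to be raised:
#
#     def get_required_field(heat_resource, field):
#         try:
#             return heat_resource["Properties"][field]
#         except KeyError:
#             raise MissingFieldException(
#                 "Field '%s' is mandatory for translating resource '%s'." % (
#                     field, heat_resource.get("Type")))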
|
cloudbase/heat2arm
|
heat2arm/translators/exceptions.py
|
Python
|
apache-2.0
| 1,562
|
# Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from sahara import context
from sahara.i18n import _LI
import sahara.plugins.mapr.domain.configuration_file as bcf
import sahara.plugins.mapr.domain.node_process as np
import sahara.plugins.mapr.domain.service as s
import sahara.plugins.mapr.util.validation_utils as vu
import sahara.plugins.provisioning as p
from sahara.utils import files
LOG = logging.getLogger(__name__)
CLDB = np.NodeProcess(
name='cldb',
ui_name='CLDB',
package='mapr-cldb',
open_ports=[7222, 7220, 7221]
)
FILE_SERVER = np.NodeProcess(
name='fileserver',
ui_name='FileServer',
package='mapr-fileserver',
open_ports=[]
)
NFS = np.NodeProcess(
name='nfs',
ui_name='NFS',
package='mapr-nfs',
open_ports=[2049, 9997, 9998]
)
@six.add_metaclass(s.Single)
class MapRFS(s.Service):
_CREATE_DISK_LIST = 'plugins/mapr/resources/create_disk_list_file.sh'
_DISK_SETUP_CMD = '/opt/mapr/server/disksetup -F /tmp/disk.list'
_DISK_SETUP_TIMEOUT = 600
ENABLE_MAPR_DB_NAME = 'Enable MapR-DB'
ENABLE_MAPR_DB_CONFIG = p.Config(
name=ENABLE_MAPR_DB_NAME,
applicable_target='general',
scope='cluster',
config_type="bool",
priority=1,
default_value=True,
description=_LI('Specifies that MapR-DB is in use.')
)
def __init__(self):
super(MapRFS, self).__init__()
self.ui_name = 'MapRFS'
self.node_processes = [CLDB, FILE_SERVER, NFS]
self.ui_info = [
('Container Location Database (CLDB)', CLDB, 'http://%s:7221'),
]
self._validation_rules = [
vu.at_least(1, CLDB),
vu.each_node_has(FILE_SERVER),
vu.on_same_node(CLDB, FILE_SERVER),
]
def post_install(self, cluster_context, instances):
LOG.info(_LI('START: Initializing MapR-FS.'))
if not instances:
instances = cluster_context.get_instances(
FILE_SERVER.get_ui_name())
with context.ThreadGroup() as tg:
fs_name = FILE_SERVER.get_ui_name()
file_servers = cluster_context.filter_instances(instances, fs_name)
for instance in file_servers:
tg.spawn('init-mfs-%s' % instance.id,
self._init_mfs_instance, instance)
LOG.info(_LI('END: Initializing MapR-FS.'))
def _init_mfs_instance(self, instance):
LOG.info(_LI('START: Setup MapR-FS on instance=%s'),
instance.management_ip)
self._generate_disk_list_file(instance, self._CREATE_DISK_LIST)
self._execute_disksetup(instance)
LOG.info(_LI('END: Setup MapR-FS on instance=%s'),
instance.management_ip)
def _generate_disk_list_file(self, instance, path_to_disk_setup_script):
LOG.info(_LI('START: Creating disk list file.'))
script_path = '/tmp/disk_setup_script.sh'
with instance.remote() as r:
LOG.debug('Writing /tmp/disk_setup_script.sh')
r.write_file_to(
script_path, files.get_file_text(path_to_disk_setup_script))
LOG.debug('Start executing command: chmod +x %s', script_path)
r.execute_command('chmod +x ' + script_path, run_as_root=True)
LOG.debug('Done for executing command.')
args = ' '.join(instance.node_group.storage_paths())
cmd = '%s %s' % (script_path, args)
LOG.debug('Executing %s', cmd)
r.execute_command(cmd, run_as_root=True)
LOG.info(_LI('END: Creating disk list file.'))
def _execute_disksetup(self, instance):
LOG.info(_LI('START: Executing disksetup on instance=%s'),
instance.management_ip)
with instance.remote() as rmt:
rmt.execute_command(
self._DISK_SETUP_CMD, run_as_root=True,
timeout=self._DISK_SETUP_TIMEOUT)
LOG.info(_LI('END: Executing disksetup on instance=%s'),
instance.management_ip)
def get_configs(self):
return [MapRFS.ENABLE_MAPR_DB_CONFIG]
def get_config_files(self, cluster_context, configs, instance=None):
default_path = 'plugins/mapr/services/maprfs/resources/cldb.conf'
cldb_conf = bcf.PropertiesFile("cldb.conf")
cldb_conf.remote_path = "/opt/mapr/conf/"
if instance:
cldb_conf.fetch(instance)
cldb_conf.parse(files.get_file_text(default_path))
cldb_conf.add_properties(self._get_cldb_conf_props(cluster_context))
return [cldb_conf]
def _get_cldb_conf_props(self, context):
zookeepers = context.get_zookeeper_nodes_ip_with_port()
result = {'cldb.zookeeper.servers': zookeepers}
if context.is_node_aware:
result['net.topology.script.file.name'] = '/opt/mapr/topology.sh'
return result
|
mapr/sahara
|
sahara/plugins/mapr/services/maprfs/maprfs.py
|
Python
|
apache-2.0
| 5,459
|
from flask import Flask
# note: I think this should call the setup, but it doesn't... I'd need to
# call app.run()... hmmm.... I need to learn more about how Flask does this.
#from flask.ext.cqlengine import CQLEngine
# Not sure if a database script is supposed to have an app, and thus is
# privy to app's settings, like the connection info.
from cqlengine import connection
connection.setup(['127.0.0.1'])
from cqlengine.models import Model
from cqlengine.management import sync_table
def init_db():
# import all modules here that might define models so that
# they will be registered properly on the metadata. Otherwise
# you will have to import them first before calling init_db()
import models
    model_attributes = [getattr(models, name) for name in dir(models)]
    entities = []
    for e in model_attributes:
        try:
            if issubclass(e, Model) and e is not Model:
                entities.append(e)
        except TypeError:
            # dir(models) also lists non-class attributes; skip them.
            pass
    print(entities)
    # ...and create the CQL table for each discovered model
    for e in entities:
        print('creating table for {0}'.format(e))
        sync_table(e)
if __name__ == '__main__':
init_db()
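# A minimal sketch of what `models.py` is expected to contain for init_db() to
# pick anything up. The model below is a hypothetical example, not part of
# this repo:
#
#     from cqlengine import columns
#     from cqlengine.models import Model
#
#     class ExampleThing(Model):
#         id = columns.UUID(primary_key=True)
#         name = columns.Text()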
|
chillinc/Flask-CQLEngine
|
testapp/database.py
|
Python
|
apache-2.0
| 1,163
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import warnings
import google.auth
from googleapiclient import discovery
from vm_network_migration.handler_helper.selfLink_executor import SelfLinkExecutor
from vm_network_migration_end_to_end_tests.build_test_resource import TestResourceCreator
from vm_network_migration_end_to_end_tests.check_result import *
from vm_network_migration_end_to_end_tests.google_api_interface import GoogleApiInterface
from vm_network_migration_end_to_end_tests.utils import *
class TestInternalForwardingRuleMigration(unittest.TestCase):
def setUp(self):
print('Initialize test environment.')
project = os.environ["PROJECT_ID"]
credentials, default_project = google.auth.default()
self.compute = discovery.build('compute', 'v1', credentials=credentials)
self.google_api_interface = GoogleApiInterface(self.compute,
project,
'us-central1',
'us-central1-a')
self.test_resource_creator = TestResourceCreator(
self.google_api_interface)
def testWithBackendServiceAttached(self):
""" A backend service is in use by this forwarding rule
Expectation: both the forwarding rule and the backend service will be migrated.
"""
### create test resources
forwarding_rule_name = 'end-to-end-test-forwarding-rule'
backend_service_name = 'end-to-end-test-backend-service'
group_name_1 = 'end-to-end-test-managed-instance-group-1'
operation = self.test_resource_creator.create_regional_managed_instance_group(
self.test_resource_creator.legacy_instance_template_selfLink,
group_name_1,
'sample_multi_zone_managed_instance_group.json',
)
instance_group_1_selfLink = operation['targetLink'].replace(
'/instanceGroupManagers/', '/instanceGroups/')
original_instance_template_1_configs = self.google_api_interface.get_multi_zone_instance_template_configs(
group_name_1)
backend_service_selfLink = \
self.test_resource_creator.create_regional_backend_service(
'sample_internal_backend_service.json',
backend_service_name, [instance_group_1_selfLink])['targetLink']
original_backend_service_configs = self.google_api_interface.get_regional_backend_service_configs(
backend_service_name)
forwarding_rule_selfLink = \
self.test_resource_creator.create_regional_forwarding_rule_with_backend_service(
'sample_tcp_regional_forwarding_rule_internal.json',
forwarding_rule_name,
backend_service_selfLink)['targetLink']
original_forwarding_rule_config = self.google_api_interface.get_regional_forwarding_rule_config(
forwarding_rule_name)
### start migration
selfLink_executor = SelfLinkExecutor(self.compute,
forwarding_rule_selfLink,
self.test_resource_creator.network_name,
self.test_resource_creator.subnetwork_name,
)
migration_handler = selfLink_executor.build_migration_handler()
migration_handler.network_migration()
### check migration result
# check internal forwarding rule
new_forwarding_rule_config = self.google_api_interface.get_regional_forwarding_rule_config(
forwarding_rule_name)
self.assertTrue(resource_config_is_unchanged_except_for_network(
original_forwarding_rule_config,
new_forwarding_rule_config))
self.assertTrue(
check_selfLink_equal(new_forwarding_rule_config['network'],
self.test_resource_creator.network_selfLink))
# check backend service config
new_backend_service_configs = self.google_api_interface.get_regional_backend_service_configs(
backend_service_name)
self.assertTrue(resource_config_is_unchanged_except_for_network(
original_backend_service_configs,
new_backend_service_configs))
self.assertEqual(new_backend_service_configs['network'],
self.test_resource_creator.network_selfLink)
# check its backends
        new_instance_template_1_configs = \
            self.google_api_interface.get_multi_zone_instance_template_configs(
                group_name_1)
self.assertTrue(
instance_template_config_is_unchanged_except_for_network_and_name(
original_instance_template_1_configs,
new_instance_template_1_configs)
)
self.assertTrue(
check_instance_template_network(new_instance_template_1_configs,
self.test_resource_creator.network_selfLink,
self.test_resource_creator.subnetwork_selfLink))
print('Pass the current test')
def testWithTargetInstanceAttached(self):
""" A targetInstance is in use by the forwarding rule
Expectation: both the targetInstance and the forwarding rule will be migrated
"""
### create test resources
forwarding_rule_name = 'end-to-end-test-forwarding-rule'
target_instance_name = 'end-to-end-test-instance'
operation = self.test_resource_creator.create_instance_using_template(
target_instance_name,
self.test_resource_creator.legacy_instance_template_selfLink)
instance_selfLink = operation['targetLink']
target_instance_selfLink = \
self.test_resource_creator.create_a_target_instance(
target_instance_name, instance_selfLink)['targetLink']
original_instance_config = self.google_api_interface.get_instance_configs(
target_instance_name)
forwarding_rule_selfLink = \
self.test_resource_creator.create_regional_forwarding_rule_with_target(
'sample_tcp_regional_forwarding_rule_internal.json',
forwarding_rule_name,
target_instance_selfLink)['targetLink']
original_forwarding_rule_config = self.google_api_interface.get_regional_forwarding_rule_config(
forwarding_rule_name)
### start migration
selfLink_executor = SelfLinkExecutor(self.compute,
forwarding_rule_selfLink,
self.test_resource_creator.network_name,
self.test_resource_creator.subnetwork_name,
)
migration_handler = selfLink_executor.build_migration_handler()
migration_handler.network_migration()
### check migration result
# check internal forwarding rule network
new_forwarding_rule_config = self.google_api_interface.get_regional_forwarding_rule_config(
forwarding_rule_name)
self.assertTrue(
resource_config_is_unchanged_except_for_network(
original_forwarding_rule_config,
new_forwarding_rule_config))
self.assertEqual(new_forwarding_rule_config['network'],
self.test_resource_creator.network_selfLink)
self.assertTrue(
check_selfLink_equal(new_forwarding_rule_config['network'],
self.test_resource_creator.network_selfLink))
# check instance network
new_instance_config = self.google_api_interface.get_instance_configs(
target_instance_name)
self.assertTrue(
resource_config_is_unchanged_except_for_network(new_instance_config,
original_instance_config))
# network changed
self.assertTrue(check_instance_network(new_instance_config,
self.test_resource_creator.network_selfLink,
self.test_resource_creator.subnetwork_selfLink))
print('Pass the current test')
def tearDown(self) -> None:
pass
def doCleanups(self) -> None:
self.google_api_interface.clean_all_resources()
if __name__ == '__main__':
warnings.filterwarnings(action="ignore", message="unclosed",
category=ResourceWarning)
unittest.main(failfast=True)
|
googleinterns/vm-network-migration
|
vm_network_migration_end_to_end_tests/test_forwarding_rule_migration/test_internal_forwarding_rule_migration.py
|
Python
|
apache-2.0
| 9,284
|
# -*- coding: utf-8 -*-
import wx
import app
from classes.ui import UIManager
# from Algo.Modeling.Reflectivity import Reflectivity2
def create_properties_dialog(obj_uid, size=None):
    if not size:
        size = (300, 330)
    UIM = UIManager()
    dlg = None
    try:
        dlg = UIM.create('object_properties_dialog_controller')
        # print(dlg)
        dlg.obj_uid = obj_uid
        dlg.view.SetSize(size)
        dlg.view.ShowModal()
    except Exception as e:
        print('\nERROR create_properties_dialog:', e)
        raise
    finally:
        # Guard against UIM.create() itself failing, in which case dlg was
        # never bound and removing it would mask the original error.
        if dlg is not None:
            UIM.remove(dlg.uid)
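# A typical call (sketch only; the uid tuple and size below are illustrative
# assumptions, not values used elsewhere in this module):
#
#     create_properties_dialog(obj_uid=('log', 0), size=(350, 400))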
"""
Load interface data.
"""
def load_application_UI_data(fullfilename):
pass
# UIM = UIManager()
# UIM.load_application_state_from_file(fullfilename)
"""
Load interface data.
"""
def load_user_UI_data(fullfilename):
pass
# UIM = UIManager()
# UIM.load_user_state_from_file(fullfilename)
"""
Save application structure UI data.
"""
def save_UI_application_data(fullfilename):
pass
# UIM = UIManager()
# UIM.save_application_state_to_file(fullfilename)
"""
Save user UI data.
"""
def save_UI_user_data(fullfilename):
pass
# UIM = UIManager()
# UIM.save_user_state_to_file(fullfilename)
# UIM._save_state_to_file(self.UIM_file)
"""
Loads Gripy Initial Interface (MainWindow and it's children).
"""
def load():
# load_UI_file = True
load_UI_file = False
gripy_app = wx.GetApp()
# gripy_app = app.gripy_app.GripyApp.Get()
if not gripy_app:
        raise Exception('Fatal error: no running GriPy application was found.')
UIM = UIManager()
if load_UI_file:
"""
Load basic app from file.
"""
load_application_UI_data(gripy_app._gripy_app_state.get('app_UI_file'))
load_user_UI_data(gripy_app._gripy_app_state.get('user_UI_file'))
mwc = UIM.list('main_window_controller')[0]
else:
"""
Construct the application itself.
"""
mwc = UIM.create('main_window_controller',
title=gripy_app._gripy_app_state.get('app_display_name'),
pos=(2000, 800), maximized=True)
# """
# Menubar
menubar_ctrl = UIM.create('menubar_controller', mwc.uid)
# First level Menus
mc_project = UIM.create('menu_controller', menubar_ctrl.uid, label=u"&Project")
mc_edit = UIM.create('menu_controller', menubar_ctrl.uid, label=u"&Edit")
mc_well = UIM.create('menu_controller', menubar_ctrl.uid, label=u"&Well")
mc_precond = UIM.create('menu_controller', menubar_ctrl.uid, label=u"&Preconditioning")
mc_model = UIM.create('menu_controller', menubar_ctrl.uid, label=u"&Modeling")
mc_interp = UIM.create('menu_controller', menubar_ctrl.uid, label=u"&Interpretation")
# mc_infer = UIM.create('menu_controller', menubar_ctrl.uid, label=u"&Inference")
# mc_specdecom = UIM.create('menu_controller', menubar_ctrl.uid, label=u"&SpecDecom")
mc_tools = UIM.create('menu_controller', menubar_ctrl.uid, label=u"&Tools")
mc_plugins = UIM.create('menu_controller', menubar_ctrl.uid, label=u"&Plugins")
# Project Menu
UIM.create('menu_item_controller', mc_project.uid,
label=u'&New project',
help=u'Create a new empty GriPy Project.',
id=wx.ID_NEW,
callback='app.menu_functions.on_new'
)
UIM.create('menu_item_controller', mc_project.uid,
kind=wx.ITEM_SEPARATOR
)
UIM.create('menu_item_controller', mc_project.uid,
label=u'&Open',
help=u'Open GriPy Project (*.pgg)',
id=wx.ID_OPEN,
callback='app.menu_functions.on_open'
)
UIM.create('menu_item_controller', mc_project.uid,
label=u'&Save',
help=u'Save GriPy Project',
id=wx.ID_SAVE,
callback='app.menu_functions.on_save'
)
UIM.create('menu_item_controller', mc_project.uid,
label=u'&Save as',
help=u'Save GriPy Project with a new name',
id=wx.ID_SAVEAS,
callback='app.menu_functions.on_save_as'
)
UIM.create('menu_item_controller', mc_project.uid,
kind=wx.ITEM_SEPARATOR
)
mc_import = UIM.create('menu_controller', mc_project.uid,
label=u"&Import",
help=u"Import file"
)
UIM.create('menu_item_controller', mc_import.uid,
label=u"SEG-Y Well Gather",
help=u'Import a SEG-Y Seismic file as Well Gather',
callback='app.menu_functions.on_import_segy_well_gather'
)
UIM.create('menu_item_controller', mc_import.uid,
label=u"SEG-Y Seismic",
help=u'Import a SEG-Y Seismic file to current GriPy Project',
callback='app.menu_functions.on_import_segy_seis'
)
UIM.create('menu_item_controller', mc_import.uid,
label=u"SEG-Y Velocity",
help=u'Import a SEG-Y Velocity file to current GriPy Project',
callback='app.menu_functions.on_import_segy_vel'
)
mc_export = UIM.create('menu_controller', mc_project.uid,
label=u"Export",
help=u"Export file"
)
UIM.create('menu_item_controller', mc_export.uid,
label=u"LAS File",
help=u'Export a LAS file from a well in current GriPy Project',
callback='app.menu_functions.on_export_las'
)
UIM.create('menu_item_controller', mc_project.uid,
kind=wx.ITEM_SEPARATOR
)
UIM.create('menu_item_controller', mc_project.uid,
label=u'Exit',
help=u'Exits GRIPy application.',
id=wx.ID_EXIT,
callback='app.menu_functions.on_exit'
)
# Edit Menu
"""
mc_partition = UIM.create('menu_controller', mc_edit.uid,
label=u"&Partition",
help=u"Create / Edit Partition"
)
"""
mc_rocktable = UIM.create('menu_controller', mc_edit.uid,
label=u"&Rock Table",
help=u"Create / Edit RockTable"
)
UIM.create('menu_item_controller', mc_rocktable.uid,
label=u"New Rock Table",
help=u'New Rock Table',
callback='app.menu_functions.on_new_rocktable'
)
UIM.create('menu_item_controller', mc_rocktable.uid,
label=u"Edit Rock Table",
help=u'Edit Rock Table',
callback='app.menu_functions.on_edit_rocktable'
)
UIM.create('menu_item_controller', mc_edit.uid,
label=u'&Well Plot',
help=u'Well Plot',
callback='app.menu_functions.on_new_wellplot'
)
UIM.create('menu_item_controller', mc_edit.uid,
label=u'&Crossplot',
help=u'Crossplot',
callback='app.menu_functions.on_new_crossplot'
)
# UIM.create('menu_item_controller', mc_edit.uid,
# label=u'&Rock',
# help=u'Initialize rock model',
# callback='app.menu_functions.on_rock'
# )
# UIM.create('menu_item_controller', mc_edit.uid,
# label=u'&Fluid',
# help=u'Initialize fluid model',
# callback='app.menu_functions.on_fluid'
# )
# Well Menu
UIM.create('menu_item_controller',
mc_well.uid,
label=u"New well",
help=u"Create well",
callback='app.menu_functions.on_create_well'
)
# Import Well
mc_import_well = UIM.create('menu_controller',
mc_well.uid,
label=u"&Import Well"
)
UIM.create('menu_item_controller',
mc_import_well.uid,
label=u"LAS File",
help=u'Import a LAS file to current GriPy Project',
callback='app.menu_functions.on_import_las'
)
UIM.create('menu_item_controller',
mc_import_well.uid,
label=u"LIS File",
help=u'Import a LIS file to current GriPy Project',
callback='app.menu_functions.on_import_lis'
)
UIM.create('menu_item_controller',
mc_import_well.uid,
label=u"DLIS File",
help=u'Import a DLIS file to current GriPy Project',
callback='app.menu_functions.on_import_dlis'
)
UIM.create('menu_item_controller',
mc_import_well.uid,
label=u"ODT File",
help=u'Import a ODT file to current GriPy Project',
callback='app.menu_functions.on_import_odt'
)
#
UIM.create('menu_item_controller', mc_well.uid,
kind=wx.ITEM_SEPARATOR
)
UIM.create('menu_item_controller', mc_well.uid,
label=u"Create Synthetic Log",
callback='app.menu_functions.on_create_synthetic'
)
"""
    ### Roseane's work
UIM.create('menu_item_controller', mc_well.uid,
kind=wx.ITEM_SEPARATOR
)
UIM.create('menu_item_controller', mc_well.uid,
label=u'PoroPerm Cross-Plot',
callback='app.menu_functions.create_poro_perm_xplot'
)
UIM.create('menu_item_controller', mc_well.uid,
label=u'Winland Cross-Plot',
callback='app.menu_functions.create_winland_xplot'
)
UIM.create('menu_item_controller', mc_well.uid,
label=u'Stratigraphic Modified lorenz Plot (SMLP)',
callback='app.menu_functions.create_SMLP_xplot'
)
UIM.create('menu_item_controller', mc_well.uid,
label=u'Modified lorenz Plot (MLP)',
callback='app.menu_functions.create_MLP_xplot'
)
UIM.create('menu_item_controller', mc_well.uid,
label=u'Depth vs Acumulated KH',
callback='app.menu_functions.create_Depth_vs_kHAcum_xplot'
)
    ### END - Roseane's work
"""
"""
# Inference Menu
UIM.create('menu_item_controller', mc_infer.uid,
label=u"Avo PP",
callback='app.menu_functions.teste6'
)
UIM.create('menu_item_controller', mc_infer.uid,
label=u"Avo PP-PS",
callback='app.menu_functions.teste7'
)
"""
# Interpretation Menu
mc_specdecom = UIM.create('menu_controller', mc_interp.uid,
label=u"Spectral Decomposition",
help=u"Spectral Decomposition",
)
UIM.create('menu_item_controller', mc_specdecom.uid,
label=u"Continuous Wavelet Transform",
callback='app.menu_functions.on_cwt'
)
mc_attributes = UIM.create('menu_controller', mc_interp.uid,
label=u"Attributes",
help=u"Attributes",
)
UIM.create('menu_item_controller', mc_attributes.uid,
label=u"Phase Rotation",
callback='app.menu_functions.on_phase_rotation'
)
UIM.create('menu_item_controller', mc_attributes.uid,
label=u"Hilbert Attributes",
callback='app.menu_functions.on_hilbert_attributes'
)
# Modeling Menu
UIM.create('menu_item_controller', mc_model.uid,
label=u"Create 2/3 layers model",
callback='app.menu_functions.on_create_model'
)
UIM.create('menu_item_controller', mc_model.uid,
kind=wx.ITEM_SEPARATOR
)
UIM.create('menu_item_controller', mc_model.uid,
label=u"Aki-Richards PP",
callback='app.menu_functions.on_akirichards_pp'
)
UIM.create('menu_item_controller', mc_model.uid,
label=u"Reflectivity Method",
callback='app.menu_functions.ReflectivityModel'
)
# UIM.create('menu_item_controller', mc_model.uid,
# kind=wx.ITEM_SEPARATOR
# )
# UIM.create('menu_item_controller', mc_model.uid,
# label=u"Poisson ratio",
# callback='app.menu_functions.on_poisson_ratio'
# )
# Tools Menu
UIM.create('menu_item_controller', mc_tools.uid,
label="Coding Console", help=u"Gripy Coding Console",
callback='app.menu_functions.on_coding_console'
)
#
"""
# Debug Menu
UIM.create('menu_item_controller', mc_debug.uid,
label=u"Load Wilson Synthetics",
callback='app.menu_functions.on_load_wilson'
)
UIM.create('menu_item_controller', mc_debug.uid,
label=u"Load Stack North Viking Data",
callback='app.menu_functions.teste10'
)
UIM.create('menu_item_controller', mc_debug.uid,
label=u"Teste 11",
callback='app.menu_functions.teste11'
)
UIM.create('menu_item_controller', mc_debug.uid,
label=u'Calc Well Time from Depth curve',
callback='app.menu_functions.calc_well_time_from_depth'
)
UIM.create('menu_item_controller', mc_debug.uid,
kind=wx.ITEM_SEPARATOR
)
UIM.create('menu_item_controller', mc_debug.uid,
label="Load Teste 2019",
callback='app.menu_functions.on_load_teste_2019'
)
"""
    # End of Main Menu Bar
# Object Manager TreeController
UIM.create('tree_controller', mwc.uid)
# Main ToolBar
tbc = UIM.create('toolbar_controller', mwc.uid)
UIM.create('toolbartool_controller', tbc.uid,
label=u"New project",
bitmap='new_file-30.png',
help='New project',
long_help='Start a new Gripy project, closes existing',
callback='app.menu_functions.on_new'
)
UIM.create('toolbartool_controller', tbc.uid,
label=u"Abrir projeto",
bitmap='open_folder-30.png',
help='Abrir projeto',
long_help='Abrir projeto GriPy',
callback='app.menu_functions.on_open'
)
UIM.create('toolbartool_controller', tbc.uid,
label=u"Salvar projeto",
bitmap='save_close-30.png',
help='Salvar projeto',
long_help='Salvar projeto GriPy',
callback='app.menu_functions.on_save'
)
UIM.create('toolbartool_controller', tbc.uid,
label=u"Well Plot",
bitmap='oil_rig-30.png',
help='Well Plot',
long_help='Well Plot',
callback='app.menu_functions.on_new_wellplot'
)
UIM.create('toolbartool_controller', tbc.uid,
label=u"Crossplot",
bitmap='scatter_plot-30.png',
help='Crossplot',
long_help='Crossplot',
callback='app.menu_functions.on_new_crossplot'
)
# StatusBar
UIM.create('statusbar_controller', mwc.uid,
               label='Welcome to ' + \
app.gripy_app.GripyApp.Get()._gripy_app_state.get('app_display_name')
)
# _do_initial_tests()
def get_main_window_controller():
UIM = UIManager()
mwc = UIM.list('main_window_controller')[0]
return mwc
"""
Function reserved for some tests.
"""
def _do_initial_tests():
# pass
mwc = get_main_window_controller()
# mwc.size = (1000, 700)
depth = [
2240.07,
2240.22,
2240.38,
2240.53,
2240.68,
2240.83,
2240.99,
2241.14,
2241.29,
2241.44,
2241.60,
2241.75,
2241.90,
2242.05,
2242.21,
2242.36,
2242.51,
2242.66,
2242.81,
2242.97,
2243.12,
2243.27,
2243.42,
2243.58,
2243.73,
2243.88,
2244.03,
2244.19,
2244.34,
2244.49,
2244.64,
2244.80,
2244.95,
2245.10,
2245.25,
2245.41,
2245.56,
2245.71,
2245.86,
2246.02,
2246.17,
2246.32,
2246.47,
2246.62,
2246.78,
2246.93,
2247.08,
2247.23,
2247.39,
2247.54,
2247.69,
2247.84,
2248.00,
2248.15,
2248.30,
2248.45,
2248.61,
2248.76,
2248.91,
2249.06,
2249.22,
2249.37,
2249.52,
2249.67,
2249.83,
2249.98,
2250.13,
2250.28,
2250.43,
2250.59,
2250.74,
2250.89,
2251.04,
2251.20,
2251.35,
2251.50,
2251.65,
2251.81,
2251.96,
2252.11,
2252.26,
2252.42,
2252.57,
2252.72,
2252.87,
2253.03,
2253.18,
2253.33,
2253.48,
2253.64,
2253.79,
2253.94,
2254.09,
2254.24,
2254.40,
2254.55,
2254.70,
2254.85,
2255.01,
2255.16,
2255.31,
2255.46,
2255.62,
2255.77,
2255.92,
2256.07,
2256.23,
2256.38,
2256.53,
2256.68,
2256.84,
2256.99,
2257.14,
2257.29,
2257.45,
2257.60,
2257.75,
2257.90,
2258.05,
2258.21,
2258.36,
2258.51,
2258.66,
2258.82,
2258.97,
2259.12,
2259.27,
2259.43,
2259.58,
2259.73,
2259.88,
2260.04,
2260.19,
2260.34,
2260.49,
2260.65,
2260.80,
2260.95,
2261.10,
2261.26,
2261.41,
2261.56,
2261.71,
2261.86,
2262.02,
2262.17,
2262.32,
2262.47,
2262.63,
2262.78,
2262.93,
2263.08,
2263.24,
2263.39,
2263.54,
2263.69,
2263.85,
2264.00,
2264.15,
2264.30,
2264.46,
2264.61,
2264.76,
2264.91,
2265.07,
2265.22,
2265.37,
2265.52,
2265.67,
2265.83,
2265.98,
2266.13,
2266.28,
2266.44,
2266.59,
2266.74,
2266.89,
2267.05,
2267.20,
2267.35,
2267.50,
2267.66,
2267.81,
2267.96,
2268.11,
2268.27,
2268.42,
2268.57,
2268.72,
2268.88,
2269.03,
2269.18,
2269.33,
2269.48,
2269.64,
2269.79,
2269.94,
2270.09,
2270.25,
2270.40,
2270.55,
2270.70,
2270.86,
2271.01,
2271.16,
2271.31,
2271.47,
2271.62,
2271.77,
2271.92,
2272.08,
2272.23,
2272.38,
2272.53,
2272.69,
2272.84,
2272.99,
2273.14,
2273.29,
2273.45,
2273.60,
2273.75,
2273.90,
2274.06,
2274.21,
2274.36,
2274.51,
2274.67,
2274.82,
2274.97,
2275.12,
2275.28,
2275.43,
2275.58,
2275.73,
2275.89,
2276.04,
2276.19,
2276.34,
2276.50,
2276.65,
2276.80,
2276.95,
2277.10,
2277.26,
2277.41,
2277.56,
2277.71,
2277.87,
2278.02,
2278.17,
2278.32,
2278.48,
2278.63,
2278.78,
2278.93,
2279.09,
2279.24,
2279.39,
2279.54,
2279.70,
2279.85,
2280.00,
2280.15,
2280.31,
2280.46,
2280.61,
2280.76,
2280.91,
2281.07,
2281.22,
2281.37,
2281.52,
2281.68,
2281.83,
2281.98,
2282.13,
2282.29,
2282.44,
2282.59,
2282.74,
2282.90,
2283.05,
2283.20,
2283.35,
2283.51,
2283.66,
2283.81,
2283.96,
2284.12,
2284.27,
2284.42,
2284.57,
2284.72,
2284.88,
2285.03
]
phi = [
12.71,
13.03,
13.91,
14.24,
14.14,
14.05,
13.56,
14.80,
18.43,
18.44,
18.44,
21.12,
21.15,
17.38,
15.26,
13.14,
13.79,
13.79,
17.52,
20.47,
23.41,
25.46,
25.37,
25.39,
25.40,
25.40,
27.12,
30.32,
29.79,
29.28,
28.77,
28.77,
27.44,
28.19,
28.16,
28.13,
28.36,
28.37,
26.91,
24.28,
21.66,
16.64,
13.95,
13.96,
16.51,
19.06,
23.68,
25.13,
19.13,
19.13,
19.13,
11.90,
9.31,
9.70,
11.75,
13.81,
13.82,
18.41,
21.13,
20.91,
20.69,
20.68,
20.37,
21.27,
21.83,
22.39,
20.46,
20.46,
16.11,
14.51,
12.92,
12.37,
12.38,
14.83,
15.59,
16.35,
15.95,
14.55,
14.56,
14.06,
13.56,
11.95,
11.58,
11.58,
11.53,
11.47,
11.62,
11.91,
11.91,
12.19,
12.48,
11.73,
11.46,
11.47,
11.22,
10.96,
9.73,
13.72,
13.73,
16.19,
18.65,
18.03,
13.38,
13.38,
12.02,
10.66,
10.46,
10.19,
10.19,
10.04,
9.89,
9.97,
9.97,
10.42,
10.55,
10.67,
10.72,
10.72,
10.56,
10.65,
10.75,
11.36,
11.36,
11.22,
11.09,
10.96,
9.82,
9.82,
7.87,
7.16,
6.46,
5.33,
5.33,
4.40,
3.59,
2.77,
2.37,
2.37,
5.29,
7.49,
9.69,
10.75,
10.76,
8.09,
6.05,
4.00,
1.81,
1.81,
1.41,
2.65,
3.90,
5.53,
5.52,
7.33,
7.22,
7.10,
7.38,
7.39,
6.75,
7.39,
8.02,
11.19,
11.20,
11.50,
10.28,
9.05,
6.72,
6.72,
7.45,
7.65,
7.84,
7.10,
7.10,
5.83,
5.72,
5.61,
4.59,
4.10,
4.10,
4.79,
5.48,
6.55,
7.06,
7.06,
7.21,
7.36,
8.14,
9.52,
10.88,
10.88,
10.89,
11.31,
11.47,
11.27,
11.27,
11.27,
11.02,
10.32,
10.17,
11.47,
12.78,
12.78,
16.69,
19.24,
20.08,
20.92,
20.92,
20.50,
19.54,
19.01,
18.48,
19.14,
19.14,
21.05,
22.17,
23.28,
24.52,
24.55,
24.56,
23.37,
22.18,
19.87,
20.66,
25.14,
27.49,
29.85,
29.85,
27.70,
20.82,
18.12,
15.42,
17.81,
17.81,
24.27,
26.65,
29.04,
30.60,
30.29,
29.74,
29.55,
29.35,
29.35,
29.75,
30.04,
30.74,
31.44,
32.84,
34.00,
33.49,
33.49,
33.49,
31.75,
30.09,
29.21,
29.32,
29.43,
29.69,
29.54,
29.54,
29.33,
29.11,
29.05,
29.68,
30.06,
29.81,
29.55,
28.75,
28.50,
28.76,
28.76,
28.77,
28.43,
27.98,
26.45,
25.01,
23.57,
20.54,
19.43,
20.82,
21.88,
22.94,
23.35,
21.23,
17.69,
17.69,
17.69,
15.08,
13.85,
13.22,
14.04,
14.86,
16.63,
15.77,
12.90,
11.81,
10.72
]
k = [
3.17,
3.73,
5.40,
6.05,
6.04,
6.03,
4.90,
6.40,
21.92,
21.94,
21.96,
39.61,
35.40,
9.82,
5.63,
1.45,
4.24,
4.24,
22.42,
58.34,
94.26,
151.13,
143.73,
117.33,
117.42,
117.51,
125.67,
215.58,
158.82,
155.51,
152.19,
152.12,
120.77,
180.07,
207.44,
234.81,
256.96,
257.26,
128.33,
85.05,
41.76,
15.27,
7.26,
7.27,
20.29,
33.32,
88.98,
155.43,
36.55,
36.55,
36.54,
1.99,
0.00,
0.00,
1.49,
2.97,
2.98,
17.56,
40.01,
37.44,
34.87,
34.84,
28.78,
42.96,
61.04,
79.13,
49.58,
49.58,
12.45,
7.08,
1.70,
0.47,
0.47,
3.35,
5.34,
7.33,
7.46,
3.98,
3.99,
2.77,
1.56,
0.82,
0.63,
0.63,
0.66,
0.69,
1.16,
0.49,
0.49,
0.47,
0.44,
0.09,
0.07,
0.07,
0.48,
0.88,
0.60,
11.45,
11.47,
20.36,
29.25,
19.84,
3.37,
3.36,
1.73,
0.10,
0.08,
0.11,
0.11,
0.12,
0.14,
0.15,
0.15,
0.15,
0.15,
0.15,
0.17,
0.17,
0.18,
0.28,
0.37,
0.56,
0.56,
0.64,
0.87,
1.10,
1.73,
1.73,
1.76,
1.23,
0.69,
0.17,
0.17,
0.13,
0.08,
0.04,
0.01,
0.01,
0.52,
3.90,
7.28,
16.15,
16.16,
6.50,
3.48,
0.47,
0.03,
0.03,
0.02,
0.32,
0.62,
1.04,
1.04,
1.82,
1.30,
0.79,
0.84,
0.84,
0.51,
1.58,
2.66,
13.37,
13.39,
13.48,
8.85,
4.22,
0.38,
0.38,
0.61,
0.59,
0.56,
0.25,
0.25,
0.04,
0.03,
0.03,
0.01,
0.02,
0.02,
0.02,
0.02,
0.02,
0.01,
0.01,
0.03,
0.06,
0.48,
1.42,
2.44,
2.44,
2.44,
2.06,
2.13,
2.31,
2.31,
2.31,
1.68,
1.08,
1.06,
2.60,
4.15,
4.15,
12.25,
27.08,
38.61,
50.14,
50.13,
55.07,
44.74,
40.26,
35.79,
46.19,
46.22,
80.29,
105.75,
131.21,
185.27,
171.19,
171.31,
129.09,
86.87,
41.32,
41.43,
119.83,
325.13,
530.42,
530.63,
453.72,
85.49,
52.88,
20.27,
33.22,
33.23,
149.22,
329.19,
509.16,
900.93,
590.33,
343.81,
296.22,
248.63,
248.46,
259.63,
328.47,
421.46,
514.44,
628.84,
692.98,
445.79,
445.70,
445.61,
335.99,
301.97,
295.60,
302.84,
310.09,
311.65,
323.01,
323.06,
299.90,
276.74,
234.65,
231.59,
257.46,
267.27,
277.08,
244.13,
246.43,
245.78,
245.91,
246.05,
197.69,
187.18,
95.09,
63.74,
32.39,
13.97,
15.42,
30.43,
35.40,
40.37,
35.11,
19.13,
8.35,
8.35,
8.35,
2.52,
0.69,
0.85,
3.02,
5.20,
13.43,
8.33,
1.22,
0.62,
0.01
]
from classes.om import ObjectManager
import numpy as np
OM = ObjectManager()
#
well = OM.new('well', name='Winland-Lorenz')
OM.add(well)
#
iset = OM.new('curve_set', name='Run 001')
OM.add(iset, well.uid)
#
# """
index = OM.new('data_index', np.array(depth), name='Depth', dimension=0, datatype='MD', unit='m')
OM.add(index, iset.uid)
#
log = OM.new('log', np.array(phi) / 100, index_uid=index.uid, name='Phi', unit='dec', datatype='NMRperm')
OM.add(log, iset.uid)
# """
"""
#
log = OM.new('log', np.array(k), index_uid=index.uid, name='K', unit='mD', datatype='CorePerm')
OM.add(log, iset.uid)
#
#
#
"""
iset2 = OM.new('curve_set', name='Run 002')
OM.add(iset2, well.uid)
#
index = OM.new('data_index', np.array(depth), name='Depth', dimension=0, datatype='MD', unit='m')
OM.add(index, iset2.uid)
#
log = OM.new('log', np.array(phi) / 100, index_uid=index.uid, name='Phi', unit='dec', datatype='NMRperm')
OM.add(log, iset2.uid)
#
log = OM.new('log', np.array(k), index_uid=index.uid, name='K', unit='mD', datatype='CorePerm')
OM.add(log, iset2.uid)
#
# """
    # GBDI/PCE room workstation
# mwc.pos = (-1900, -700)
# mwc.maximized = False
    # HOME
mwc.pos = (-8, 0)
mwc.size = (1240, 1046)
mwc.maximized = False
# BR
# mwc.pos = (-1925, -921)
# mwc.size = (1116, 1131)
# mwc.pos = (-1300,600)
# mwc.maximized = True
"""
from om.manager import ObjectManager
OM = ObjectManager()
well = OM.new('well', name='ZZZ')
OM.add(well)
"""
"""
mwc = get_main_window_controller()
mwc.size = wx.Size(900, 200)
mwc.size = wx.Size(900, 460)
print (mwc.name)
print (mwc['name'])
del mwc.name
# del mwc['name']
"""
'''
mwc = get_main_window_controller()
mwc.pos = (-1092, 606)
mwc.size = (900, 600)
mwc.maximized = False
'''
'''
print ('\n\n\n\n\n\n\n\n\n\n')
from om.manager import ObjectManager
OM = ObjectManager()
OM.print_info()
well = OM.new('well', name='ZZZ')
OM.add(well)
'''
"""
OM.print_info()
OM.remove(well.uid)
OM.print_info()
well1 = OM.new('well', name='xxx')
OM.add(well1)
OM.print_info()
well2 = OM.new('well', name='yyy')
OM.add(well2)
OM.print_info()
OM.remove(well1.uid)
OM.print_info()
OM.remove(well2.uid)
OM.print_info()
"""
# fullfilename = 'C:\\Users\\Adriano\\Desktop\\aaa_teste_5.pgg'
# fullfilename = 'C:\\Users\\Adriano\\Desktop\\aaa_teste_8.pgg'
# fullfilename = 'C:\\Users\\Adriano\\Desktop\\2709_pocos_classes.pgg'
# app.load_project_data(fullfilename)
#
# lpc = UIM.create('logplot_controller', mwc.uid)
# tc1 = UIM.create('track_controller', lpc.uid)
# tc1.width = 900
# UIM.create('track_controller', lpc.uid)
# UIM.create('track_controller', lpc.uid)
# UIM.create('track_controller', lpc.uid, overview=True, plotgrid=False)
# """
# End - Tests
# """
|
giruenf/GRIPy
|
classes/ui/interface.py
|
Python
|
apache-2.0
| 34,446
|
from sys import stdin,stdout
# stdin = open("/Users/seeva92/Workspace/Contests/1.txt","r")
# stdout = open("/Users/seeva92/Workspace/Contests/2.txt","w")
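# Why x % 6 decides the winner (assuming the usual statement of this problem,
# where Chef moves first and a move removes either one coin or p**k coins for
# some prime p): no prime power is divisible by 6, so from a multiple of 6
# every legal move leaves a non-multiple, while from a non-multiple the mover
# can always remove 1, 2, 3, 4 (=2**2) or 5 coins to reach a multiple of 6
# again. Multiples of 6 are therefore losing positions for the player to move,
# i.e. Misha wins exactly when x % 6 == 0.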
def main():
# stdout.write("wow")
t = int(stdin.readline().strip())
for i in range(t):
x = int(stdin.readline().strip())
if x%6 == 0:
stdout.write("Misha\n")
else:
stdout.write("Chef\n")
main()
|
shiva92/Contests
|
Codechef/June/Long/ChefAndCoinsGame.py
|
Python
|
apache-2.0
| 408
|
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the GTest action."""
import sys
from launch import SomeSubstitutionsType
from .test import Test
class PyTest(Test):
"""Action that runs a Pytest test."""
def __init__(
self,
*,
path: SomeSubstitutionsType,
**kwargs
) -> None:
"""
Create a PyTest test action.
        The timeout argument is passed to :class:`launch_testing.Test`.
        The other arguments are passed to :class:`launch.ExecuteProcess`, so
        see the documentation for the class for additional details.
        :param path: Path to the test to be executed.
"""
cmd = [sys.executable, '-m', 'pytest', path]
super().__init__(cmd=cmd, **kwargs)
self.__path = path
@property
def path(self):
"""Getter for path."""
return self.__path
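# Usage sketch (illustrative only; the test file path below is hypothetical):
#
#     from launch import LaunchDescription
#
#     ld = LaunchDescription([
#         PyTest(path='test/test_talker.py'),
#     ])
#
# The action simply wraps ``python -m pytest <path>``; any extra keyword
# arguments are forwarded to launch_testing's Test / launch.ExecuteProcess,
# as noted in the constructor docstring.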
|
ros2/launch
|
launch_testing/launch_testing/actions/pytest.py
|
Python
|
apache-2.0
| 1,434
|
#!/usr/bin/env python
# ===============================================================================
# Copyright 2015 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from datetime import timedelta, datetime
import gdal
import numpy
import osr
__author__ = "Simon Oldfield"
import logging
_log = logging.getLogger()
def daterange(start_date, end_date, step):
for n in range(0, int((end_date - start_date).days), step):
yield start_date + timedelta(n)
def main():
x = 140
y = -36
path = "/Users/simon/tmp/cube/output/applications/wetness_with_statistics_2015-04-17/stack/LS_WETNESS_{x:03d}_{y:04d}.tif".format(x=x, y=y)
acq_dt_min = datetime(2006, 1, 1).date()
acq_dt_max = datetime(2006, 3, 31).date()
acq_dt_step = 8
dates = list(daterange(acq_dt_min, acq_dt_max, acq_dt_step))
driver = gdal.GetDriverByName("GTiff")
assert driver
width = 100
height = 100
raster = driver.Create(path, width, height, len(dates), gdal.GDT_Float32, options=["INTERLEAVE=BAND"])
assert raster
raster.SetGeoTransform((x, 0.00025, 0.0, y+1, 0.0, -0.00025))
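    # The geotransform above uses GDAL's 6-element affine layout:
    # (origin_x, pixel_width, row_rotation, origin_y, col_rotation,
    # pixel_height). The origin is the upper-left corner (x, y + 1) with
    # 0.00025-degree pixels; pixel_height is negative for north-up rasters.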
srs = osr.SpatialReference()
srs.ImportFromEPSG(4326)
raster.SetProjection(srs.ExportToWkt())
md = {
"X_INDEX": "{x:03d}".format(x=x),
"Y_INDEX": "{y:04d}".format(y=y),
"DATASET_TYPE": "WETNESS",
"ACQUISITION_DATE": "{acq_min} to {acq_max}".format(acq_min=acq_dt_min, acq_max=acq_dt_max),
"SATELLITE": "LS57",
"PIXEL_QUALITY_FILTER": "PQA_MASK_CLEAR",
"WATER_FILTER": ""
}
raster.SetMetadata(md)
for i, date in enumerate(dates, start=1):
_log.debug("Writing %s as %d", date, i)
data = numpy.empty((width, height), dtype=numpy.float32)
data.fill(i)
band = raster.GetRasterBand(i)
band.SetDescription(str(date))
band.SetNoDataValue(numpy.nan)
band.WriteArray(data)
band.ComputeStatistics(True)
band.FlushCache()
del band
raster.FlushCache()
del raster
if __name__ == "__main__":
main()
|
ama-jharrison/agdc
|
agdc/api-examples/source/test/python/datacube/api/application/wetness/generate_stack.py
|
Python
|
apache-2.0
| 2,699
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest2
from quantum.api.v2 import attributes
from quantum.common import exceptions as q_exc
class TestAttributes(unittest2.TestCase):
def test_is_attr_set(self):
data = attributes.ATTR_NOT_SPECIFIED
self.assertIs(attributes.is_attr_set(data), False)
data = None
self.assertIs(attributes.is_attr_set(data), False)
data = "I'm set"
self.assertIs(attributes.is_attr_set(data), True)
def test_validate_values(self):
msg = attributes._validate_values(4, [4, 6])
self.assertIsNone(msg)
msg = attributes._validate_values(4, (4, 6))
self.assertIsNone(msg)
msg = attributes._validate_values(7, [4, 6])
self.assertEqual(msg, "'7' is not in [4, 6]")
msg = attributes._validate_values(7, (4, 6))
self.assertEqual(msg, "'7' is not in (4, 6)")
def test_validate_string(self):
msg = attributes._validate_string(None, None)
self.assertEqual(msg, "'None' is not a valid string")
# 0 == len(data) == max_len
msg = attributes._validate_string("", 0)
self.assertIsNone(msg)
# 0 == len(data) < max_len
msg = attributes._validate_string("", 9)
self.assertIsNone(msg)
# 0 < len(data) < max_len
msg = attributes._validate_string("123456789", 10)
self.assertIsNone(msg)
# 0 < len(data) == max_len
msg = attributes._validate_string("123456789", 9)
self.assertIsNone(msg)
# 0 < max_len < len(data)
msg = attributes._validate_string("1234567890", 9)
self.assertEqual(msg, "'1234567890' exceeds maximum length of 9")
msg = attributes._validate_string("123456789", None)
self.assertIsNone(msg)
def test_validate_range(self):
msg = attributes._validate_range(1, [1, 9])
self.assertIsNone(msg)
msg = attributes._validate_range(5, [1, 9])
self.assertIsNone(msg)
msg = attributes._validate_range(9, [1, 9])
self.assertIsNone(msg)
msg = attributes._validate_range(1, (1, 9))
self.assertIsNone(msg)
msg = attributes._validate_range(5, (1, 9))
self.assertIsNone(msg)
msg = attributes._validate_range(9, (1, 9))
self.assertIsNone(msg)
msg = attributes._validate_range(0, [1, 9])
self.assertEqual(msg, "'0' is not in range 1 through 9")
msg = attributes._validate_range(10, (1, 9))
self.assertEqual(msg, "'10' is not in range 1 through 9")
def test_validate_mac_address(self):
mac_addr = "ff:16:3e:4f:00:00"
msg = attributes._validate_mac_address(mac_addr)
self.assertIsNone(msg)
mac_addr = "ffa:16:3e:4f:00:00"
msg = attributes._validate_mac_address(mac_addr)
self.assertEqual(msg, "'%s' is not a valid MAC address" % mac_addr)
def test_validate_ip_address(self):
ip_addr = '1.1.1.1'
msg = attributes._validate_ip_address(ip_addr)
self.assertIsNone(msg)
ip_addr = '1111.1.1.1'
msg = attributes._validate_ip_address(ip_addr)
self.assertEqual(msg, "'%s' is not a valid IP address" % ip_addr)
def test_validate_ip_pools(self):
pools = [[{'end': '10.0.0.254'}],
[{'start': '10.0.0.254'}],
[{'start': '1000.0.0.254',
'end': '1.1.1.1'}],
[{'start': '10.0.0.2', 'end': '10.0.0.254',
'forza': 'juve'}],
[{'start': '10.0.0.2', 'end': '10.0.0.254'},
{'end': '10.0.0.254'}],
[None],
None]
for pool in pools:
msg = attributes._validate_ip_pools(pool)
self.assertIsNotNone(msg)
pools = [[{'end': '10.0.0.254', 'start': '10.0.0.2'},
{'start': '11.0.0.2', 'end': '11.1.1.1'}],
[{'start': '11.0.0.2', 'end': '11.0.0.100'}]]
for pool in pools:
msg = attributes._validate_ip_pools(pool)
self.assertIsNone(msg)
def test_validate_fixed_ips(self):
fixed_ips = [
{'data': [{'subnet_id': '00000000-ffff-ffff-ffff-000000000000',
'ip_address': '1111.1.1.1'}],
'error_msg': "'1111.1.1.1' is not a valid IP address"},
{'data': [{'subnet_id': 'invalid',
'ip_address': '1.1.1.1'}],
'error_msg': "'invalid' is not a valid UUID"},
{'data': None,
'error_msg': "Invalid data format for fixed IP: 'None'"},
{'data': "1.1.1.1",
'error_msg': "Invalid data format for fixed IP: '1.1.1.1'"},
{'data': ['00000000-ffff-ffff-ffff-000000000000', '1.1.1.1'],
'error_msg': "Invalid data format for fixed IP: "
"'00000000-ffff-ffff-ffff-000000000000'"},
{'data': [['00000000-ffff-ffff-ffff-000000000000', '1.1.1.1']],
'error_msg': "Invalid data format for fixed IP: "
"'['00000000-ffff-ffff-ffff-000000000000', "
"'1.1.1.1']'"},
{'data': [{'subnet_id': '00000000-0fff-ffff-ffff-000000000000',
'ip_address': '1.1.1.1'},
{'subnet_id': '00000000-ffff-ffff-ffff-000000000000',
'ip_address': '1.1.1.1'}],
'error_msg': "Duplicate IP address '1.1.1.1'"}]
for fixed in fixed_ips:
msg = attributes._validate_fixed_ips(fixed['data'])
self.assertEqual(msg, fixed['error_msg'])
fixed_ips = [[{'subnet_id': '00000000-ffff-ffff-ffff-000000000000',
'ip_address': '1.1.1.1'}],
[{'subnet_id': '00000000-0fff-ffff-ffff-000000000000',
'ip_address': '1.1.1.1'},
{'subnet_id': '00000000-ffff-ffff-ffff-000000000000',
'ip_address': '1.1.1.2'}]]
for fixed in fixed_ips:
msg = attributes._validate_fixed_ips(fixed)
self.assertIsNone(msg)
def test_validate_nameservers(self):
ns_pools = [['1.1.1.2', '1.1.1.2'],
['www.hostname.com', 'www.hostname.com'],
['77.hostname.com'],
['1000.0.0.1'],
None]
for ns in ns_pools:
msg = attributes._validate_nameservers(ns, None)
self.assertIsNotNone(msg)
ns_pools = [['100.0.0.2'],
['www.hostname.com'],
['www.great.marathons.to.travel'],
['valid'],
['www.internal.hostname.com']]
for ns in ns_pools:
msg = attributes._validate_nameservers(ns, None)
self.assertIsNone(msg)
def test_validate_hostroutes(self):
hostroute_pools = [[{'destination': '100.0.0.0/24'}],
[{'nexthop': '10.0.2.20'}],
[{'nexthop': '10.0.2.20',
'forza': 'juve',
'destination': '100.0.0.0/8'}],
[{'nexthop': '1110.0.2.20',
'destination': '100.0.0.0/8'}],
[{'nexthop': '10.0.2.20',
'destination': '100.0.0.0'}],
[{'nexthop': '10.0.2.20',
'destination': '100.0.0.0/8'},
{'nexthop': '10.0.2.20',
'destination': '100.0.0.0/8'}],
[None],
None]
for host_routes in hostroute_pools:
msg = attributes._validate_hostroutes(host_routes, None)
self.assertIsNotNone(msg)
hostroute_pools = [[{'destination': '100.0.0.0/24',
'nexthop': '10.0.2.20'}],
[{'nexthop': '10.0.2.20',
'destination': '100.0.0.0/8'},
{'nexthop': '10.0.2.20',
'destination': '100.0.0.1/8'}]]
for host_routes in hostroute_pools:
msg = attributes._validate_hostroutes(host_routes, None)
self.assertIsNone(msg)
def test_validate_ip_address_or_none(self):
ip_addr = None
msg = attributes._validate_ip_address_or_none(ip_addr)
self.assertIsNone(msg)
ip_addr = '1.1.1.1'
msg = attributes._validate_ip_address_or_none(ip_addr)
self.assertIsNone(msg)
ip_addr = '1111.1.1.1'
msg = attributes._validate_ip_address_or_none(ip_addr)
self.assertEqual(msg, "'%s' is not a valid IP address" % ip_addr)
def test_hostname_pattern(self):
data = '@openstack'
msg = attributes._validate_regex(data, attributes.HOSTNAME_PATTERN)
self.assertIsNotNone(msg)
data = 'www.openstack.org'
msg = attributes._validate_regex(data, attributes.HOSTNAME_PATTERN)
self.assertIsNone(msg)
def test_uuid_pattern(self):
data = 'garbage'
msg = attributes._validate_regex(data, attributes.UUID_PATTERN)
self.assertIsNotNone(msg)
data = '00000000-ffff-ffff-ffff-000000000000'
msg = attributes._validate_regex(data, attributes.UUID_PATTERN)
self.assertIsNone(msg)
def test_mac_pattern(self):
# Valid - 3 octets
base_mac = "fa:16:3e:00:00:00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNone(msg)
# Valid - 4 octets
base_mac = "fa:16:3e:4f:00:00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNone(msg)
# Invalid - not unicast
base_mac = "01:16:3e:4f:00:00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
# Invalid - invalid format
base_mac = "a:16:3e:4f:00:00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
# Invalid - invalid format
base_mac = "ffa:16:3e:4f:00:00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
# Invalid - invalid format
base_mac = "01163e4f0000"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
# Invalid - invalid format
base_mac = "01-16-3e-4f-00-00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
# Invalid - invalid format
base_mac = "00:16:3:f:00:00"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
# Invalid - invalid format
base_mac = "12:3:4:5:67:89ab"
msg = attributes._validate_regex(base_mac,
attributes.MAC_PATTERN)
self.assertIsNotNone(msg)
def test_validate_subnet(self):
# Valid - IPv4
cidr = "10.0.2.0/24"
msg = attributes._validate_subnet(cidr,
None)
self.assertIsNone(msg)
# Valid - IPv6 without final octets
cidr = "fe80::/24"
msg = attributes._validate_subnet(cidr,
None)
self.assertIsNone(msg)
# Valid - IPv6 with final octets
cidr = "fe80::0/24"
msg = attributes._validate_subnet(cidr,
None)
self.assertIsNone(msg)
# Invalid - IPv4 missing mask
cidr = "10.0.2.0"
msg = attributes._validate_subnet(cidr,
None)
error = "'%s' is not a valid IP subnet" % cidr
self.assertEqual(msg, error)
# Invalid - IPv6 without final octets, missing mask
cidr = "fe80::"
msg = attributes._validate_subnet(cidr,
None)
error = "'%s' is not a valid IP subnet" % cidr
self.assertEqual(msg, error)
# Invalid - IPv6 with final octets, missing mask
cidr = "fe80::0"
msg = attributes._validate_subnet(cidr,
None)
error = "'%s' is not a valid IP subnet" % cidr
self.assertEqual(msg, error)
# Invalid - Address format error
cidr = 'invalid'
msg = attributes._validate_subnet(cidr,
None)
error = "'%s' is not a valid IP subnet" % cidr
self.assertEqual(msg, error)
def test_validate_regex(self):
pattern = '[hc]at'
data = None
msg = attributes._validate_regex(data, pattern)
self.assertEqual(msg, "'%s' is not a valid input" % data)
data = 'bat'
msg = attributes._validate_regex(data, pattern)
self.assertEqual(msg, "'%s' is not a valid input" % data)
data = 'hat'
msg = attributes._validate_regex(data, pattern)
self.assertIsNone(msg)
data = 'cat'
msg = attributes._validate_regex(data, pattern)
self.assertIsNone(msg)
def test_validate_uuid(self):
msg = attributes._validate_uuid('garbage')
self.assertEqual(msg, "'garbage' is not a valid UUID")
msg = attributes._validate_uuid('00000000-ffff-ffff-ffff-000000000000')
self.assertIsNone(msg)
def test_validate_uuid_list(self):
# check not a list
uuids = [None,
123,
'e5069610-744b-42a7-8bd8-ceac1a229cd4',
'12345678123456781234567812345678',
{'uuid': 'e5069610-744b-42a7-8bd8-ceac1a229cd4'}]
for uuid in uuids:
msg = attributes._validate_uuid_list(uuid)
error = "'%s' is not a list" % uuid
            self.assertEqual(msg, error)
# check invalid uuid in a list
invalid_uuid_lists = [[None],
[123],
[123, 'e5069610-744b-42a7-8bd8-ceac1a229cd4'],
['123', '12345678123456781234567812345678'],
['t5069610-744b-42a7-8bd8-ceac1a229cd4'],
['e5069610-744b-42a7-8bd8-ceac1a229cd44'],
['e50696100-744b-42a7-8bd8-ceac1a229cd4'],
['e5069610-744bb-42a7-8bd8-ceac1a229cd4']]
for uuid_list in invalid_uuid_lists:
msg = attributes._validate_uuid_list(uuid_list)
error = "'%s' is not a valid UUID" % uuid_list[0]
            self.assertEqual(msg, error)
# check duplicate items in a list
duplicate_uuids = ['e5069610-744b-42a7-8bd8-ceac1a229cd4',
'f3eeab00-8367-4524-b662-55e64d4cacb5',
'e5069610-744b-42a7-8bd8-ceac1a229cd4']
msg = attributes._validate_uuid_list(duplicate_uuids)
error = ("Duplicate items in the list: "
"'%s'" % ', '.join(duplicate_uuids))
        self.assertEqual(msg, error)
# check valid uuid lists
valid_uuid_lists = [['e5069610-744b-42a7-8bd8-ceac1a229cd4'],
['f3eeab00-8367-4524-b662-55e64d4cacb5'],
['e5069610-744b-42a7-8bd8-ceac1a229cd4',
'f3eeab00-8367-4524-b662-55e64d4cacb5']]
for uuid_list in valid_uuid_lists:
msg = attributes._validate_uuid_list(uuid_list)
            self.assertIsNone(msg)
def test_validate_dict(self):
for value in (None, True, '1', []):
            self.assertEqual(attributes._validate_dict(value),
                             "'%s' is not a dictionary" % value)
msg = attributes._validate_dict({})
self.assertIsNone(msg)
msg = attributes._validate_dict({'key': 'value'})
self.assertIsNone(msg)
def test_validate_non_negative(self):
for value in (-1, '-2'):
            self.assertEqual(attributes._validate_non_negative(value),
                             "'%s' should be non-negative" % value)
for value in (0, 1, '2', True, False):
msg = attributes._validate_non_negative(value)
self.assertIsNone(msg)
class TestConvertToBoolean(unittest2.TestCase):
def test_convert_to_boolean_bool(self):
self.assertIs(attributes.convert_to_boolean(True), True)
self.assertIs(attributes.convert_to_boolean(False), False)
def test_convert_to_boolean_int(self):
self.assertIs(attributes.convert_to_boolean(0), False)
self.assertIs(attributes.convert_to_boolean(1), True)
self.assertRaises(q_exc.InvalidInput,
attributes.convert_to_boolean,
7)
def test_convert_to_boolean_str(self):
self.assertIs(attributes.convert_to_boolean('True'), True)
self.assertIs(attributes.convert_to_boolean('true'), True)
self.assertIs(attributes.convert_to_boolean('False'), False)
self.assertIs(attributes.convert_to_boolean('false'), False)
self.assertIs(attributes.convert_to_boolean('0'), False)
self.assertIs(attributes.convert_to_boolean('1'), True)
self.assertRaises(q_exc.InvalidInput,
attributes.convert_to_boolean,
'7')
class TestConvertToInt(unittest2.TestCase):
def test_convert_to_int_int(self):
self.assertEqual(attributes.convert_to_int(-1), -1)
self.assertEqual(attributes.convert_to_int(0), 0)
self.assertEqual(attributes.convert_to_int(1), 1)
def test_convert_to_int_str(self):
self.assertEqual(attributes.convert_to_int('4'), 4)
self.assertEqual(attributes.convert_to_int('6'), 6)
self.assertRaises(q_exc.InvalidInput,
attributes.convert_to_int,
'garbage')
def test_convert_to_int_none(self):
self.assertRaises(q_exc.InvalidInput,
attributes.convert_to_int,
None)
def test_convert_none_to_empty_list_none(self):
self.assertEqual(
[], attributes.convert_none_to_empty_list(None))
def test_convert_none_to_empty_list_value(self):
values = ['1', 3, [], [1], {}, {'a': 3}]
for value in values:
self.assertEqual(
value, attributes.convert_none_to_empty_list(value))
class TestConvertKvp(unittest2.TestCase):
def test_convert_kvp_list_to_dict_succeeds_for_missing_values(self):
result = attributes.convert_kvp_list_to_dict(['True'])
self.assertEqual({}, result)
def test_convert_kvp_list_to_dict_succeeds_for_multiple_values(self):
result = attributes.convert_kvp_list_to_dict(
['a=b', 'a=c', 'a=c', 'b=a'])
self.assertEqual({'a': ['c', 'b'], 'b': ['a']}, result)
def test_convert_kvp_list_to_dict_succeeds_for_values(self):
result = attributes.convert_kvp_list_to_dict(['a=b', 'c=d'])
self.assertEqual({'a': ['b'], 'c': ['d']}, result)
def test_convert_kvp_str_to_list_fails_for_missing_key(self):
with self.assertRaises(q_exc.InvalidInput):
attributes.convert_kvp_str_to_list('=a')
def test_convert_kvp_str_to_list_fails_for_missing_equals(self):
with self.assertRaises(q_exc.InvalidInput):
attributes.convert_kvp_str_to_list('a')
def test_convert_kvp_str_to_list_succeeds_for_one_equals(self):
result = attributes.convert_kvp_str_to_list('a=')
self.assertEqual(['a', ''], result)
def test_convert_kvp_str_to_list_succeeds_for_two_equals(self):
result = attributes.convert_kvp_str_to_list('a=a=a')
self.assertEqual(['a', 'a=a'], result)
class TestConvertToList(unittest2.TestCase):
    def test_convert_to_empty_list(self):
        for item in (None, [], (), {}):
            self.assertEqual(attributes.convert_to_list(item), [])
    def test_convert_to_list_string(self):
        for item in ('', 'foo'):
            self.assertEqual(attributes.convert_to_list(item), [item])
    def test_convert_to_list_iterable(self):
        for item in ([None], [1, 2, 3], (1, 2, 3), set([1, 2, 3]), ['foo']):
            self.assertEqual(attributes.convert_to_list(item), list(item))
    def test_convert_to_list_non_iterable(self):
        for item in (True, False, 1, 1.2, object()):
            self.assertEqual(attributes.convert_to_list(item), [item])
|
rossella/neutron
|
quantum/tests/unit/test_attributes.py
|
Python
|
apache-2.0
| 21,755
|
##
# Copyright (c) 2010-2015 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Benchmark a server's handling of event deletion.
"""
from itertools import count
from urllib2 import HTTPDigestAuthHandler
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.web.http import NO_CONTENT
from contrib.performance.httpauth import AuthHandlerAgent
from contrib.performance.httpclient import StringProducer
from contrib.performance.benchlib import initialize, sample
from contrib.performance.benchmarks.event import makeEvent
@inlineCallbacks
def measure(host, port, dtrace, attendeeCount, samples):
organizerSequence = 1
user = password = "user%02d" % (organizerSequence,)
root = "/"
principal = "/"
calendar = "event-deletion-benchmark"
authinfo = HTTPDigestAuthHandler()
authinfo.add_password(
realm="Test Realm",
uri="http://%s:%d/" % (host, port),
user=user,
passwd=password)
agent = AuthHandlerAgent(Agent(reactor), authinfo)
# Set up the calendar first
yield initialize(agent, host, port, user, password, root, principal, calendar)
# An infinite stream of VEVENTs to PUT to the server.
events = ((i, makeEvent(i, organizerSequence, attendeeCount))
for i in count(2))
# Create enough events to delete
uri = 'http://%s:%d/calendars/__uids__/%s/%s/foo-%%d.ics' % (
host, port, user, calendar)
headers = Headers({"content-type": ["text/calendar"]})
urls = []
for i, body in events:
urls.append(uri % (i,))
yield agent.request(
'PUT', urls[-1], headers, StringProducer(body))
if len(urls) == samples:
break
# Now delete them all
samples = yield sample(
dtrace, samples,
agent, (('DELETE', url) for url in urls).next,
NO_CONTENT)
returnValue(samples)
|
red-hood/calendarserver
|
contrib/performance/benchmarks/event_delete.py
|
Python
|
apache-2.0
| 2,538
|
from syft import TensorBase
import syft
import unittest
from syft import tensor
import numpy as np
import math
# Here are our "unit tests".
class DimTests(unittest.TestCase):
def test_dim_one(self):
t = TensorBase(np.array([1, 2, 3]))
self.assertTrue(t.dim() == 1)
def test_view(self):
t = TensorBase([1.0, 2.0, 3.0])
self.assertTrue(syft.equal(t.view(-1, 1), TensorBase(np.array([[1], [2], [3]]))))
def test_as_view(self):
t = TensorBase(np.array([1.0, 2.0, 3.0]))
t1 = t.view([-1, 1])
print(t.data.dtype)
self.assertTrue(syft.equal(t.view_as(t1), TensorBase(np.array([[1.0], [2.0], [3.0]]))))
def test_resize(self):
t = TensorBase(np.array([1.0, 2.0, 3.0]))
t.resize_([1, 2])
self.assertEqual(t.data.shape, (1, 2))
def test_resize_as(self):
t = TensorBase(np.array([1, 2, 3]))
t2 = TensorBase(np.array([[1], [2]]))
t.resize_as_(t2)
self.assertEqual(t.data.shape, (2, 1))
def test_size(self):
t = TensorBase([1, 2, 3])
t1 = TensorBase([1.0, 2.0, 3.0])
self.assertEqual(t.size(), t1.size())
def test_nelement(self):
t = TensorBase(np.array([[1, 2], [3, 4]]))
t1 = TensorBase(np.array([[5.0, 6.0], [7.0, 8.0]]))
self.assertEqual(t.nelement(), t1.nelement())
class AddTests(unittest.TestCase):
def test_simple(self):
t = TensorBase(np.array([1, 2, 3]))
self.assertTrue(syft.equal(t + np.array([1, 2, 3]), [2, 4, 6]))
def test_inplace(self):
t = TensorBase(np.array([1, 2, 3]))
t += np.array([1, 2, 3])
self.assertTrue(syft.equal(t.data, [2, 4, 6]))
def test_scalar(self):
t = TensorBase(np.array([1, 2, 3]))
self.assertTrue(syft.equal(t + 2, [3, 4, 5]))
class CeilTests(unittest.TestCase):
def test_ceil(self):
t = TensorBase(np.array([1.4, 2.7, 6.2]))
tdash = t.ceil()
self.assertTrue(syft.equal(tdash.data, TensorBase([2, 3, 7])))
self.assertTrue(syft.equal(t.data, TensorBase([1.4, 2.7, 6.2])))
def test_ceil_(self):
t = TensorBase(np.array([1.4, 2.7, 6.2]))
self.assertTrue(syft.equal(t.ceil_(), [2, 3, 7]))
self.assertTrue(syft.equal(t.data, [2, 3, 7]))
class ZeroTests(unittest.TestCase):
def test_zero(self):
t = TensorBase(np.array([13, 42, 1024]))
self.assertTrue(syft.equal(t.zero_(), [0, 0, 0]))
t = TensorBase(np.array([13.1, 42.2, 1024.4]))
self.assertTrue(syft.equal(t.zero_(), [0.0, 0.0, 0.0]))
class FloorTests(unittest.TestCase):
def test_floor_(self):
t = TensorBase(np.array([1.4, 2.7, 6.2]))
self.assertTrue(syft.equal(t.floor_(), [1., 2., 6.]))
self.assertTrue(syft.equal(t.data, [1., 2., 6.]))
class SubTests(unittest.TestCase):
def test_simple(self):
t = TensorBase(np.array([1, 2, 3]))
self.assertTrue(syft.equal(t - np.array([1, 2, 3]), [0, 0, 0]))
def test_inplace(self):
t = TensorBase(np.array([1, 2, 3]))
t -= np.array([1, 2, 3])
self.assertTrue(syft.equal(t.data, [0, 0, 0]))
def test_scalar(self):
t = TensorBase(np.array([1, 2, 3]))
self.assertTrue(syft.equal(t - 1, [0, 1, 2]))
class MaxTests(unittest.TestCase):
def test_no_dim(self):
t = TensorBase(np.array([[0.77937768, 0.51022484, 0.49155195, 0.02769902], [0.03777148, 0.13020167, 0.02155692, 0.69574893]]))
self.assertTrue(t.max() == 0.77937768)
def test_axis(self):
t = TensorBase(np.array([[0.77937768, 0.51022484, 0.49155195, 0.02769902], [0.03777148, 0.13020167, 0.02155692, 0.69574893]]))
result = t.max(axis=1)
self.assertTrue(syft.equal(result, [0.77937768, 0.69574893]))
result = t.max(axis=0)
self.assertTrue(syft.equal(result, [0.77937768, 0.51022484, 0.49155195, 0.69574893]))
class MultTests(unittest.TestCase):
def test_simple(self):
t = TensorBase(np.array([1, 2, 3]))
self.assertTrue(syft.equal(t * np.array([1, 2, 3]), [1, 4, 9]))
def test_inplace(self):
t = TensorBase(np.array([1, 2, 3]))
t *= np.array([1, 2, 3])
self.assertTrue(syft.equal(t.data, [1, 4, 9]))
def test_scalar(self):
t = TensorBase(np.array([1, 2, 3]))
self.assertTrue(syft.equal(t * 2, [2, 4, 6]))
class DivTests(unittest.TestCase):
def test_simple(self):
t = TensorBase(np.array([2, 4, 8]))
self.assertTrue(syft.equal(t / np.array([2, 2, 2]), [1, 2, 4]))
def test_inplace(self):
t = TensorBase(np.array([2, 4, 8]))
t /= np.array([2, 2, 2])
self.assertTrue(syft.equal(t.data, [1, 2, 4]))
t = TensorBase(np.array([1, 7, 11]))
t /= np.array([3, 2, 9])
self.assertTrue(syft.equal(t, [1 / 3, 7 / 2, 11 / 9]))
def test_scalar(self):
t = TensorBase(np.array([2, 4, 6]))
self.assertTrue(syft.equal(t / 2, [1, 2, 3]))
class AbsTests(unittest.TestCase):
def test_abs(self):
t = TensorBase(np.array([-1, -2, 3]))
self.assertTrue(np.array_equal(t.abs(), [1, 2, 3]))
def test_abs_(self):
t = TensorBase(np.array([-1, -2, 3]))
self.assertTrue(np.array_equal(t.abs_(), t.data))
class ShapeTests(unittest.TestCase):
def test_shape(self):
t = TensorBase(np.array([[0, 1], [0, 5]]))
self.assertTrue(syft.equal(t.shape(), (2, 2)))
class SqrtTests(unittest.TestCase):
def test_sqrt(self):
t = TensorBase(np.array([[0, 4], [9, 16]]))
self.assertTrue(syft.equal(t.sqrt(), ([[0, 2], [3, 4]])))
def test_sqrt_(self):
t = TensorBase(np.array([[0, 4], [9, 16]]))
t.sqrt_()
self.assertTrue(syft.equal(t, ([[0, 2], [3, 4]])))
class SumTests(unittest.TestCase):
def test_dim_none_int(self):
t = TensorBase(np.array([1, 2, 3]))
self.assertTrue(syft.equal(t.sum(), 6))
def test_dim_is_not_none_int(self):
t = TensorBase(np.array([[0, 1], [0, 5]]))
self.assertTrue(syft.equal(t.sum(dim=1), [1, 5]))
class EqualTests(unittest.TestCase):
def test_equal(self):
t1 = TensorBase(np.array([1.2, 2, 3]))
t2 = TensorBase(np.array([1.2, 2, 3]))
self.assertTrue(syft.equal(t1, t2))
def test_equal_operation(self):
t1 = TensorBase(np.array([1, 2.4, 3]))
t2 = TensorBase(np.array([1, 2.4, 3]))
self.assertTrue(t1 == t2)
def test_not_equal(self):
t1 = TensorBase(np.array([1, 2, 3]))
t2 = TensorBase(np.array([1, 4, 3]))
self.assertFalse(syft.equal(t1, t2))
def test_inequality_operation(self):
t1 = TensorBase(np.array([1, 2, 3]))
t2 = TensorBase(np.array([1, 4, 5]))
self.assertTrue(t1 != t2)
class IndexTests(unittest.TestCase):
def test_indexing(self):
t1 = TensorBase(np.array([1.2, 2, 3]))
self.assertEqual(1.2, t1[0])
self.assertEqual(3, t1[-1])
class sigmoidTests(unittest.TestCase):
def test_sigmoid(self):
t1 = TensorBase(np.array([1.2, 3.3, 4]))
self.assertTrue(syft.equal(t1.sigmoid_(), TensorBase(
[0.76852478, 0.96442881, 0.98201379])))
class addmm(unittest.TestCase):
def test_addmm_1d(self):
t1 = TensorBase(np.array([1, 2, 3]))
t2 = TensorBase(np.array([2, 3, 4]))
mat = TensorBase(np.array([5]))
out = t1.addmm(t2, mat, beta=2, alpha=2)
self.assertTrue(np.array_equal(out.data, [50]))
def test_addmm_2d(self):
t1 = TensorBase(np.array([[1, 2], [1, 2]]))
t2 = TensorBase(np.array([[1, 2], [1, 2]]))
mat = TensorBase(np.array([[2, 3], [3, 4]]))
out = t1.addmm(t2, mat, beta=2, alpha=2)
self.assertTrue(np.array_equal(out.data, [[10, 18], [12, 20]]))
def test_addmm__1d(self):
t1 = TensorBase(np.array([1, 2, 3]))
t2 = TensorBase(np.array([2, 3, 4]))
mat = TensorBase(np.array([5]))
t1.addmm_(t2, mat, beta=2, alpha=2)
self.assertTrue(np.array_equal(t1.data, [50]))
def test_addmm__2d(self):
t1 = TensorBase(np.array([[1, 2], [1, 2]]))
t2 = TensorBase(np.array([[1, 2], [1, 2]]))
mat = TensorBase(np.array([[2, 3], [3, 4]]))
t1.addmm_(t2, mat, beta=2, alpha=2)
self.assertTrue(np.array_equal(t1.data, [[10, 18], [12, 20]]))
class addcmulTests(unittest.TestCase):
def test_addcmul_1d(self):
t1 = TensorBase(np.array([1, 2, 3]))
t2 = TensorBase(np.array([2, 3, 4]))
mat = TensorBase(np.array([5]))
out = t1.addcmul(t2, mat, value=2)
self.assertTrue(np.array_equal(out.data, [9, 17, 29]))
def test_addcmul_2d(self):
t1 = TensorBase(np.array([[1, 2], [1, 2]]))
t2 = TensorBase(np.array([[1, 2], [1, 2]]))
mat = TensorBase(np.array([[2, 3], [3, 4]]))
out = t1.addcmul(t2, mat, value=2)
self.assertTrue(np.array_equal(out.data, [[4, 11], [5, 12]]))
def test_addcmul__1d(self):
t1 = TensorBase(np.array([1, 2, 3]))
t2 = TensorBase(np.array([2, 3, 4]))
mat = TensorBase(np.array([5]))
t1.addcmul_(t2, mat, value=2)
self.assertTrue(np.array_equal(t1.data, [9, 17, 29]))
def test_addcmul__2d(self):
t1 = TensorBase(np.array([[1, 2], [1, 2]]))
t2 = TensorBase(np.array([[1, 2], [1, 2]]))
mat = TensorBase(np.array([[2, 3], [3, 4]]))
t1.addcmul_(t2, mat, value=2)
self.assertTrue(np.array_equal(t1.data, [[4, 11], [5, 12]]))
class addcdivTests(unittest.TestCase):
def test_addcdiv_1d(self):
t1 = TensorBase(np.array([1, 2, 3]))
t2 = TensorBase(np.array([2, 5, 4]))
mat = TensorBase(np.array([5]))
out = t1.addcdiv(t2, mat, value=2)
self.assertTrue(np.array_equal(out.data, [6., 5.8, 6.5]))
def test_addcdiv_2d(self):
t1 = TensorBase(np.array([[1, 2], [1, 2]]))
t2 = TensorBase(np.array([[1, 2], [1, 2]]))
mat = TensorBase(np.array([[2, 3], [3, 4]]))
out = t1.addcdiv(t2, mat, value=2)
self.assertTrue(np.array_equal(out.data, [[4., 5.], [5., 6.]]))
def test_addcdiv__1d(self):
t1 = TensorBase(np.array([1, 2, 3]))
t2 = TensorBase(np.array([2, 5, 4]))
mat = TensorBase(np.array([5]))
t1.addcdiv_(t2, mat, value=2)
self.assertTrue(np.array_equal(t1.data, [6., 5.8, 6.5]))
def test_addcdiv__2d(self):
t1 = TensorBase(np.array([[1, 2], [1, 2]]))
t2 = TensorBase(np.array([[1, 2], [1, 2]]))
mat = TensorBase(np.array([[2, 3], [3, 4]]))
t1.addcdiv_(t2, mat, value=2)
self.assertTrue(np.array_equal(t1.data, [[4., 5.], [5., 6.]]))
class addmvTests(unittest.TestCase):
def test_addmv(self):
t1 = TensorBase(np.array([1, 2]))
vec = TensorBase(np.array([1, 2, 3, 4]))
mat = TensorBase(np.array([[2, 3, 3, 4], [5, 6, 6, 7]]))
out = t1.addmv(mat, vec, beta=2, alpha=2)
self.assertTrue(np.array_equal(out.data, [68, 130]))
def test_addmv_(self):
t1 = TensorBase(np.array([1, 2]))
vec = TensorBase(np.array([1, 2, 3, 4]))
mat = TensorBase(np.array([[2, 3, 3, 4], [5, 6, 6, 7]]))
t1.addmv_(mat, vec, beta=2, alpha=2)
self.assertTrue(np.array_equal(t1.data, [68, 130]))
class bmmTests(unittest.TestCase):
def test_bmm_size(self):
t1 = TensorBase(np.random.rand(4, 3, 2))
t2 = TensorBase(np.random.rand(4, 2, 1))
out = t1.bmm(t2)
self.assertTupleEqual(out.shape(), (4, 3, 1))
def test_bmm(self):
t1 = TensorBase(np.array([[[3, 1]], [[1, 2]]]))
t2 = TensorBase(np.array([[[1], [3]], [[4], [8]]]))
out = t1.bmm(t2)
test_result = np.array([[[6]], [[20]]])
self.assertTrue(np.array_equal(out.data, test_result))
class addbmmTests(unittest.TestCase):
def test_addbmm(self):
t1 = TensorBase(np.array([[[3, 4], [5, 6]], [[7, 8], [1, 2]]]))
t2 = TensorBase(np.array([[[3, 5], [5, 7]], [[7, 9], [1, 3]]]))
mat = TensorBase(np.array([[2, 3], [3, 4]]))
out = t1.addbmm(t2, mat, beta=2, alpha=2)
self.assertTrue(np.array_equal(out.data, [[176, 266], [114, 172]]))
def test_addbmm_(self):
t1 = TensorBase(np.array([[[3, 4], [5, 6]], [[7, 8], [1, 2]]]))
t2 = TensorBase(np.array([[[3, 5], [5, 7]], [[7, 9], [1, 3]]]))
mat = TensorBase(np.array([[2, 3], [3, 4]]))
t1.addbmm_(t2, mat, beta=2, alpha=2)
self.assertTrue(np.array_equal(t1.data, [[176, 266], [114, 172]]))
class baddbmmTests(unittest.TestCase):
def test_baddbmm(self):
t1 = TensorBase(np.array([[[3, 4], [5, 6]], [[7, 8], [1, 2]]]))
t2 = TensorBase(np.array([[[3, 5], [5, 7]], [[7, 9], [1, 3]]]))
mat = TensorBase(np.array([[[2, 3], [3, 4]], [[4, 5], [5, 6]]]))
out = t1.baddbmm(t2, mat, beta=2, alpha=2)
self.assertTrue(np.array_equal(out.data, [[[62, 92], [96, 142]],
[[122, 184], [28, 42]]]))
def test_baddbmm_(self):
t1 = TensorBase(np.array([[[3, 4], [5, 6]], [[7, 8], [1, 2]]]))
t2 = TensorBase(np.array([[[3, 5], [5, 7]], [[7, 9], [1, 3]]]))
mat = TensorBase(np.array([[[2, 3], [3, 4]], [[4, 5], [5, 6]]]))
t1.baddbmm_(t2, mat, beta=2, alpha=2)
self.assertTrue(np.array_equal(t1.data, [[[62, 92], [96, 142]],
[[122, 184], [28, 42]]]))
class PermuteTests(unittest.TestCase):
# WARNING: don't change the name of this function
def dest3d(self):
t = TensorBase(np.ones((2, 3, 5)))
tdash = t.permute((2, 0, 1))
self.assertTrue(tdash.data.shape == [5, 2, 3])
self.assertTrue(t.data.shape == [2, 3, 5])
class transposeTests(unittest.TestCase):
def test_transpose(self):
t1 = TensorBase(np.array([[[3, 4], [5, 6]], [[7, 8], [1, 2]]]))
out1 = t1.transpose(0, 1)
self.assertTrue(np.array_equal(out1.data, [[[3, 4], [7, 8]],
[[5, 6], [1, 2]]]))
out2 = t1.transpose(0, 2)
self.assertTrue(np.array_equal(out2.data, [[[3, 7], [5, 1]],
[[4, 8], [6, 2]]]))
out3 = t1.transpose(1, 2)
self.assertTrue(np.array_equal(out3.data, [[[3, 5], [4, 6]],
[[7, 1], [8, 2]]]))
def test_transpose_(self):
t1 = TensorBase(np.array([[[3, 4], [5, 6]], [[7, 8], [1, 2]]]))
t1.transpose_(0, 1)
self.assertTrue(np.array_equal(t1.data, [[[3, 4], [7, 8]],
[[5, 6], [1, 2]]]))
t2 = TensorBase(np.array([[[3, 4], [5, 6]], [[7, 8], [1, 2]]]))
t2.transpose_(0, 2)
self.assertTrue(np.array_equal(t2.data, [[[3, 7], [5, 1]],
[[4, 8], [6, 2]]]))
t3 = TensorBase(np.array([[[3, 4], [5, 6]], [[7, 8], [1, 2]]]))
t3.transpose_(1, 2)
self.assertTrue(np.array_equal(t3.data, [[[3, 5], [4, 6]],
[[7, 1], [8, 2]]]))
def test_t(self):
t1 = TensorBase(np.array([[[3, 4], [5, 6]], [[7, 8], [1, 2]]]))
out1 = t1.t()
self.assertTrue(np.array_equal(out1.data, [[[3, 4], [7, 8]],
[[5, 6], [1, 2]]]))
class unsqueezeTests(unittest.TestCase):
def test_unsqueeze(self):
t1 = TensorBase(np.arange(3 * 4 * 5).reshape((3, 4, 5)))
for i in range(len(t1.data.shape)):
out = t1.unsqueeze(i)
expected_shape = list(t1.data.shape)
expected_shape.insert(i, 1)
self.assertTrue(np.array_equal(out.data.shape, expected_shape))
def test_unsqueeze_(self):
test_shape = (3, 4, 5)
for i in range(len(test_shape)):
t1 = TensorBase(np.arange(3 * 4 * 5).reshape(test_shape))
expected_shape = list(t1.data.shape)
expected_shape.insert(i, 1)
t1.unsqueeze_(i)
self.assertTrue(np.array_equal(t1.data.shape, expected_shape))
class expTests(unittest.TestCase):
def test_exp(self):
t3 = TensorBase(np.array([[[1, 3], [3, 5]],
[[5, 7], [9, 1]]]))
out = t3.exp()
self.assertTrue(np.allclose(out.data, [[[2.71828183e+00, 2.00855369e+01], [2.00855369e+01, 1.48413159e+02]],
[[1.48413159e+02, 1.09663316e+03], [8.10308393e+03, 2.71828183e+00]]]))
def test_exp_(self):
t3 = TensorBase(np.array([[[1, 3], [3, 5]],
[[5, 7], [9, 1]]]))
t3.exp_()
self.assertTrue(np.allclose(t3.data, [[[2.71828183e+00, 2.00855369e+01], [2.00855369e+01, 1.48413159e+02]],
[[1.48413159e+02, 1.09663316e+03], [8.10308393e+03, 2.71828183e+00]]]))
class fracTests(unittest.TestCase):
def test_frac(self):
t3 = TensorBase(np.array([1.23, 4.56, 7.89]))
out = t3.frac()
self.assertTrue(np.allclose(out.data, [0.23, 0.56, 0.89]))
def test_frac_(self):
t3 = TensorBase(np.array([1.23, 4.56, 7.89]))
t3.frac_()
self.assertTrue(np.allclose(t3.data, [0.23, 0.56, 0.89]))
class rsqrtTests(unittest.TestCase):
def test_rsqrt(self):
t1 = TensorBase(np.array([2, 3, 4]))
out = t1.rsqrt()
self.assertTrue(np.allclose(out.data, [0.70710678, 0.57735027, 0.5]))
def test_rsqrt_(self):
t1 = TensorBase(np.array([2, 3, 4]))
t1.rsqrt_()
self.assertTrue(np.allclose(t1.data, [0.70710678, 0.57735027, 0.5]))
class signTests(unittest.TestCase):
def test_sign(self):
t1 = TensorBase(np.array([1, 2, -1, -2]))
out = t1.sign()
self.assertTrue(np.array_equal(out.data, [1, 1, -1, -1]))
def test_sign_(self):
t1 = TensorBase(np.array([1, 2, -1, -2]))
t1.sign_()
self.assertTrue(np.array_equal(t1.data, [1, 1, -1, -1]))
class numpyTests(unittest.TestCase):
def test_numpy(self):
t1 = TensorBase(np.array([[1, 2], [3, 4]]))
self.assertTrue(np.array_equal(t1.to_numpy(), np.array([[1, 2], [3, 4]])))
class reciprocalTests(unittest.TestCase):
def test_reciprocal(self):
t1 = TensorBase(np.array([2, 3, 4]))
out = t1.reciprocal()
self.assertTrue(np.allclose(out.data, [0.5, 0.33333333, 0.25]))
def test_reciprocal_(self):
t1 = TensorBase(np.array([2, 3, 4]))
t1.reciprocal_()
self.assertTrue(np.allclose(t1.data, [0.5, 0.33333333, 0.25]))
class logTests(unittest.TestCase):
def test_log(self):
t1 = TensorBase(np.array([math.exp(1), math.exp(2), math.exp(3)]))
self.assertTrue(np.array_equal((t1.log()).data, [1., 2., 3.]))
def test_log_(self):
t1 = TensorBase(np.array([math.exp(1), math.exp(2), math.exp(3)]))
self.assertTrue(np.array_equal((t1.log_()).data, [1., 2., 3.]))
def test_log_1p(self):
t1 = TensorBase(np.array([1, 2, 3]))
self.assertTrue(np.allclose((t1.log1p()).data, [0.69314718, 1.09861229, 1.38629436]))
def test_log_1p_(self):
t1 = TensorBase(np.array([1, 2, 3]))
self.assertTrue(np.allclose((t1.log1p_()).data, [0.69314718, 1.09861229, 1.38629436]))
class clampTests(unittest.TestCase):
def test_clamp_int(self):
t1 = TensorBase(np.arange(10))
t2 = t1.clamp(minimum=2, maximum=7)
expected_tensor = TensorBase(np.array([2, 2, 2, 3, 4, 5, 6, 7, 7, 7]))
self.assertEqual(t2, expected_tensor)
def test_clamp_float(self):
t1 = TensorBase(np.arange(1, step=0.1))
t2 = t1.clamp(minimum=0.2, maximum=0.7)
expected_tensor = TensorBase(np.array([0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.7, 0.7]))
self.assertEqual(t2, expected_tensor)
def test_clamp_int_in_place(self):
t1 = TensorBase(np.arange(10))
t1.clamp_(minimum=2, maximum=7)
expected_tensor = TensorBase(np.array([2, 2, 2, 3, 4, 5, 6, 7, 7, 7]))
self.assertEqual(t1, expected_tensor)
def test_clamp_float_in_place(self):
t1 = TensorBase(np.arange(1, step=0.1))
t1.clamp_(minimum=0.2, maximum=0.7)
expected_tensor = TensorBase(np.array([0.2, 0.2, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.7, 0.7]))
self.assertEqual(t1, expected_tensor)
class cloneTests(unittest.TestCase):
def test_clone(self):
t1 = TensorBase(np.random.randint(0, 10, size=(5, 10)))
t2 = t1.clone()
self.assertEqual(t1, t2)
self.assertIsNot(t1, t2)
class chunkTests(unittest.TestCase):
def test_chunk(self):
t1 = TensorBase(np.random.randint(0, 10, size=(5, 10)))
t2, t3 = t1.chunk(2, 0)
self.assertNotEqual(t2.shape(), t3.shape())
def test_chunk_same_size(self):
t1 = TensorBase(np.random.randint(0, 10, size=(4, 10)))
t2, t3 = t1.chunk(2, 0, same_size=True)
self.assertEqual(t2.shape(), t3.shape())
class inequalityTest(unittest.TestCase):
    # WARNING: setUp is an overridden method from unittest.TestCase.
    # Never change the name of this function.
def setUp(self):
self.a1 = np.array([-2, -1, 0, 1, 2])
self.a2 = np.array([-4, -1, 5, 2, 2])
self.t1 = TensorBase(self.a1)
self.t2 = TensorBase(self.a2)
self.enc = TensorBase(self.a1, encrypted=True)
class gtTests(inequalityTest):
def test_gt_with_tensor(self):
self.assertEqual(self.t1.gt(self.t2), self.a1 > self.a2)
def test_gt_with_number(self):
self.assertEqual(self.t1.gt(1), self.a1 > 1)
def test_gt__in_place_with_tensor(self):
self.t1.gt_(self.t2)
self.assertEqual(self.t1, self.a1 > self.a2)
def test_gt__in_place_with_number(self):
self.t1.gt_(1)
self.assertEqual(self.t1, self.a1 > 1)
def test_gt_with_encrypted(self):
res = self.t1.gt(self.enc)
self.assertEqual(res, NotImplemented)
res = self.enc.gt(self.t1)
self.assertEqual(res, NotImplemented)
class geTests(inequalityTest):
def test_ge_with_tensor(self):
self.assertEqual(self.t1.ge(self.t2), self.a1 >= self.a2)
def test_ge_with_number(self):
self.assertEqual(self.t1.ge(1), self.a1 >= 1)
def test_ge__in_place_with_tensor(self):
self.t1.ge_(self.t2)
self.assertEqual(self.t1, self.a1 >= self.a2)
def test_ge__in_place_with_number(self):
self.t1.ge_(1)
self.assertEqual(self.t1, self.a1 >= 1)
def test_ge_with_encrypted(self):
res = self.t1.ge(self.enc)
self.assertEqual(res, NotImplemented)
res = self.enc.ge(self.t1)
self.assertEqual(res, NotImplemented)
class ltTests(inequalityTest):
def test_lt_with_tensor(self):
self.assertEqual(self.t1.lt(self.t2), self.a1 < self.a2)
def test_lt_with_number(self):
self.assertEqual(self.t1.lt(1), self.a1 < 1)
def test_lt__in_place_with_tensor(self):
self.t1.lt_(self.t2)
self.assertEqual(self.t1, self.a1 < self.a2)
def test_lt__in_place_with_number(self):
self.t1.lt_(1)
self.assertEqual(self.t1, self.a1 < 1)
def test_lt_with_encrypted(self):
res = self.t1.lt(self.enc)
self.assertEqual(res, NotImplemented)
res = self.enc.lt(self.t1)
self.assertEqual(res, NotImplemented)
class leTests(inequalityTest):
def test_le_with_tensor(self):
self.assertEqual(self.t1.le(self.t2), self.a1 <= self.a2)
def test_le_with_number(self):
self.assertEqual(self.t1.le(1), self.a1 <= 1)
def test_le__in_place_with_tensor(self):
self.t1.le_(self.t2)
self.assertEqual(self.t1, self.a1 <= self.a2)
def test_le__in_place_with_number(self):
self.t1.le_(1)
self.assertEqual(self.t1, self.a1 <= 1)
def test_le_with_encrypted(self):
res = self.t1.le(self.enc)
self.assertEqual(res, NotImplemented)
res = self.enc.le(self.t1)
self.assertEqual(res, NotImplemented)
class bernoulliTests(unittest.TestCase):
def test_bernoulli(self):
p = TensorBase(np.random.uniform(size=(3, 2)))
t1 = TensorBase(np.zeros((5, 5)))
t2 = t1.bernoulli(p)
self.assertTupleEqual((3, 2), t2.shape())
self.assertTrue(np.all(t2.data >= 0) and np.all(t2.data <= 1))
def test_bernoulli_(self):
p = TensorBase(np.random.uniform(size=(3, 2)))
t1 = TensorBase(np.zeros((5, 5)))
t1.bernoulli_(p)
self.assertTupleEqual((3, 2), t1.shape())
self.assertTrue(np.all(t1.data >= 0) and np.all(t1.data <= 1))
class cauchyTests(unittest.TestCase):
def test_cauchy_(self):
t = TensorBase(np.zeros([1, 2, 3, 4]))
t.cauchy_()
self.assertTupleEqual((1, 2, 3, 4), t.shape())
self.assertTrue(np.all(t.data != 0))
class uniformTests(unittest.TestCase):
def test_uniform(self):
t1 = TensorBase(np.zeros(4))
out = t1.uniform(low=0, high=3)
self.assertTrue(np.all(out.data > 0) and np.all(out.data < 3))
def test_uniform_(self):
t1 = TensorBase(np.zeros(4))
t1.uniform_(low=0, high=3)
self.assertTrue(np.all(t1.data > 0) and np.all(t1.data < 3))
class geometricTests(unittest.TestCase):
def test_geometric_(self):
t1 = TensorBase(np.zeros((4, 4)))
out = t1.geometric_(p=0.5)
self.assertTupleEqual(t1.data.shape, out.data.shape)
self.assertTrue(np.all(out.data > 0))
class normalTests(unittest.TestCase):
def test_normal_(self):
t = TensorBase(np.zeros([1, 2, 3, 4]))
t.normal_(mu=0, sigma=1)
self.assertTupleEqual((1, 2, 3, 4), t.shape())
self.assertTrue(np.all(t.data != 0))
def test_normal(self):
t = TensorBase(np.zeros([1, 2, 3, 4]))
t1 = t.normal(mu=0, sigma=1)
self.assertTrue(np.array_equal(t.data, np.zeros([1, 2, 3, 4])))
self.assertTupleEqual((1, 2, 3, 4), t1.shape())
self.assertTrue(np.all(t1.data != 0))
class fillTests(unittest.TestCase):
def test_fill_(self):
t1 = TensorBase(np.array([1, 2, 3, 4]))
t1.fill_(5)
self.assertTrue(np.all(t1.data == 5))
class topkTests(unittest.TestCase):
def test_topK(self):
t1 = TensorBase(np.array([[900, 800, 1000, 2000, 5, 10, 20, 40, 50], [10, 11, 12, 13, 5, 6, 7, 8, 9], [30, 40, 50, 10, 8, 1, 2, 3, 4]]))
t2 = t1.topk(3, largest=True)
self.assertTrue(np.array_equal(t2.data, np.array([[900, 1000, 2000], [11, 12, 13], [30, 40, 50]])))
class tolistTests(unittest.TestCase):
def test_to_list(self):
t1 = TensorBase(np.array([200, 300, 90, 100, 600]))
t1_list = t1.tolist()
self.assertTrue(isinstance(t1_list, list))
class traceTests(unittest.TestCase):
def test_trace(self):
t1 = TensorBase(np.arange(1, 10).reshape(3, 3))
self.assertTrue(np.equal(t1.trace().data, 15))
class roundTests(unittest.TestCase):
def test_round(self):
t1 = TensorBase(np.array([10.4, 9.6, 100.12, 4.0]))
t2 = t1.round(0)
self.assertTrue(np.array_equal(t2.data, np.array([10., 10., 100., 4.])))
def test_round_(self):
t1 = TensorBase(np.array([10.4, 9.6, 100.12, 4.0]))
t1.round_(0)
self.assertTrue(np.array_equal(t1.data, np.array([10., 10., 100., 4.])))
class repeatTests(unittest.TestCase):
def test_repeat(self):
t1 = TensorBase(np.array([1, 2, 3]))
t2 = t1.repeat(reps=(4, 2))
self.assertTrue(np.array_equal(t2.data, np.array([[1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3], [1, 2, 3, 1, 2, 3]])))
class powTests(unittest.TestCase):
def test_pow(self):
t1 = TensorBase(np.array([2, 4, 6]))
t2 = t1.pow(2)
self.assertTrue(np.array_equal(t2.data, np.array([4, 16, 36])))
def test_pow_(self):
t1 = TensorBase(np.array([2, 4, 6]))
t1.pow_(2)
self.assertTrue(np.array_equal(t1.data, np.array([4, 16, 36])))
class negTests(unittest.TestCase):
def test_neg(self):
# int
t1 = TensorBase(np.array([[-0, 1, -2], [0, -1, 2]]))
t2 = t1.neg()
self.assertTrue(np.array_equal(t1.data, np.array([[0, 1, -2], [0, -1, 2]])))
self.assertTrue(np.array_equal(t2.data, np.array([[0, -1, 2], [0, 1, -2]])))
# float
t3 = TensorBase(np.array([[-0.0, 1.5, -2.5], [0.0, -1.5, 2.5]]))
t4 = t3.neg()
self.assertTrue(np.array_equal(t3.data, np.array([[0.0, 1.5, -2.5], [0.0, -1.5, 2.5]])))
self.assertTrue(np.array_equal(t4.data, np.array([[0.0, -1.5, 2.5], [0.0, 1.5, -2.5]])))
def test_neg_(self):
# int
t1 = TensorBase(np.array([[-0, 1, -2], [0, -1, 2]]))
t1.neg_()
self.assertTrue(np.array_equal(t1.data, np.array([[0, -1, 2], [0, 1, -2]])))
# float
t2 = TensorBase(np.array([[-0.0, 1.5, -2.5], [0.0, -1.5, 2.5]]))
t2.neg_()
self.assertTrue(np.array_equal(t2.data, np.array([[0.0, -1.5, 2.5], [0.0, 1.5, -2.5]])))
class tanhTests(unittest.TestCase):
def test_tanh_(self):
# int
t1 = TensorBase(np.array([[-0, 1, -2], [0, -1, 2]]))
t1.tanh_()
self.assertTrue(np.array_equal(t1.data, np.tanh(np.array([[0, 1, -2], [0, -1, 2]]))))
# float
t1 = TensorBase(np.array([[-0.0, 1.5, -2.5], [0.0, -1.5, 2.5]]))
t1.tanh_()
self.assertTrue(np.array_equal(t1.data, np.tanh(np.array([[0.0, 1.5, -2.5], [0.0, -1.5, 2.5]]))))
class prodTests(unittest.TestCase):
def test_prod(self):
t1 = TensorBase(np.array([1, 2, 3]))
t2 = t1.prod()
self.assertTrue(np.equal(t2.data, 6))
class randomTests(unittest.TestCase):
def test_random_(self):
np.random.seed(0)
t1 = TensorBase(np.zeros(4))
t1.random_(low=0, high=5, size=4)
self.assertTrue(np.array_equal(t1.data, np.array([4, 0, 3, 3])))
class nonzeroTests(unittest.TestCase):
def test_non_zero(self):
t1 = TensorBase(np.array([[1, 0, 0], [0, 2, 5]]))
t2 = t1.nonzero()
self.assertTrue(np.array_equal(t2.data, np.array([[0, 1, 1], [0, 1, 2]])))
class cumprodTest(unittest.TestCase):
def test_cumprod(self):
t1 = TensorBase(np.array([[1, 2, 3], [4, 5, 6]]))
t2 = TensorBase(np.array([[1.0, 2.0, 3.0], [4.0, 10.0, 18.0]]))
t3 = TensorBase(np.array([[1, 2, 6], [4, 20, 120]]))
self.assertTrue(np.equal(t1.cumprod(dim=0), t2).all())
self.assertTrue(np.equal(t1.cumprod(dim=1), t3).all())
def test_cumprod_(self):
t1 = TensorBase(np.array([[1, 2, 3], [4, 5, 6]]))
t2 = TensorBase(np.array([[1.0, 2.0, 3.0], [4.0, 10.0, 18.0]]))
t3 = TensorBase(np.array([[1, 2, 6], [4, 20, 120]]))
self.assertTrue(np.equal(t1.cumprod_(dim=0), t2).all())
t1 = TensorBase(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
self.assertTrue(np.equal(t1.cumprod_(dim=1), t3).all())
class splitTests(unittest.TestCase):
def test_split(self):
t1 = TensorBase(np.arange(8.0))
t2 = t1.split(4)
self.assertTrue(np.array_equal(t2, tuple((np.array([0., 1.]), np.array([2., 3.]), np.array([4., 5.]), np.array([6., 7.])))))
class squeezeTests(unittest.TestCase):
def test_squeeze(self):
t1 = TensorBase(np.zeros((2, 1, 2, 1, 2)))
t2 = t1.squeeze()
self.assertTrue(np.array_equal(t2.data, np.array([[[0., 0.], [0., 0.]], [[0., 0.], [0., 0.]]])))
class expandAsTests(unittest.TestCase):
def test_expand_as(self):
t1 = TensorBase(np.array([[1], [2], [3]]))
t2 = TensorBase(np.zeros((3, 4)))
t3 = t1.expand_as(t2)
self.assertTrue(np.array_equal(t2.data.shape, t3.data.shape))
class meanTests(unittest.TestCase):
def test_mean(self):
t1 = TensorBase(np.arange(8).reshape(2, 2, 2))
t2 = t1.mean(1, True)
self.assertTrue(np.array_equal(t2.data, np.array([[[1., 2.]], [[5., 6.]]])))
class notEqualTests(unittest.TestCase):
def test_ne(self):
t1 = TensorBase([1, 2, 3, 4])
t2 = TensorBase([1., 2., 3., 5.])
self.assertEqual(t1.ne(t2), TensorBase([1, 1, 1, 0]))
def test_ne_(self):
t1 = TensorBase([1, 2, 3, 4])
t2 = TensorBase([1., 2., 3., 5.])
t1.ne_(t2)
self.assertTrue(syft.equal(t1, TensorBase([1, 1, 1, 0])))
class index_selectTests(unittest.TestCase):
def test_index_select(self):
t = TensorBase(np.reshape(np.arange(0, 2 * 3 * 4), (2, 3, 4)))
idx = np.array([1, 0])
dim = 2
result = t.index_select(dim=dim, index=idx)
expected = np.array([[[1, 0], [5, 4], [9, 8]], [[13, 12], [17, 16], [21, 20]]])
self.assertTrue(np.array_equal(result.data, expected))
class gatherTests(unittest.TestCase):
def test_gather_numerical_1(self):
t = TensorBase(np.array([[65, 17], [14, 25], [76, 22]]))
idx = TensorBase(np.array([[0], [1], [0]]))
dim = 1
result = t.gather(dim=dim, index=idx)
self.assertTrue(np.array_equal(result.data, np.array([[65], [25], [76]])))
def test_gather_numerical_2(self):
t = TensorBase(np.array([[47, 74, 44], [56, 9, 37]]))
idx = TensorBase(np.array([[0, 0, 1], [1, 1, 0], [0, 1, 0]]))
dim = 0
result = t.gather(dim=dim, index=idx)
        expected = [[47, 74, 37], [56, 9, 44.], [47, 9, 44]]
        self.assertTrue(np.array_equal(result.data, np.array(expected)))
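# The scatter tests below match torch-style scatter_ semantics (inferred from
# the expected arrays, not documented here): for dim=0,
# t[index[i][j], j] = src[i][j], and for dim=1, t[i, index[i][j]] = src[i][j],
# with scalar src values broadcast to every indexed position.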
class scatterTests(unittest.TestCase):
def test_scatter_numerical_0(self):
t = TensorBase(np.zeros((3, 5)))
idx = TensorBase(np.array([[0, 0, 0, 0, 0]]))
src = 1.0
dim = 0
t.scatter_(dim=dim, index=idx, src=src)
self.assertTrue(np.array_equal(t.data, np.array([[1, 1, 1, 1, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])))
def test_scatter_numerical_1(self):
t = TensorBase(np.zeros((3, 5)))
idx = TensorBase(np.array([[0], [0], [0]]))
src = 1.0
dim = 1
t.scatter_(dim=dim, index=idx, src=src)
self.assertTrue(np.array_equal(t.data, np.array([[1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0, 0, 0, 0]])))
def test_scatter_numerical_2(self):
t = TensorBase(np.zeros((3, 5)))
idx = TensorBase(np.array([[0], [0], [0]]))
src = 1.0
dim = -1
t.scatter_(dim=dim, index=idx, src=src)
self.assertTrue(np.array_equal(t.data, np.array([[1, 0, 0, 0, 0], [1, 0, 0, 0, 0], [1, 0, 0, 0, 0]])))
def test_scatter_numerical_3(self):
t = TensorBase(np.zeros((3, 5)))
idx = TensorBase(np.array([[0, 0, 0, 0, 0]]))
src = TensorBase(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))
dim = 0
t.scatter_(dim=dim, index=idx, src=src)
self.assertTrue(np.array_equal(t.data, np.array([[1, 2, 3, 4, 5], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])))
def test_scatter_numerical_4(self):
t = TensorBase(np.zeros((3, 5)))
idx = TensorBase(np.array([[0, 0, 0, 0, 0]]))
src = TensorBase(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))
dim = -2
t.scatter_(dim=dim, index=idx, src=src)
self.assertTrue(np.array_equal(t.data, np.array([[1, 2, 3, 4, 5], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])))
def test_scatter_numerical_5(self):
t = TensorBase(np.zeros((3, 5)))
idx = TensorBase(np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]))
src = TensorBase(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))
dim = 0
t.scatter_(dim=dim, index=idx, src=src)
self.assertTrue(np.array_equal(t.data, np.array([[6, 7, 8, 9, 10], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]])))
def test_scatter_numerical_6(self):
t = TensorBase(np.zeros((3, 4, 5)))
idx = [[[3, 0, 1, 1, 2], [0, 3, 3, 3, 3]], [[2, 0, 0, 0, 0], [2, 1, 0, 2, 0]],
[[0, 0, 1, 0, 2], [1, 3, 2, 2, 2]]]
src = [[[7, 84, 99, 71, 44], [79, 57, 2, 37, 62]], [[31, 44, 43, 54, 56], [72, 52, 21, 89, 95]],
[[5, 3, 99, 4, 52], [32, 88, 58, 62, 9]]]
dim = 1
t.scatter_(dim=dim, index=idx, src=src)
expected = [[[79, 84, 0, 0, 0], [0, 0, 99, 71, 0], [0, 0, 0, 0, 44], [7, 57, 2, 37, 62]],
[[0, 44, 21, 54, 95], [0, 52, 0, 0, 0], [72, 0, 0, 89, 0], [0, 0, 0, 0, 0]],
[[5, 3, 0, 4, 0], [32, 0, 99, 0, 0], [0, 0, 58, 62, 9], [0, 88, 0, 0, 0]]]
self.assertTrue(np.array_equal(t.data, np.array(expected)))
def test_scatter_index_type(self):
t = TensorBase(np.zeros((3, 5)))
idx = TensorBase(np.array([[0.0, 0.0, 0.0, 0.0, 0.0]]))
src = TensorBase(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))
dim = 0
with self.assertRaises(Exception):
t.scatter_(dim=dim, index=idx, src=src)
def test_scatter_index_out_of_range(self):
t = TensorBase(np.zeros((3, 5)))
idx = TensorBase(np.array([[5, 0, 0, 0, 0]]))
src = TensorBase(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))
dim = 0
with self.assertRaises(Exception):
t.scatter_(dim=dim, index=idx, src=src)
    def test_scatter_dim_out_of_range(self):
t = TensorBase(np.zeros((3, 5)))
idx = TensorBase(np.array([[0, 0, 0, 0, 0]]))
src = TensorBase(np.array([[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]))
dim = 4
with self.assertRaises(Exception):
t.scatter_(dim=dim, index=idx, src=src)
def test_scatter_index_src_dimension_mismatch(self):
t = TensorBase(np.zeros((3, 5)))
idx = TensorBase(np.array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0]]))
src = TensorBase(np.array([[1, 2, 3, 4, 5]]))
dim = 1
with self.assertRaises(Exception):
t.scatter_(dim=dim, index=idx, src=src)
class remainderTests(unittest.TestCase):
    def test_remainder(self):
t = TensorBase([[-2, -3], [4, 1]])
result = t.remainder(1.5)
self.assertTrue(np.array_equal(result.data, np.array([[1, 0], [1, 1]])))
def test_remainder_broadcasting(self):
t = TensorBase([[-2, -3], [4, 1]])
result = t.remainder([2, -3])
self.assertTrue(np.array_equal(result.data, np.array([[0, 0], [0, -2]])))
def test_remainder_(self):
t = TensorBase([[-2, -3], [4, 1]])
t.remainder_(2)
self.assertTrue(np.array_equal(t.data, np.array([[0, 1], [0, 1]])))
class testMv(unittest.TestCase):
def test_mv(self):
mat = TensorBase([[1, 2, 3], [2, 3, 4], [4, 5, 6]])
vector = TensorBase([1, 2, 3])
self.assertEqual(tensor.mv(mat, vector), TensorBase([14, 20, 32]))
def test_mv_tensor(self):
mat = TensorBase([[1, 2, 3], [1, 2, 3]])
vec = TensorBase([1, 2, 3])
self.assertEqual(mat.mv(vec), TensorBase([14, 14]))
class masked_scatter_Tests(unittest.TestCase):
def test_masked_scatter_1(self):
t = TensorBase(np.ones((2, 3)))
source = TensorBase([1, 2, 3, 4, 5, 6])
mask = TensorBase([[0, 1, 0], [1, 0, 1]])
t.masked_scatter_(mask, source)
self.assertTrue(np.array_equal(t, TensorBase([[1, 1, 1], [2, 1, 3]])))
    def test_masked_scatter_broadcasting_1(self):
t = TensorBase(np.ones((2, 3)))
source = TensorBase([1, 2, 3, 4, 5, 6])
mask = TensorBase([0, 1, 0])
t.masked_scatter_(mask, source)
self.assertTrue(np.array_equal(t, TensorBase([[1, 1, 1], [1, 2, 1]])))
    def test_masked_scatter_broadcasting_2(self):
t = TensorBase(np.ones((2, 3)))
source = TensorBase([1, 2, 3, 4, 5, 6])
mask = TensorBase([[1], [0]])
t.masked_scatter_(mask, source)
self.assertTrue(np.array_equal(t, TensorBase([[1, 2, 3], [1, 1, 1]])))
class masked_fill_Tests(unittest.TestCase):
def test_masked_fill_(self):
t = TensorBase(np.ones((2, 3)))
value = 2.0
mask = TensorBase([[0, 0, 0], [1, 1, 0]])
t.masked_fill_(mask, value)
self.assertTrue(np.array_equal(t, TensorBase([[1.0, 1.0, 1.0], [2.0, 2.0, 1.0]])))
def test_masked_fill_broadcasting(self):
t = TensorBase(np.ones((2, 3)))
value = 2
mask = TensorBase([[1], [0]])
t.masked_fill_(mask, value)
self.assertTrue(np.array_equal(t, TensorBase([[2, 2, 2], [1, 1, 1]])))
class masked_select_Tests(unittest.TestCase):
def test_masked_select(self):
t = TensorBase(np.arange(0, 6).reshape(2, 3))
mask = TensorBase([[0, 0, 0], [1, 1, 0]])
self.assertTrue(np.array_equal(tensor.masked_select(t, mask), TensorBase([3, 4])))
def test_masked_select_broadcasting_1(self):
t = TensorBase(np.arange(0, 6).reshape(2, 3))
mask = TensorBase([[1, 1, 0]])
self.assertTrue(np.array_equal(tensor.masked_select(t, mask), TensorBase([0, 1, 3, 4])))
def test_masked_select_broadcasting_2(self):
t = TensorBase([2.0])
mask = TensorBase([[1, 1, 0]])
self.assertTrue(np.array_equal(tensor.masked_select(t, mask), TensorBase([2.0, 2.0])))
def test_tensor_base_masked_select(self):
t = TensorBase(np.arange(0, 6).reshape(2, 3))
mask = TensorBase([[1, 1, 0]])
self.assertTrue(np.array_equal(t.masked_select(mask), TensorBase([0, 1, 3, 4])))
class eqTests(unittest.TestCase):
def test_eq_with_tensor(self):
t1 = TensorBase(np.arange(5))
t2 = TensorBase(np.arange(5)[-1::-1])
truth_values = t1.eq(t2)
self.assertEqual(truth_values, [False, False, True, False, False])
def test_eq_with_number(self):
t1 = TensorBase(np.arange(5))
truth_values = t1.eq(1)
self.assertEqual(truth_values, [False, True, False, False, False])
def test_eq_in_place_with_tensor(self):
t1 = TensorBase(np.arange(5))
t2 = TensorBase(np.arange(5)[-1::-1])
t1.eq_(t2)
self.assertEqual(t1, [False, False, True, False, False])
def test_eq_in_place_with_number(self):
t1 = TensorBase(np.arange(5))
t1.eq_(1)
self.assertEqual(t1, [False, True, False, False, False])
class mm_test(unittest.TestCase):
def test_mm_1d(self):
t1 = TensorBase(np.array([2, 3, 4]))
t2 = TensorBase(np.array([3, 4, 5]))
out = t1.mm(t2)
self.assertTrue(np.alltrue(out.data == [38]))
def test_mm_2d(self):
t1 = TensorBase(np.array([[1, 2], [1, 2]]))
t2 = TensorBase(np.array([[2, 3], [2, 3]]))
out = t1.mm(t2)
self.assertTrue(np.alltrue(out.data == [[6, 9], [6, 9]]))
def test_mm_3d(self):
t1 = TensorBase(np.array([[1, 2], [2, 3], [3, 4]]))
t2 = TensorBase(np.array([[1, 2, 3], [2, 3, 4]]))
out = t1.mm(t2)
self.assertTrue(np.alltrue(out.data == [[5, 8, 11], [8, 13, 18], [11, 18, 25]]))
class newTensorTests(unittest.TestCase):
def test_encrypted_error(self):
t1 = TensorBase(np.array([1, 1, 1]), encrypted=True)
t2 = t1.new([1, 1, 2], encrypted=True)
self.assertEqual(t2, NotImplemented)
def test_return_new_float_tensor(self):
t1 = TensorBase(np.array([1, 1, 1]))
t2 = t1.new(np.array([1., 1., 2.]))
self.assertTrue(t2.data.dtype == np.float64)
def test_return_new_int_tensor(self):
t1 = TensorBase(np.array([1, 1, 1]))
t2 = t1.new(np.array([1, 1, 2]))
self.assertTrue(t2.data.dtype == np.int64)
class half(unittest.TestCase):
    def test_half_1(self):
t1 = TensorBase(np.array([2, 3, 4]))
self.assertTrue(np.alltrue(t1.half() == np.array([2, 3, 4]).astype('float16')))
    def test_half_2(self):
t1 = TensorBase(np.array([[1.1, 2.1], [1.11, 2.11]]))
self.assertTrue(np.alltrue(t1.half() == np.array([[1.1, 2.1], [1.11, 2.11]]).astype('float16')))
class fmodTest(unittest.TestCase):
def test_fmod_number(self):
t1 = TensorBase(np.array([-3, -2, -1, 1, 2, 3]))
self.assertTrue(np.array_equal(t1.fmod(2).data, np.array([-1, 0, -1, 1, 0, 1])))
t2 = TensorBase(np.array([-3.5, -2.5, -1.5, 1.5, 2.5, 3.5]))
self.assertTrue(np.array_equal(t2.fmod(2.).data, np.array([-1.5, -0.5, -1.5, 1.5, 0.5, 1.5])))
def test_fmod_tensor(self):
t1 = TensorBase(np.array([-3, -2, -1, 1, 2, 3]))
divisor = np.array([2] * 6)
self.assertTrue(np.array_equal(t1.fmod(divisor).data, np.array([-1, 0, -1, 1, 0, 1])))
t2 = TensorBase(np.array([-3.5, -2.5, -1.5, 1.5, 2.5, 3.5]))
divisor = np.array([2.] * 6)
self.assertTrue(np.array_equal(t2.fmod(divisor).data, np.array([-1.5, -0.5, -1.5, 1.5, 0.5, 1.5])))
class fmod_Test(unittest.TestCase):
def test_fmod_number(self):
t1 = TensorBase(np.array([-3, -2, -1, 1, 2, 3]))
t1.fmod_(2)
self.assertTrue(np.array_equal(t1.data, np.array([-1, 0, -1, 1, 0, 1])))
t2 = TensorBase(np.array([-3.5, -2.5, -1.5, 1.5, 2.5, 3.5]))
t2.fmod_(2.)
self.assertTrue(np.array_equal(t2.data, np.array([-1.5, -0.5, -1.5, 1.5, 0.5, 1.5])))
def test_fmod_tensor(self):
t1 = TensorBase(np.array([-3, -2, -1, 1, 2, 3]))
divisor = np.array([2] * 6)
t1.fmod_(divisor)
self.assertTrue(np.array_equal(t1.data, np.array([-1, 0, -1, 1, 0, 1])))
t2 = TensorBase(np.array([-3.5, -2.5, -1.5, 1.5, 2.5, 3.5]))
divisor = np.array([2.] * 6)
t2.fmod_(divisor)
self.assertTrue(np.array_equal(t2.data, np.array([-1.5, -0.5, -1.5, 1.5, 0.5, 1.5])))
if __name__ == "__main__":
unittest.main()
|
joewie/PySyft
|
tests/test_tensor.py
|
Python
|
apache-2.0
| 45,863
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import sys
import signal
# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class SE_ResNeXt():
def __init__(self, layers=50):
self.params = train_parameters
self.layers = layers
def net(self, input, class_dim=1000):
layers = self.layers
supported_layers = [50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
if layers == 50:
cardinality = 32
reduction_ratio = 16
depth = [3, 4, 6, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu')
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
elif layers == 101:
cardinality = 32
reduction_ratio = 16
depth = [3, 4, 23, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu')
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
elif layers == 152:
cardinality = 64
reduction_ratio = 16
depth = [3, 8, 36, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=3,
stride=2,
act='relu')
conv = self.conv_bn_layer(
input=conv, num_filters=64, filter_size=3, stride=1, act='relu')
conv = self.conv_bn_layer(
input=conv,
num_filters=128,
filter_size=3,
stride=1,
act='relu')
conv = fluid.layers.pool2d(
input=conv, pool_size=3, pool_stride=2, pool_padding=1, \
pool_type='max')
for block in range(len(depth)):
for i in range(depth[block]):
conv = self.bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
cardinality=cardinality,
reduction_ratio=reduction_ratio)
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True)
drop = fluid.layers.dropout(x=pool, dropout_prob=0.2)
stdv = 1.0 / math.sqrt(drop.shape[1] * 1.0)
out = fluid.layers.fc(input=drop, size=class_dim, act='softmax')
return out
def shortcut(self, input, ch_out, stride):
ch_in = input.shape[1]
if ch_in != ch_out or stride != 1:
filter_size = 1
return self.conv_bn_layer(input, ch_out, filter_size, stride)
else:
return input
def bottleneck_block(self, input, num_filters, stride, cardinality,
reduction_ratio):
conv0 = self.conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu')
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
stride=stride,
groups=cardinality,
act='relu')
conv2 = self.conv_bn_layer(
input=conv1, num_filters=num_filters * 2, filter_size=1, act=None)
scale = self.squeeze_excitation(
input=conv2,
num_channels=num_filters * 2,
reduction_ratio=reduction_ratio)
short = self.shortcut(input, num_filters * 2, stride)
return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
def conv_bn_layer(self,
input,
num_filters,
filter_size,
stride=1,
groups=1,
act=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) / 2,
groups=groups,
act=None,
# avoid pserver CPU init differs from GPU
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant()),
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
def squeeze_excitation(self, input, num_channels, reduction_ratio):
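        # Squeeze-and-Excitation block: global-average-pool each channel
        # ("squeeze"), reduce to num_channels / reduction_ratio with a ReLU FC,
        # expand back to num_channels with a sigmoid FC ("excitation"), then
        # rescale the input feature map channel-wise.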
pool = fluid.layers.pool2d(
input=input, pool_size=0, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
squeeze = fluid.layers.fc(input=pool,
size=num_channels / reduction_ratio,
act='relu')
stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
excitation = fluid.layers.fc(input=squeeze,
size=num_channels,
act='sigmoid')
scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
return scale
def get_model(batch_size):
# Input data
image = fluid.layers.data(name="data", shape=[3, 224, 224], dtype='float32')
    label = fluid.layers.data(name="label", shape=[1], dtype='int64')
# Train program
model = SE_ResNeXt(layers=50)
out = model.net(input=image, class_dim=102)
cost = fluid.layers.cross_entropy(input=out, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
# Evaluator
test_program = fluid.default_main_program().clone(for_test=True)
# Optimization
total_images = 6149 # flowers
epochs = [30, 60, 90]
step = int(total_images / batch_size + 1)
bd = [step * e for e in epochs]
base_lr = 0.1
lr = []
lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
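    # With batch_size=2 this yields step=3075, bd=[92250, 184500, 276750] and
    # lr=[0.1, 0.01, 0.001, 0.0001]; since the piecewise decay below is
    # commented out, only base_lr is actually used by the optimizer.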
optimizer = fluid.optimizer.Momentum(
# FIXME(typhoonzero): add back LR decay once ParallelExecutor fixed.
#learning_rate=fluid.layers.piecewise_decay(
# boundaries=bd, values=lr),
learning_rate=base_lr,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
optimizer.minimize(avg_cost)
# Reader
train_reader = paddle.batch(
paddle.dataset.flowers.train(), batch_size=batch_size)
test_reader = paddle.batch(
paddle.dataset.flowers.test(use_xmap=False), batch_size=batch_size)
return test_program, avg_cost, train_reader, test_reader, acc_top1, out
def get_transpiler(trainer_id, main_program, pserver_endpoints, trainers):
t = fluid.DistributeTranspiler()
t.transpile(
trainer_id=trainer_id,
program=main_program,
pservers=pserver_endpoints,
trainers=trainers)
return t
class DistSeResneXt2x2:
def run_pserver(self, pserver_endpoints, trainers, current_endpoint,
trainer_id):
get_model(batch_size=2)
t = get_transpiler(trainer_id,
fluid.default_main_program(), pserver_endpoints,
trainers)
pserver_prog = t.get_pserver_program(current_endpoint)
startup_prog = t.get_startup_program(current_endpoint, pserver_prog)
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
exe.run(pserver_prog)
def _wait_ps_ready(self, pid):
retry_times = 20
while True:
assert retry_times >= 0, "wait ps ready failed"
time.sleep(3)
print("waiting ps ready: ", pid)
try:
# the listen_and_serv_op would touch a file which contains the listen port
# on the /tmp directory until it was ready to process all the RPC call.
os.stat("/tmp/paddle.%d.port" % pid)
return
except os.error:
retry_times -= 1
def run_trainer(self, place, endpoints, trainer_id, trainers, is_dist=True):
test_program, avg_cost, train_reader, test_reader, batch_acc, predict = get_model(
batch_size=2)
if is_dist:
t = get_transpiler(trainer_id,
fluid.default_main_program(), endpoints,
trainers)
trainer_prog = t.get_trainer_program()
else:
trainer_prog = fluid.default_main_program()
startup_exe = fluid.Executor(place)
startup_exe.run(fluid.default_startup_program())
strategy = fluid.ExecutionStrategy()
strategy.num_threads = 1
strategy.allow_op_delay = False
exe = fluid.ParallelExecutor(
True, loss_name=avg_cost.name, exec_strategy=strategy)
feed_var_list = [
var for var in trainer_prog.global_block().vars.itervalues()
if var.is_data
]
feeder = fluid.DataFeeder(feed_var_list, place)
reader_generator = test_reader()
data = next(reader_generator)
first_loss, = exe.run(fetch_list=[avg_cost.name],
feed=feeder.feed(data))
print(first_loss)
for i in xrange(5):
data = next(reader_generator)
loss, = exe.run(fetch_list=[avg_cost.name], feed=feeder.feed(data))
data = next(reader_generator)
last_loss, = exe.run(fetch_list=[avg_cost.name], feed=feeder.feed(data))
print(last_loss)
def main(role="pserver",
endpoints="127.0.0.1:9123",
trainer_id=0,
current_endpoint="127.0.0.1:9123",
trainers=1,
is_dist=True):
model = DistSeResneXt2x2()
if role == "pserver":
model.run_pserver(endpoints, trainers, current_endpoint, trainer_id)
else:
p = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
model.run_trainer(p, endpoints, trainer_id, trainers, is_dist)
if __name__ == "__main__":
if len(sys.argv) != 7:
print(
"Usage: python dist_se_resnext.py [pserver/trainer] [endpoints] [trainer_id] [current_endpoint] [trainers] [is_dist]"
        )
        sys.exit(1)
role = sys.argv[1]
endpoints = sys.argv[2]
trainer_id = int(sys.argv[3])
current_endpoint = sys.argv[4]
trainers = int(sys.argv[5])
is_dist = True if sys.argv[6] == "TRUE" else False
main(
role=role,
endpoints=endpoints,
trainer_id=trainer_id,
current_endpoint=current_endpoint,
trainers=trainers,
is_dist=is_dist)
|
jacquesqiao/Paddle
|
python/paddle/fluid/tests/unittests/dist_se_resnext.py
|
Python
|
apache-2.0
| 12,325
|
""" Copyright 2013 Board of Trustees, University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Description
=================================================================
Middleware that looks at a response and removes listed headers
from the Vary header. This is to change how certain views are cached.
This middleware must go just after the UpdateCacheMiddleware.
"""
from django.http import HttpResponse
from functools import wraps
import re
HEADER_UNPATCH_VARY = 'X-Unpatch-Vary'
def unpatch_vary_headers(response, headers):
"""Add the headers to the list of unpatch vary headers.
"""
unpatch_headers = re.split(r'\s*,\s*',
response.get(HEADER_UNPATCH_VARY, ''))
unpatch_headers.extend(headers)
response[HEADER_UNPATCH_VARY] = ','.join(unpatch_headers)
return response
class UnpatchVaryMiddleware(object):
def process_response(self, request, response):
""" See if we have any headers to remove from Vary,
and do such!
"""
if not (response and response.has_header(HEADER_UNPATCH_VARY)):
return response
unpatch_headers = re.split(r'\s*,\s*', response[HEADER_UNPATCH_VARY])
del response[HEADER_UNPATCH_VARY]
if len(unpatch_headers) == 0:
return response
if not response.has_header('Vary'):
return response
vary_headers = re.split(r'\s*,\s*', response['Vary'])
# Try to preserve the case of headers, but still match
# insensitively
existing_headers = dict((h.lower(), h) for h in vary_headers)
for header in unpatch_headers:
header = header.lower()
if header in existing_headers:
del existing_headers[header]
response['Vary'] = ', '.join(existing_headers.values())
return response
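# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module): a view marks
# the Cookie header for removal from Vary so the cached response can be shared
# across sessions. The view name and payload below are illustrative only.
def example_spot_search(request):
    response = HttpResponse('{"spots": []}', content_type='application/json')
    # UnpatchVaryMiddleware strips Cookie from Vary before the cache stores it.
    return unpatch_vary_headers(response, ['Cookie'])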
|
uw-it-aca/spacescout_web
|
spacescout_web/middleware/unpatch_vary.py
|
Python
|
apache-2.0
| 2,396
|
from django.test import TestCase
from django.utils.timezone import now
from civil_registry.tests.factories import CitizenFactory
from civil_registry.utils import get_citizen_by_national_id, is_valid_fbr_number
class GetCitizenByNIDTest(TestCase):
def test_basic_get(self):
citizen = CitizenFactory()
citizen2 = get_citizen_by_national_id(citizen.national_id)
self.assertEqual(citizen.pk, citizen2.pk)
def test_no_such_citizen(self):
citizen = get_citizen_by_national_id(99)
self.assertIsNone(citizen)
def test_missing_citizen(self):
citizen = CitizenFactory(missing=now())
citizen2 = get_citizen_by_national_id(citizen.national_id)
self.assertIsNone(citizen2)
class ValidFBRNumberTest(TestCase):
def test_integer_valid(self):
self.assertTrue(is_valid_fbr_number(123))
def test_string_of_digits_valid(self):
self.assertTrue(is_valid_fbr_number('123'))
def test_string_of_characters_then_digits_valid(self):
self.assertTrue(is_valid_fbr_number('a123'))
self.assertTrue(is_valid_fbr_number('foo123'))
def test_invalid(self):
# trailing characters invalid
self.assertFalse(is_valid_fbr_number('foo123foo'))
# no digits invalid
self.assertFalse(is_valid_fbr_number('foo'))
self.assertFalse(is_valid_fbr_number('a'))
# nonleading characters
self.assertFalse(is_valid_fbr_number('foo1bar2'))
# non alphabetic characters
self.assertFalse(is_valid_fbr_number('#123'))
self.assertFalse(is_valid_fbr_number('#$%-'))
# whitespace
self.assertFalse(is_valid_fbr_number('123 456'))
|
SmartElect/SmartElect
|
civil_registry/tests/test_utils.py
|
Python
|
apache-2.0
| 1,698
|
"""App Decorators Module."""
# third-party
import wrapt
class WriteOutput:
"""Write the App output variables to Redis.
This decorator will take the functions return value and write the data to Redis using the
key and variable_type. An optional hard coded value can be passed, which will override the
return value. If multiple value are provided for the same output variable there is an option
to overwrite the previous value.
This decorator is intended for very simple Apps. Using the `write_output()` method of the App
template is the recommended way of writing output data.
.. code-block:: python
:linenos:
:lineno-start: 1
@WriteOutput(key='color', variable_type='String')
def my_method(color):
            return color.lower()
Args:
key (str): The name of the playbook output variable.
variable_type (str): The type for the playbook output variable. Supported types are:
String, Binary, KeyValue, TCEntity, TCEnhancedEntity, StringArray,
BinaryArray, KeyValueArray, TCEntityArray, TCEnhancedEntityArray.
default (str): If the method return is None use the provided value as a default.
overwrite (bool): When True and more than one value is provided for the same variable
the previous value will be overwritten.
"""
def __init__(self, key, variable_type, default=None, overwrite=True):
"""Initialize Class properties"""
self.key = key
self.overwrite = overwrite
self.default = default
self.variable_type = variable_type
@wrapt.decorator
def __call__(self, wrapped, instance, args, kwargs):
"""Implement __call__ function for decorator.
Args:
wrapped (callable): The wrapped function which in turns
needs to be called by your wrapper function.
instance (App): The object to which the wrapped
function was bound when it was called.
args (list): The list of positional arguments supplied
when the decorated function was called.
kwargs (dict): The dictionary of keyword arguments
supplied when the decorated function was called.
Returns:
function: The custom decorator function.
"""
def output(app, *args, **kwargs):
"""Call the function and store or append return value.
Args:
app (class): The instance of the App class "self".
"""
data = wrapped(*args, **kwargs)
if data is None and self.default is not None:
data = self.default
index = f'{self.key}-{self.variable_type}'
if app.tcex.playbook.output_data.get(index) and not self.overwrite:
# skip data since a previous value has already been written
pass
else:
# store data returned by function call or default
app.tcex.playbook.add_output(self.key, data, self.variable_type)
return data
return output(instance, *args, **kwargs)
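# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module); the App class
# and method names below are illustrative only and assume the standard tcex
# App template, which provides self.tcex and self.args:
#
#     class MyApp(App):
#         @WriteOutput(key='color', variable_type='String', default='unknown')
#         def run(self):
#             # The return value is written to the "color" String output;
#             # with overwrite=False a second call would keep the first value.
#             return self.args.color.lower()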
|
kstilwell/tcex
|
tcex/decorators/write_output.py
|
Python
|
apache-2.0
| 3,168
|
#!/usr/bin/env python3
# Copyright 2013 The Font Bakery Authors.
# Copyright 2017 The Google Font Tools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# See AUTHORS.txt for the list of Authors and LICENSE.txt for the License.
#
import argparse
from gftools.fix import GaspFixer
description = 'Fixes TTF GASP table'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('ttf_font', nargs='+',
help="Font in OpenType (TTF/OTF) format")
parser.add_argument('--autofix', action='store_true', help='Apply autofix')
parser.add_argument('--set', type=int,
help=('Change gasprange value of key 65535'
' to new value'), default=None)
def main():
args = parser.parse_args()
for path in args.ttf_font:
if args.set is not None:
GaspFixer(path).fix(args.set)
elif args.autofix:
GaspFixer(path).fix()
else:
GaspFixer(path).show()
if __name__ == '__main__':
main()
|
googlefonts/gftools
|
bin/gftools-fix-gasp.py
|
Python
|
apache-2.0
| 1,523
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START automl_cancel_operation]
from google.cloud import automl_v1beta1
def sample_cancel_operation(project, operation_id):
"""
Cancel Long-Running Operation
Args:
project Required. Your Google Cloud Project ID.
operation_id Required. The ID of the Operation.
"""
client = automl_v1beta1.AutoMlClient()
operations_client = client._transport.operations_client
# project = '[Google Cloud Project ID]'
# operation_id = '[Operation ID]'
name = "projects/{}/locations/us-central1/operations/{}".format(
project, operation_id
)
operations_client.cancel_operation(name)
print(u"Cancelled operation: {}".format(name))
# [END automl_cancel_operation]
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--project", type=str, default="[Google Cloud Project ID]")
parser.add_argument("--operation_id", type=str, default="[Operation ID]")
args = parser.parse_args()
sample_cancel_operation(args.project, args.operation_id)
if __name__ == "__main__":
main()
|
googleapis/python-automl
|
samples/beta/cancel_operation.py
|
Python
|
apache-2.0
| 1,695
|
from testtools import TestCase
import json
import falcon
from tests.utils import request_factory, text_data_factory, json_data_factory
from rider.views import DataView, StreamView, TextView, HtmlView, JsonView, ViewSet
from rider.http import Http404, HttpRedirect, HttpPermanentRedirect
def get_text_views(text_data):
class TestTextView(TextView):
def get(self, request):
return text_data
class TestTextView404(TextView):
def get(self, request):
raise Http404(text_data)
@TextView
def test_text_view(request):
return text_data
@TextView
def test_text_view_404(request):
raise Http404(text_data)
return TestTextView, TestTextView404, test_text_view, test_text_view_404
def get_html_views(html_data):
class TestHtmlView(HtmlView):
def get(self, request):
return html_data
class TestHtmlView404(HtmlView):
def get(self, request):
raise Http404(html_data)
@HtmlView
def test_html_view(request):
return html_data
@HtmlView
def test_html_view_404(request):
raise Http404(html_data)
return TestHtmlView, TestHtmlView404, test_html_view, test_html_view_404
def get_json_views(json_data):
class TestJsonView404(JsonView):
def get(self, request):
raise Http404(json_data)
class TestJsonView(JsonView):
def get(self, request):
return json_data
@JsonView
def test_json_view(request):
return json_data
@JsonView
def test_json_view_404(request):
raise Http404(json_data)
return TestJsonView, TestJsonView404, test_json_view, test_json_view_404
#def get_stream_views(location):
#class TestStreamView404(JsonView):
#def get(self, request):
#raise Http404(json_data)
#class TestStreamView(JsonView):
#def get(self, request):
#return json_data
#@JsonView
#def test_stream_view(request):
#return json_data
#@JsonView
#def test_stream_view_404(request):
#raise Http404(json_data)
#return TestJsonView404, TestJsonView, test_json_view, test_json_view_404
def get_redirect_views(location):
result = []
for view_cls in (DataView, StreamView, TextView, HtmlView, JsonView):
class TestRedirectView(view_cls):
def get(self, request):
raise HttpRedirect(location)
yield TestRedirectView, falcon.HTTP_302
class TestPermanentRedirectView(view_cls):
def get(self, request):
raise HttpPermanentRedirect(location)
yield TestPermanentRedirectView, falcon.HTTP_301
def get_viewsets(text_data, html_data, json_data):
class TestViewSet(ViewSet):
@route('a')
@TextView
def text(self, request):
return text_data
@route('b')
@HtmlView
def html(self, request):
return html_data
@route('c')
@JsonView
def json(self, request):
return json_data
return TestViewSet
class TestViews(TestCase):
"""Tests for views"""
def _test_view(self, test_view_cls, request, expected_result, expected_content_type):
view = test_view_cls()
response = falcon.Response()
view.on_get(request, response)
result = falcon.api_helpers.get_body(response)
self.assertEqual(expected_result, result[0].decode('utf-8'))
self.assertEqual(response.content_type, expected_content_type)
def test_text_views(self):
text_data = text_data_factory()
request = request_factory(url='/')
for text_view_cls in get_text_views(text_data):
self._test_view(text_view_cls, request, text_data, 'text/plain')
def test_html_views(self):
text_data = text_data_factory()
request = request_factory(url='/')
for html_view_cls in get_html_views(text_data):
self._test_view(html_view_cls, request, text_data, 'text/html')
def test_json_views(self):
json_data = json_data_factory()
string_json_data = json_data_factory.as_string()
request = request_factory(url='/')
for json_view_cls in get_json_views(json_data):
self._test_view(json_view_cls, request, string_json_data, 'application/json')
def test_redirect_views(self):
location = '/new_location'
request = request_factory(url='/')
for redirect_view_cls, status in get_redirect_views(location):
redirect_view = redirect_view_cls()
response = falcon.Response()
redirect_view.on_get(request, response)
self.assertEqual(location, response.location)
self.assertEqual(response.status, status)
|
riderframework/rider
|
tests/views.py
|
Python
|
apache-2.0
| 4,806
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
from pyro.distributions import Categorical
from pyro.distributions.torch_distribution import TorchDistributionMixin
from pyro.ops.indexing import Vindex
from pyro.util import ignore_jit_warnings
from .messenger import Messenger
from .runtime import _ENUM_ALLOCATOR
def _tmc_mixture_sample(msg):
dist, num_samples = msg["fn"], msg["infer"].get("num_samples")
# find batch dims that aren't plate dims
batch_shape = [1] * len(dist.batch_shape)
for f in msg["cond_indep_stack"]:
if f.vectorized:
batch_shape[f.dim] = f.size if f.size > 0 else dist.batch_shape[f.dim]
batch_shape = tuple(batch_shape)
# sample a batch
sample_shape = (num_samples,)
fat_sample = dist(sample_shape=sample_shape) # TODO thin before sampling
assert fat_sample.shape == sample_shape + dist.batch_shape + dist.event_shape
assert any(d > 1 for d in fat_sample.shape)
target_shape = (num_samples,) + batch_shape + dist.event_shape
# if this site has any possible ancestors, sample ancestor indices uniformly
thin_sample = fat_sample
if thin_sample.shape != target_shape:
index = [Ellipsis] + [slice(None)] * (len(thin_sample.shape) - 1)
squashed_dims = []
for squashed_dim, squashed_size in zip(
range(1, len(thin_sample.shape)), thin_sample.shape[1:]
):
if squashed_size > 1 and (
target_shape[squashed_dim] == 1 or squashed_dim == 0
):
# uniformly sample one ancestor per upstream particle population
ancestor_dist = Categorical(
logits=torch.zeros((squashed_size,), device=thin_sample.device)
)
ancestor_index = ancestor_dist.sample(sample_shape=(num_samples,))
index[squashed_dim] = ancestor_index
squashed_dims.append(squashed_dim)
thin_sample = Vindex(thin_sample)[tuple(index)]
for squashed_dim in squashed_dims:
thin_sample = thin_sample.unsqueeze(squashed_dim)
assert thin_sample.shape == target_shape
return thin_sample
def _tmc_diagonal_sample(msg):
dist, num_samples = msg["fn"], msg["infer"].get("num_samples")
# find batch dims that aren't plate dims
batch_shape = [1] * len(dist.batch_shape)
for f in msg["cond_indep_stack"]:
if f.vectorized:
batch_shape[f.dim] = f.size if f.size > 0 else dist.batch_shape[f.dim]
batch_shape = tuple(batch_shape)
# sample a batch
sample_shape = (num_samples,)
fat_sample = dist(sample_shape=sample_shape) # TODO thin before sampling
assert fat_sample.shape == sample_shape + dist.batch_shape + dist.event_shape
assert any(d > 1 for d in fat_sample.shape)
target_shape = (num_samples,) + batch_shape + dist.event_shape
# if this site has any ancestors, choose ancestors from diagonal approximation
thin_sample = fat_sample
if thin_sample.shape != target_shape:
index = [Ellipsis] + [slice(None)] * (len(thin_sample.shape) - 1)
squashed_dims = []
for squashed_dim, squashed_size in zip(
range(1, len(thin_sample.shape)), thin_sample.shape[1:]
):
if squashed_size > 1 and (
target_shape[squashed_dim] == 1 or squashed_dim == 0
):
# diagonal approximation: identify particle indices across populations
ancestor_index = torch.arange(squashed_size, device=thin_sample.device)
index[squashed_dim] = ancestor_index
squashed_dims.append(squashed_dim)
thin_sample = Vindex(thin_sample)[tuple(index)]
for squashed_dim in squashed_dims:
thin_sample = thin_sample.unsqueeze(squashed_dim)
assert thin_sample.shape == target_shape
return thin_sample
def enumerate_site(msg):
dist = msg["fn"]
num_samples = msg["infer"].get("num_samples", None)
if num_samples is None:
# Enumerate over the support of the distribution.
value = dist.enumerate_support(expand=msg["infer"].get("expand", False))
elif num_samples > 1 and not msg["infer"].get("expand", False):
tmc_strategy = msg["infer"].get("tmc", "diagonal")
if tmc_strategy == "mixture":
value = _tmc_mixture_sample(msg)
elif tmc_strategy == "diagonal":
value = _tmc_diagonal_sample(msg)
else:
raise ValueError("{} not a valid TMC strategy".format(tmc_strategy))
elif num_samples > 1 and msg["infer"]["expand"]:
# Monte Carlo sample the distribution.
value = dist(sample_shape=(num_samples,))
assert value.dim() == 1 + len(dist.batch_shape) + len(dist.event_shape)
return value
class EnumMessenger(Messenger):
"""
Enumerates in parallel over discrete sample sites marked
``infer={"enumerate": "parallel"}``.
:param int first_available_dim: The first tensor dimension (counting
from the right) that is available for parallel enumeration. This
dimension and all dimensions left may be used internally by Pyro.
This should be a negative integer or None.
"""
def __init__(self, first_available_dim=None):
assert (
first_available_dim is None or first_available_dim < 0
), first_available_dim
self.first_available_dim = first_available_dim
super().__init__()
def __enter__(self):
if self.first_available_dim is not None:
_ENUM_ALLOCATOR.set_first_available_dim(self.first_available_dim)
self._markov_depths = {} # site name -> depth (nonnegative integer)
self._param_dims = {} # site name -> (enum dim -> unique id)
self._value_dims = {} # site name -> (enum dim -> unique id)
return super().__enter__()
@ignore_jit_warnings()
def _pyro_sample(self, msg):
"""
:param msg: current message at a trace site.
:returns: a sample from the stochastic function at the site.
"""
if msg["done"] or not isinstance(msg["fn"], TorchDistributionMixin):
return
# Compute upstream dims in scope; these are unsafe to use for this site's target_dim.
scope = msg["infer"].get("_markov_scope") # site name -> markov depth
param_dims = _ENUM_ALLOCATOR.dim_to_id.copy() # enum dim -> unique id
if scope is not None:
for name, depth in scope.items():
if (
self._markov_depths[name] == depth
): # hide sites whose markov context has exited
param_dims.update(self._value_dims[name])
self._markov_depths[msg["name"]] = msg["infer"]["_markov_depth"]
self._param_dims[msg["name"]] = param_dims
if msg["is_observed"] or msg["infer"].get("enumerate") != "parallel":
return
# Compute an enumerated value (at an arbitrary dim).
value = enumerate_site(msg)
actual_dim = -1 - len(msg["fn"].batch_shape) # the leftmost dim of log_prob
# Move actual_dim to a safe target_dim.
target_dim, id_ = _ENUM_ALLOCATOR.allocate(
None if scope is None else param_dims
)
event_dim = msg["fn"].event_dim
categorical_support = getattr(value, "_pyro_categorical_support", None)
if categorical_support is not None:
# Preserve categorical supports to speed up Categorical.log_prob().
# See pyro/distributions/torch.py for details.
assert target_dim < 0
value = value.reshape(value.shape[:1] + (1,) * (-1 - target_dim))
value._pyro_categorical_support = categorical_support
elif actual_dim < target_dim:
assert (
value.size(target_dim - event_dim) == 1
), "pyro.markov dim conflict at dim {}".format(actual_dim)
value = value.transpose(target_dim - event_dim, actual_dim - event_dim)
while value.dim() and value.size(0) == 1:
value = value.squeeze(0)
elif target_dim < actual_dim:
diff = actual_dim - target_dim
value = value.reshape(value.shape[:1] + (1,) * diff + value.shape[1:])
# Compute dims passed downstream through the value.
value_dims = {
dim: param_dims[dim]
for dim in range(event_dim - value.dim(), 0)
if value.size(dim - event_dim) > 1 and dim in param_dims
}
value_dims[target_dim] = id_
msg["infer"]["_enumerate_dim"] = target_dim
msg["infer"]["_dim_to_id"] = value_dims
msg["value"] = value
msg["done"] = True
def _pyro_post_sample(self, msg):
# Save all dims exposed in this sample value.
# Whereas all of site["_dim_to_id"] are needed to interpret a
# site's log_prob tensor, only a filtered subset self._value_dims[msg["name"]]
# are needed to interpret a site's value.
if not isinstance(msg["fn"], TorchDistributionMixin):
return
value = msg["value"]
if value is None:
return
shape = value.data.shape[: value.dim() - msg["fn"].event_dim]
dim_to_id = msg["infer"].setdefault("_dim_to_id", {})
dim_to_id.update(self._param_dims.get(msg["name"], {}))
with ignore_jit_warnings():
self._value_dims[msg["name"]] = {
dim: id_
for dim, id_ in dim_to_id.items()
if len(shape) >= -dim and shape[dim] > 1
}
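# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the original module); the model and
# distribution below are illustrative only. A discrete site marked with
# infer={"enumerate": "parallel"} takes an enumerated value when the model is
# wrapped by this messenger:
#
#     import pyro
#     import pyro.distributions as dist
#
#     def model():
#         return pyro.sample(
#             "z",
#             dist.Categorical(torch.ones(3) / 3.0),
#             infer={"enumerate": "parallel"},
#         )
#
#     enumerated_model = EnumMessenger(first_available_dim=-2)(model)
#     enumerated_model()  # "z" is enumerated over its 3-element support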
|
uber/pyro
|
pyro/poutine/enum_messenger.py
|
Python
|
apache-2.0
| 9,673
|
"""
Copyright 2014 Adam Schnitzer
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tweepy import *
import ConfigParser
import json
import pprint
import random
import string
def get_reply(text):
"""
Returns a reply to a given string.
"""
# split into list of words in lowercase with punctuation removed
words = [w.strip(string.punctuation) for w in text.lower().split()]
# a list of responses which can be used for any question
general_responses = ['try asking on Piazza!',
'I recommend you go to office hours #NotMyProblem',
'did you try googling that?',
"I'm not sure! ask Maxim!",
'Try re-reading the lecture slides',
'This was covered in lecture',
'Search Piazza!',
'We will cover that in class later..',
'just remember, global variables are the root of all evilll!!',
]
if 'autograder' in words:
# returns a random element from the combination of the specific responses
# for 'autograder', in addition to all general responses.
return random.choice(['uh oh.. I think we broke it..',
"it's not a bug! just an undocumented feature!",
] + general_responses)
elif 'extension' in words:
return random.choice(['yeah! you can get an extension.. when hell freezes over!! #SorryNotSorry',
])
elif 'office' in words and 'hours' in words:
return random.choice(['current wait time: 2 weeks 5 days #subtweet #goaway',
] + general_responses)
elif 'codelab' in words:
return random.choice(['we have no clue what it means either!',
"correct solutions don't usually use the 'return' keyword #yousosilly",
] + general_responses)
elif 'style' in words:
return random.choice(['ask Eva!!',
"don't look at me! I only got 5/10 for style!",
"it's a magic number",
"this code is unreadable!",
] + general_responses)
elif ('address' in words and 'sanitizer' in words) or 'asan' in words:
return random.choice([
            'I feel bad for ya son, I got 99 problems but an out of bounds access aint one!',
] + general_responses)
else:
return random.choice(general_responses)
def read_config():
"""
Reads in the API_Keys from the configuration file, and returns as a dict.
"""
config = ConfigParser.ConfigParser()
config.read('183help.cfg')
# this is a dictionary comprehension to return the config as key-value pairs.
return {key: val for (key, val) in config.items('API_Keys')}
class Listener183(StreamListener):
"""
This is a listener class which we specialize to call the get_reply function,
and print out any data which is received.
"""
def on_data(self, raw_data):
data = json.loads(raw_data)
# The new version of the Twitter streaming API initially responds
# with a friends list, which tweepy doesn't handle correctly in this
# version. So this is a hack to swallow that!
        if 'friends' not in data:
return super(Listener183, self).on_data(raw_data)
def on_status(self, status):
# Make sure we don't reply to ourself!
if status.author.id == self.api.me().id:
return
print 'status:'
pprint.pprint(vars(status))
# Create a response and reply
response = '@{0} {1}'.format(status.author.screen_name, get_reply(status.text))
pprint.pprint(vars(self.api.update_status(status=response, in_reply_to_status_id=status.id_str)))
def on_event(self, status):
print 'event:'
pprint.pprint(vars(status))
if __name__ == '__main__':
# authenticate using the credentials in the config file
keys = read_config()
auth = OAuthHandler(keys['consumer_key'], keys['consumer_secret'])
auth.set_access_token(keys['access_token'], keys['access_token_secret'])
# create a stream using credentials, and begin the stream
l = Listener183(api=API(auth))
stream = Stream(auth, l)
try:
stream.userstream()
except KeyboardInterrupt:
stream.disconnect()
|
adamsc/183help-twitter
|
183help_twitter/eecs183help_twitter.py
|
Python
|
apache-2.0
| 4,698
|
"""Support for Velbus light."""
import logging
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_FLASH,
ATTR_TRANSITION,
FLASH_LONG,
FLASH_SHORT,
SUPPORT_BRIGHTNESS,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from . import VelbusEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Velbus switch based on config_entry."""
await hass.data[DOMAIN][entry.entry_id]["tsk"]
cntrl = hass.data[DOMAIN][entry.entry_id]["cntrl"]
entities = []
for channel in cntrl.get_all("light"):
entities.append(VelbusLight(channel, False))
for channel in cntrl.get_all("led"):
entities.append(VelbusLight(channel, True))
async_add_entities(entities)
class VelbusLight(VelbusEntity, LightEntity):
"""Representation of a Velbus light."""
def __init__(self, channel, led):
"""Initialize a light Velbus entity."""
super().__init__(channel)
self._is_led = led
@property
def name(self):
"""Return the display name of this entity."""
if self._is_led:
return f"LED {self._channel.get_name()}"
return self._channel.get_name()
@property
def supported_features(self):
"""Flag supported features."""
if self._is_led:
return SUPPORT_FLASH
return SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION
@property
def is_on(self):
"""Return true if the light is on."""
return self._channel.is_on()
@property
def brightness(self):
"""Return the brightness of the light."""
return int((self._channel.get_dimmer_state() * 255) / 100)
async def async_turn_on(self, **kwargs):
"""Instruct the Velbus light to turn on."""
if self._is_led:
if ATTR_FLASH in kwargs:
if kwargs[ATTR_FLASH] == FLASH_LONG:
attr, *args = "set_led_state", "slow"
elif kwargs[ATTR_FLASH] == FLASH_SHORT:
attr, *args = "set_led_state", "fast"
else:
attr, *args = "set_led_state", "on"
else:
attr, *args = "set_led_state", "on"
else:
if ATTR_BRIGHTNESS in kwargs:
# Make sure a low but non-zero value is not rounded down to zero
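                # e.g. HA brightness 1 maps to int(1 * 100 / 255) == 0, which
                # is clamped up to 1, while 255 maps to 100 (full brightness).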
if kwargs[ATTR_BRIGHTNESS] == 0:
brightness = 0
else:
brightness = max(int((kwargs[ATTR_BRIGHTNESS] * 100) / 255), 1)
attr, *args = (
"set_dimmer_state",
brightness,
kwargs.get(ATTR_TRANSITION, 0),
)
else:
attr, *args = (
"restore_dimmer_state",
kwargs.get(ATTR_TRANSITION, 0),
)
await getattr(self._channel, attr)(*args)
async def async_turn_off(self, **kwargs):
"""Instruct the velbus light to turn off."""
if self._is_led:
attr, *args = "set_led_state", "off"
else:
attr, *args = (
"set_dimmer_state",
0,
kwargs.get(ATTR_TRANSITION, 0),
)
await getattr(self._channel, attr)(*args)
|
Danielhiversen/home-assistant
|
homeassistant/components/velbus/light.py
|
Python
|
apache-2.0
| 3,383
|
import logging
from multiprocessing import Event
from time import sleep
from SimpleOplogGetterThread import SimpleOplogGetterThread
from mongodb_consistent_backup.Errors import OperationError
from mongodb_consistent_backup.Oplog import OplogState
from mongodb_consistent_backup.Oplog.Common.OplogTask import OplogTask
class SimpleOplogGetter(OplogTask):
def __init__(self, manager, config, timer, base_dir, backup_dir, replsets, backup_stop):
super(SimpleOplogGetter, self).__init__(manager, config, timer, base_dir, backup_dir, replsets, backup_stop)
self.worker_threads = []
self.backup_summary = {}
def run(self):
if not self.enabled():
logging.info("Oplog getter is disabled, skipping")
return
logging.info("Starting oplog getter for all replica sets (options: compression=%s, status_secs=%i)" % (self.compression(), self.status_secs))
self.timer.start(self.timer_name)
if len(self.backup_summary) == 0:
raise OperationError("Oplogs cannot gathered without a successful backup first.")
# Determine the time when the last shard completed its backup, because we need all changes
# across all other shards since whenever they finished until then
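        # Example: if shard A finished its backup at ts=100 and shard B at
        # ts=120, shard A's oplog is gathered from 100 through 120, so both
        # shards end up consistent as of ts=120.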
logging.debug("Finding latest finished backup timestamp")
need_changes_until_ts = None
for shard in self.replsets:
ts = self.backup_summary[shard].get('last_ts')
logging.debug("Shard %s's has changes up to %s" % (shard, ts))
if need_changes_until_ts is None or ts > need_changes_until_ts:
need_changes_until_ts = ts
logging.info("Getting oplogs for all shards up to %s" % need_changes_until_ts)
for shard in self.replsets:
getter_stop = Event()
secondary = self.replsets[shard].find_secondary()
mongo_uri = secondary['uri']
shard_name = mongo_uri.replset
need_changes_since_ts = self.backup_summary[shard].get('last_ts')
oplog_file = self.prepare_oplog_files(shard_name)
oplog_state = OplogState(self.manager, mongo_uri, oplog_file)
thread = SimpleOplogGetterThread(
self.backup_stop,
getter_stop,
mongo_uri,
self.config,
self.timer,
oplog_file,
oplog_state,
self.do_gzip(),
need_changes_since_ts,
need_changes_until_ts
)
self.shards[shard] = {
'stop': getter_stop,
'thread': thread,
'state': oplog_state
}
self.worker_threads.append(thread)
logging.debug("Starting thread %s to write %s oplog to %s" % (thread.name, mongo_uri, oplog_file))
thread.start()
# Wait for all threads to complete
self.wait()
        # wait() would have raised an error if not all of them completed
        # normally.
self.completed = True
self.stopped = True
self.get_summaries()
return self._summary
def wait(self):
completed = 0
start_threads = len(self.worker_threads)
# wait for all threads to finish
logging.debug("Waiting for %d oplog threads to finish" % start_threads)
while len(self.worker_threads) > 0:
if self.backup_stop and self.backup_stop.is_set():
logging.error("Received backup stop event due to error(s), stopping backup!")
raise OperationError("Received backup stop event due to error(s)")
for thread in self.worker_threads:
if not thread.is_alive():
logging.debug("Thread %s exited with code %d" % (thread, thread.exitcode))
if thread.exitcode == 0:
completed += 1
self.worker_threads.remove(thread)
else:
logging.debug("Waiting for %s to finish" % thread.name)
sleep(1)
# check if all threads completed
if completed == start_threads:
logging.info("All oplog threads completed successfully")
self.timer.stop(self.timer_name)
else:
raise OperationError("%d oplog getter threads failed to complete successfully!" % (start_threads - completed))
def stop(self, kill=False, sleep_secs=3):
if not self.enabled():
return
logging.info("Stopping all oplog tailers")
for shard in self.shards:
state = self.shards[shard]['state']
thread = self.shards[shard]['thread']
# set thread stop event
self.shards[shard]['stop'].set()
if kill:
thread.terminate()
sleep(1)
# wait for thread to stop
while thread.is_alive():
                logging.info('Waiting for %s getter to stop' % thread.name)
sleep(sleep_secs)
# gather state info
self._summary[shard] = state.get().copy()
self.timer.stop(self.timer_name)
logging.info("Oplog getter completed in %.2f seconds" % self.timer.duration(self.timer_name))
return self._summary
|
Percona-Lab/mongodb_consistent_backup
|
mongodb_consistent_backup/Oplog/SimpleOplogGetter/SimpleOplogGetter.py
|
Python
|
apache-2.0
| 5,350
|
import json
import logging
import importlib
import sys
from oic.extension.token import JWTToken
from oic.utils.authn.authn_context import AuthnBroker
from oic.utils.authn.client import verify_client
from oic.utils.authz import AuthzHandling
from oic.utils.keyio import keyjar_init
from oic.utils.sdb import SessionDB
from oic.utils.userinfo import UserInfo
from otest.events import Events
from oauth2test.rp.provider import Provider
import csv
LOGGER = logging.getLogger(__name__)
__author__ = 'roland'
def read_uri_schemes(filename):
csvfile = open(filename, 'r')
l = csvfile.readline()
l = l.strip()
fieldnames = l.split(',')
reader = csv.DictReader(csvfile, fieldnames)
return dict(
[(r['URI Scheme'], '{} {}'.format(r['Description'], r['Reference'])) for
r in reader])
def main_setup(args, lookup):
sys.path.insert(0, ".")
config = importlib.import_module(args.config)
config.issuer = config.issuer % args.port
config.SERVICE_URL = config.SERVICE_URL % args.port
# Client data base
# cdb = shelve.open(config.CLIENT_DB, writeback=True)
cdb = {}
ac = AuthnBroker()
for authkey, value in list(config.AUTHENTICATION.items()):
authn = None
# if "UserPassword" == authkey:
# from oic.utils.authn.user import UsernamePasswordMako
# authn = UsernamePasswordMako(None, "login.mako", LOOKUP, PASSWD,
# "authorization")
if "NoAuthn" == authkey:
from oic.utils.authn.user import NoAuthn
authn = NoAuthn(None, user=config.AUTHENTICATION[authkey]["user"])
if authn is not None:
ac.add(config.AUTHENTICATION[authkey]["ACR"], authn,
config.AUTHENTICATION[authkey]["WEIGHT"])
# dealing with authorization
authz = AuthzHandling()
kwargs = {
"template_lookup": lookup,
"template": {"form_post": "form_response.mako"},
}
if config.USERINFO == "SIMPLE":
# User info is a simple dictionary in this case statically defined in
# the configuration file
userinfo = UserInfo(config.USERDB)
else:
userinfo = None
# Should I care about verifying the certificates used by other entities
if args.insecure:
kwargs["verify_ssl"] = False
else:
kwargs["verify_ssl"] = True
uri_schemes = read_uri_schemes('uri-schemes-1.csv')
as_args = {
"name": config.issuer,
"cdb": cdb,
"authn_broker": ac,
"userinfo": userinfo,
"authz": authz,
"client_authn": verify_client,
"symkey": config.SYM_KEY,
"template_lookup": lookup,
"template": {"form_post": "form_response.mako"},
"jwks_name": "./static/jwks_{}.json",
'event_db': Events(),
}
com_args = {
"name": config.issuer,
# "sdb": SessionDB(config.baseurl),
"baseurl": config.baseurl,
"cdb": cdb,
"authn_broker": ac,
"userinfo": userinfo,
"authz": authz,
"client_authn": verify_client,
"symkey": config.SYM_KEY,
"template_lookup": lookup,
"template": {"form_post": "form_response.mako"},
"jwks_name": "./static/jwks_{}.json",
'uri_schemes': uri_schemes
}
op_arg = {}
try:
op_arg["cookie_ttl"] = config.COOKIETTL
except AttributeError:
pass
try:
op_arg["cookie_name"] = config.COOKIENAME
except AttributeError:
pass
try:
as_args['behavior'] = config.BEHAVIOR
except AttributeError:
pass
# print URLS
if args.debug:
op_arg["debug"] = True
if args.port == 80:
_baseurl = config.baseurl
else:
if config.baseurl.endswith("/"):
config.baseurl = config.baseurl[:-1]
_baseurl = "%s:%d" % (config.baseurl, args.port)
if not _baseurl.endswith("/"):
_baseurl += "/"
op_arg["baseurl"] = _baseurl
# Add own keys for signing/encrypting JWTs
try:
# a throw-away OP used to do the initial key setup
_op = Provider(sdb=SessionDB(com_args["baseurl"]), **com_args)
jwks = keyjar_init(_op, config.keys)
except KeyError:
pass
else:
op_arg["jwks"] = jwks
op_arg["keys"] = config.keys
as_args['jwks_uri'] = '{}{}/jwks.json'.format(_baseurl, 'static')
as_args['jwks_name'] = 'static/jwks.json'
f = open('static/jwks.json', 'w')
f.write(json.dumps(jwks))
f.close()
as_args['keyjar'] = _op.keyjar
as_args['sdb'] = SessionDB(
com_args["baseurl"],
token_factory=JWTToken('T', keyjar=_op.keyjar,
lt_pattern={'code': 3600, 'token': 900},
iss=_baseurl,
sign_alg='RS256'),
refresh_token_factory=JWTToken(
'R', keyjar=_op.keyjar, lt_pattern={'': 24 * 3600},
iss=_baseurl)
)
try:
op_arg["marg"] = multi_keys(as_args, config.multi_keys)
except AttributeError as err:
pass
return as_args, op_arg, config
def multi_keys(as_args, key_conf):
# a throw-away OP used to do the initial key setup
_op = Provider(**as_args)
jwks = keyjar_init(_op, key_conf, "m%d")
return {"jwks": jwks, "keys": key_conf}
|
heart-test-suites/oauth2test
|
src/oauth2test/rp/setup.py
|
Python
|
apache-2.0
| 5,448
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.enums",
marshal="google.ads.googleads.v9",
manifest={"SummaryRowSettingEnum",},
)
class SummaryRowSettingEnum(proto.Message):
r"""Indicates summary row setting in request parameter.
"""
class SummaryRowSetting(proto.Enum):
r"""Enum describing return summary row settings."""
UNSPECIFIED = 0
UNKNOWN = 1
NO_SUMMARY_ROW = 2
SUMMARY_ROW_WITH_RESULTS = 3
SUMMARY_ROW_ONLY = 4
__all__ = tuple(sorted(__protobuf__.manifest))
|
googleads/google-ads-python
|
google/ads/googleads/v9/enums/types/summary_row_setting.py
|
Python
|
apache-2.0
| 1,189
|
# Copyright 2015 Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""dogpile.cache backend that uses dictionary for storage"""
from dogpile.cache import api
from oslo_cache import core
from oslo_utils import timeutils
__all__ = [
'DictCacheBackend'
]
_NO_VALUE = core.NO_VALUE
class DictCacheBackend(api.CacheBackend):
"""A DictCacheBackend based on dictionary.
Arguments accepted in the arguments dictionary:
:param expiration_time: interval in seconds to indicate maximum
time-to-live value for each key in DictCacheBackend.
Default expiration_time value is 0, that means that all keys have
infinite time-to-live value.
:type expiration_time: real
"""
def __init__(self, arguments):
self.expiration_time = arguments.get('expiration_time', 0)
self.cache = {}
def get(self, key):
"""Retrieves the value for a key.
:param key: dictionary key
:returns: value for a key or :data:`oslo_cache.core.NO_VALUE`
for nonexistent or expired keys.
"""
(value, timeout) = self.cache.get(key, (_NO_VALUE, 0))
if self.expiration_time > 0 and timeutils.utcnow_ts() >= timeout:
self.cache.pop(key, None)
return _NO_VALUE
return value
def get_multi(self, keys):
"""Retrieves the value for a list of keys."""
return [self.get(key) for key in keys]
def set(self, key, value):
"""Sets the value for a key.
Expunges expired keys during each set.
:param key: dictionary key
:param value: value associated with the key
"""
self.set_multi({key: value})
def set_multi(self, mapping):
"""Set multiple values in the cache.
Expunges expired keys during each set.
:param mapping: dictionary with key/value pairs
"""
self._clear()
timeout = 0
if self.expiration_time > 0:
timeout = timeutils.utcnow_ts() + self.expiration_time
for key, value in mapping.items():
self.cache[key] = (value, timeout)
def delete(self, key):
"""Deletes the value associated with the key if it exists.
:param key: dictionary key
"""
self.cache.pop(key, None)
def delete_multi(self, keys):
"""Deletes the value associated with each key in list if it exists.
:param keys: list of dictionary keys
"""
for key in keys:
self.cache.pop(key, None)
def _clear(self):
"""Expunges expired keys."""
now = timeutils.utcnow_ts()
for k in list(self.cache):
(_value, timeout) = self.cache[k]
if timeout > 0 and now >= timeout:
del self.cache[k]
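# Minimal usage sketch, assuming the backend is instantiated directly rather
# than through a dogpile.cache region as it would be in a real deployment:
if __name__ == '__main__':
    backend = DictCacheBackend({'expiration_time': 5})
    backend.set('spam', 'eggs')
    assert backend.get('spam') == 'eggs'
    backend.delete('spam')
    assert backend.get('spam') is _NO_VALUE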
| openstack/oslo.cache | oslo_cache/backends/dictionary.py | Python | apache-2.0 | 3,290 |
import codecs
import os
import sys
input_data_path=sys.argv[1]
output_file_path=sys.argv[2]
output_propid_file=sys.argv[3]
LEMMA_IDX = int(sys.argv[4])
PROPS_IDX = LEMMA_IDX + 1
fout = open(output_file_path, 'w')
fout_propid = open(output_propid_file, 'w')
total_props = 0
total_sents = 0
doc_counts = 0
prev_words = ''
words = []
props = []
tags = []
spans = []
all_props = []
tag_dict={}
label_dict = {}
def print_new_sentence():
global total_props
global total_sents
global words
global props
global tags
  global spans
global all_props
global fout
global fout_propid
global domain
#if len(props) > 0:
total_props += len(props)
total_sents += 1
#print len(props), len(tags)
propid_labels = ['O' for _ in words]
assert len(props) == len(tags)
for t in range(len(props)):
assert len(tags[t]) == len(words)
#print tags[t], props[t], words
# For example, "rubber stamp" is a verbal predicate, and stamp is the predicate head.
assert tags[t][props[t]] in {"B-V", "I-V"}
propid_labels[props[t]] = 'V'
fout.write(str(props[t]) + " " + " ".join(words) + " ||| " + " ".join(tags[t]) + "\n")
fout_propid.write(" ".join(words) + " ||| " + " ".join(propid_labels) + "\n")
fin = open(input_data_path, 'r')
for line in fin:
line = line.strip()
if line == '':
joined_words = " ".join(words)
prev_words = joined_words
print_new_sentence()
words = []
props = []
tags = []
spans = []
all_props = []
continue
info = line.split()
word = info[0]
#print info
words.append(word)
idx = len(words) - 1
if idx == 0:
tags = [[] for _ in info[PROPS_IDX:]]
spans = ["" for _ in info[PROPS_IDX:]]
# Lemma position.
is_predicate = (info[LEMMA_IDX] != '-')
for t in range(len(tags)):
arg = info[PROPS_IDX + t]
label = arg.strip("()*")
label_dict[arg] = 1
if "(" in arg:
tags[t].append("B-" + label)
spans[t] = label
elif spans[t] != "":
tags[t].append("I-" + spans[t])
else:
tags[t].append("O")
if ")" in arg:
spans[t] = ""
if is_predicate:
props.append(idx)
fin.close()
fout.close()
fout_propid.close()
print ('Processed {} documents, {} sentences and {} predicates.'.format(
doc_counts, total_sents, total_props))
print ('Write SRL data to {} and predicate-id data to {}.'.format(
output_file_path, output_propid_file))
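# Invocation sketch (the lemma column index "6" is an assumption about the
# CoNLL-2005 props layout being fed in, not something this script fixes):
#
#   python process_conll05.py conll05.devel.props srl.txt propid.txt 6
#
# srl.txt then holds one "<pred_idx> <words> ||| <BIO tags>" line per predicate
# and propid.txt one "<words> ||| <O/V labels>" line per sentence.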
| luheng/deep_srl | preprocess/process_conll05.py | Python | apache-2.0 | 2,460 |
# -*- coding: utf-8 -*-
'''
Higherarchical Distributed Hash Table index
'''
# Import python libs
import os
import io
import shutil
import struct
import hashlib
# Import alder libs
import alder.aid.rand
import alder.aid.traverse
import alder.store.serial
# Import Third Party Libs
import msgpack
# index header types:
# k: "keep" the entry an the data
# r: "remove" the entry and the data
# e: "expired" remove the index entry but keep the data, another entry
# references it
HEADER_DELIM = '_||_||_'
IND_HEAD_FMT = '>Hc'
def _calc_pos(c_key, hash_limit, b_size, header_len):
'''
Calculate the hash position in the table file
'''
return (abs(hash(c_key) & hash_limit) * b_size) + header_len
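# Worked example of the bucket addressing above: with the default sha1 keys the
# '>20sQH' bucket layout packs to 30 bytes, so with hash_limit=0xfffff and a
# 1024-byte header a key whose masked hash is 7 lands at 7 * 30 + 1024 = 1234.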
class HDHT(object):
'''
The main index, the Higherarchical Distributed Hash Table
'''
def __init__(
self,
root,
key_delim='/',
hash_limit=0xfffff,
key_hash='sha1',
fmt='>KsQH',
fmt_map=None,
header_len=1024,
serial='msgpack'):
if fmt_map is None:
self.fmt_map = ('key', 'prev', 'rev')
else:
self.fmt_map = fmt_map
self.root = root
self.key_delim = key_delim
self.hash_limit = hash_limit
self.key_hash = key_hash
self.header_len = header_len
self.crypt_func = self.__crypt_func()
self.key_size = self.__gen_key_size()
self.fmt = fmt.replace('K', str(self.key_size))
self.bucket_size = self.__gen_bucket_size()
self.serial = alder.store.serial.Serial(serial)
self.tables = {}
def __crypt_func(self):
'''
Return the function to use to crypt hash index keys
'''
if self.key_hash.startswith('blake'):
import libnacl.blake
return libnacl.blake.blake2b
return getattr(hashlib, self.key_hash)
def __gen_key_size(self):
'''
Return the length of the crypt_key
'''
        return len(self.raw_crypt_key('alder is asynchronous lexical distributed event roster'))
def __gen_bucket_size(self):
'''
Calculate the size of the index buckets
'''
args = []
for arg in self.fmt_map:
if arg == 'key':
args.append('0' * self.key_size)
elif arg == 'prev':
args.append(1)
elif arg == 'rev':
args.append(1)
return len(struct.pack(self.fmt, *args))
def _open_hash_table(self, fn_):
'''
Return the header data for the table at the given location, open if
needed
'''
if fn_ in self.tables:
return self.tables[fn_]
if not os.path.isfile(fn_):
raise IOError()
fp_ = io.open(fn_, 'r+b')
header = {'fp': fp_}
raw_head = ''
while True:
raw_read = fp_.read(self.header_len)
if not raw_read:
raise ValueError('Hit the end of the index file with no header!')
raw_head += raw_read
if HEADER_DELIM in raw_head:
header.update(
msgpack.loads(
raw_head[:raw_head.find(HEADER_DELIM)]
)
)
self.tables[fn_] = header
return header
def raw_crypt_key(self, key):
'''
Return the crypted key
'''
return self.crypt_func(key.lstrip(self.key_delim)).digest()
def entry_root(self, key):
'''
Return the root directory to be used for the entry
'''
key = key.strip(self.key_delim)
if self.key_delim not in key:
return self.root
root = key[:key.rfind(self.key_delim)].replace(self.key_delim, os.sep)
return os.path.join(self.root, root)
def entry_base(self, key):
'''
Return the key basename
'''
if self.key_delim not in key:
return key
key = key.strip(self.key_delim)
return key[key.rfind(self.key_delim):].replace(self.key_delim, os.sep).lstrip(self.key_delim)
def get_hash_table(self, fn_):
'''
        Return the hash table at the given location, creating it if needed
'''
if os.path.exists(fn_):
return self._open_hash_table(fn_)
dirname = os.path.dirname(fn_)
if not os.path.exists(dirname):
os.makedirs(dirname)
header = {
'hash': self.key_hash,
'hash_limit': self.hash_limit,
'header_len': self.header_len,
'fmt': self.fmt,
'bucket_size': self.bucket_size,
'fmt_map': self.fmt_map,
'dir': os.path.dirname(fn_),
'num': int(fn_[fn_.rindex('_') + 1:]),
}
header_entry = '{0}{1}'.format(msgpack.dumps(header), HEADER_DELIM)
fp_ = io.open(fn_, 'w+b')
fp_.write(header_entry)
fp_.seek(((self.hash_limit + 2) * self.bucket_size) + self.header_len)
fp_.write('\0')
header['fp'] = fp_
self.tables[fn_] = header
return header
def index_entry(self, key, id_, type_, prev, **kwargs):
'''
Return the index data entry string
'''
entry = {
'key': key,
't': type_,
'p': prev,
}
entry.update(kwargs)
if not id_:
entry['id'] = alder.aid.rand.gen_id()
else:
entry['id'] = id_
packed = msgpack.dumps(entry)
p_len = struct.pack(IND_HEAD_FMT, len(packed), 'k')
return '{0}{1}'.format(p_len, packed), entry
def _table_map(self, comps, fmt_map):
'''
Convert a table map to a dict
'''
ret = {}
for ind in range(len(fmt_map)):
ret[fmt_map[ind]] = comps[ind]
return ret
def get_table_entry(self, key, c_key):
'''
Return the entry location for the given key and crypt key pair
'''
root = self.entry_root(key)
num = 0
while True:
table_fn = os.path.join(root, 'alder_table_{0}'.format(num))
table = self.get_hash_table(table_fn)
pos = _calc_pos(
c_key,
table['hash_limit'],
table['bucket_size'],
table['header_len'])
table['fp'].seek(pos)
bucket = table['fp'].read(table['bucket_size'])
try:
comps = struct.unpack(table['fmt'], bucket)
if comps[0] == '\0' * self.key_size:
comps = (None, None, -1)
except struct.error:
comps = (None, None, -1)
ret = self._table_map(comps, table['fmt_map'])
ret['pos'] = pos
ret['tfn'] = table['fp'].name
ret['num'] = num
if ret['key'] is None:
return ret
if ret['key'] == c_key:
return ret
# Adding these lines in will show keys that collide
# in the hash table in the tests
#print('***************')
#print(self._read_index_entry(table, ret['prev']))
#print(key)
#print('***************')
num += 1
def _read_index_entry(self, table, prev):
        table['fp'].seek(prev)
data_head = struct.unpack(IND_HEAD_FMT, table['fp'].read(3))
index = msgpack.loads(table['fp'].read(data_head[0]))
index['_status'] = data_head[1]
return index
def get_index_entry(self, key, id_=None, count=None):
'''
Get the data entry for the given key
'''
ret = {}
c_key = self.raw_crypt_key(key)
table_entry = self.get_table_entry(key, c_key)
if not table_entry['key']:
return None
table = self.tables[table_entry['tfn']]
prev = table_entry['prev']
if prev == 0:
# There is no data, stubbed out for deletion, return None
return None
ret['table'] = table_entry
rev = table_entry['rev']
counted = 0
rets = {'data': [], 'table': table_entry}
while True:
index_entry = self._read_index_entry(table, prev)
ret['data'] = index_entry
if id_:
if index_entry['id'] == id_:
ret['table']['rev'] = rev
return ret
if index_entry['p']:
prev = index_entry['p']
rev -= 1
continue
return ret
elif count:
if counted < count:
rets['data'].append(index_entry)
counted += 1
prev = index_entry['p']
if prev is None:
return rets
else:
return rets
else:
return ret
def _get_table_entries(self, fn_):
'''
Return the table entries in a given table
'''
table = self.get_hash_table(fn_)
table['fp'].seek(table['header_len'])
seek_lim = ((self.hash_limit + 2) * self.bucket_size) + self.header_len
while True:
bucket = table['fp'].read(table['bucket_size'])
if table['fp'].tell() > seek_lim:
break
if bucket.startswith('\0'):
continue
try:
comps = struct.unpack(table['fmt'], bucket)
if comps[0] == '\0' * self.key_size:
comps = (None, None, -1)
except struct.error:
comps = (None, None, -1)
if not comps[0]:
continue
ret = self._table_map(comps, table['fmt_map'])
table_start = table['fp'].tell()
data = self._read_index_entry(table, ret['prev'])
table['fp'].seek(table_start)
ret['key'] = data['key']
yield ret
def rm_key(self, key, id_=None):
'''
Remove a key id_, if no id_ is specified the key is recursively removed
'''
ret = False
c_key = self.raw_crypt_key(key)
table_entry = self.get_table_entry(key, c_key)
table = self.tables[table_entry['tfn']]
prev = table_entry['prev']
while True:
stub = True
table['fp'].seek(prev)
data_head = struct.unpack(IND_HEAD_FMT, table['fp'].read(3))
index_entry = msgpack.loads(table['fp'].read(data_head[0]))
if id_:
if index_entry['id'] != id_:
stub = False
else:
stub = True
if stub:
table['fp'].seek(prev)
table['fp'].write(struct.pack(IND_HEAD_FMT, data_head[0], 'r'))
ret = True
if id_:
break
prev = index_entry['p']
if not prev:
break
if not id_:
            # Check if the next table has a collision entry, if so keep this
# table entry and mark it for removal in a compact call
next_fn = '{0}{1}'.format(table['fp'].name[:-1], table['num'] + 1)
collision = False
if os.path.isfile(next_fn):
next_table = self.get_hash_table(next_fn)
next_table['fp'].seek(table_entry['pos'])
next_raw_entry = next_table['fp'].read(next_table['bucket_size'])
if next_raw_entry != '\0' * next_table['bucket_size']:
collision = True
# Stub out the table entry as well
if not collision:
stub_entry = '\0' * table['bucket_size']
else:
stub_entry = struct.pack(table['fmt'], table_entry['key'], 0, 0)
table['fp'].seek(table_entry['pos'])
table['fp'].write(stub_entry)
ret = True
return ret
def rmdir(self, d_key):
'''
Recursively remove a key directory and all keys and key data
therein and below.
'''
fn_root = self.root
if not d_key or d_key == self.key_delim:
pass
else:
fn_root = self.entry_root('{0}/blank'.format(d_key))
shutil.rmtree(fn_root)
return True
def listdir(self, d_key):
'''
Return a list of the keys
'''
fn_root = self.root
ret = []
if not d_key or d_key == self.key_delim:
pass
else:
fn_root = self.entry_root('{0}/blank'.format(d_key))
for fn_ in os.listdir(fn_root):
if not fn_.startswith('alder_table_'):
continue
full = os.path.join(fn_root, fn_)
for entry in self._get_table_entries(full):
ret.append(entry)
return ret
def write_table_entry(self, table_entry, c_key, prev):
'''
Write a table entry
'''
table = self.get_hash_table(table_entry['tfn'])
t_str = struct.pack(table['fmt'], c_key, prev, table_entry['rev'] + 1)
table['fp'].seek(table_entry['pos'])
table['fp'].write(t_str)
return table_entry['rev'] + 1
def write_index_entry(
self,
table_entry,
key,
id_,
type_,
**kwargs):
'''
Write a data entry
'''
table = self.get_hash_table(table_entry['tfn'])
raw, entry = self.index_entry(
key,
id_,
type_,
table_entry['prev'],
**kwargs)
table['fp'].seek(0, 2)
prev = table['fp'].tell()
table['fp'].write(raw)
return prev, entry
def commit(
self,
table_entry,
key,
c_key,
id_,
type_,
**kwargs):
prev, entry = self.write_index_entry(
table_entry,
key,
id_,
type_,
**kwargs)
entry['rev'] = self.write_table_entry(table_entry, c_key, prev)
return entry
def write_doc_stor(self, table_entry, data, serial=None):
'''
Write the data to the storage file
'''
table = self.get_hash_table(table_entry['tfn'])
serial = serial if serial else self.serial.default
serial_fun = getattr(self.serial, '{0}_dump'.format(serial))
serial_data = serial_fun(data)
table['fp'].seek(0, 2)
start = table['fp'].tell()
table['fp'].write(serial_data)
return {'st': start, 'sz': len(serial_data)}
def read_doc_stor(self, entries, serial=None, **kwargs):
'''
Read in the data
'''
table = self.get_hash_table(entries['table']['tfn'])
table['fp'].seek(entries['data']['st'])
raw = table['fp'].read(entries['data']['sz'])
serial = serial if serial else self.serial.default
serial_fun = getattr(self.serial, '{0}_load'.format(serial))
ret = serial_fun(raw)
if kwargs.get('doc_path'):
return alder.aid.traverse.traverse_dict_and_list(ret, kwargs['doc_path'])
return ret
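# Rough usage sketch, assuming the Python 2 environment this module targets and
# a throwaway directory for the table files; the key/crypt-key handling mirrors
# what get_index_entry() does internally:
if __name__ == '__main__':
    index = HDHT('/tmp/alder_hdht_demo')
    key = '/demo/entry'
    c_key = index.raw_crypt_key(key)
    table_entry = index.get_table_entry(key, c_key)
    # commit() appends the index entry and points the table bucket at it.
    index.commit(table_entry, key, c_key, None, 'doc')
    print(index.get_index_entry(key)['data']['id'])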
| AlderDHT/alder | alder/index/hdht.py | Python | apache-2.0 | 15,419 |
import re
import uuid as py_uuid
from cattle import ApiError
from common_fixtures import * # NOQA
from test_volume import VOLUME_CLEANUP_LABEL
TEST_IMAGE = 'ibuildthecloud/helloworld'
TEST_IMAGE_LATEST = TEST_IMAGE + ':latest'
TEST_IMAGE_UUID = 'docker:' + TEST_IMAGE
if_docker = pytest.mark.skipif("os.environ.get('DOCKER_TEST') == 'false'",
reason='DOCKER_TEST is not set')
os_environ = "os.environ.get('DOCKER_VERSION') != '1.12.1'"
if_docker_1_12 = pytest.mark.skipif(os_environ,
reason='Docker version is not 1.12.1')
sched_environ = "os.environ.get('CATTLE_TEST_RESOURCE_SCHEDULER') != 'true'"
if_resource_scheduler = pytest.mark.skipif(sched_environ)
@pytest.fixture(scope='session')
def docker_client(super_client):
for host in super_client.list_host(state='active', remove_null=True,
kind='docker'):
key = super_client.create_api_key(accountId=host.accountId)
super_client.wait_success(key)
wait_for(lambda: host.agent().state == 'active')
wait_for(lambda: len(host.storagePools()) > 0 and
host.storagePools()[0].state == 'active')
return api_client(key.publicValue, key.secretValue)
raise Exception('Failed to find docker host, please register one')
@if_docker
def test_docker_create_only(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
startOnCreate=False)
try:
container = docker_client.wait_success(container)
assert container is not None
assert 'container' == container.type
image = super_client.reload(container).image()
assert image.instanceKind == 'container'
image_mapping = filter(
lambda m: m.storagePool().external,
image.imageStoragePoolMaps()
)
assert len(image_mapping) == 0
assert not image.isPublic
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_create_only_from_sha(docker_client, super_client):
image_name = 'tianon/true@sha256:662fc60808e6d5628a090e39' \
'b4bcae694add28a626031cc889109c2cf2af5d73'
uuid = 'docker:' + image_name
container = docker_client.create_container(name='test-sha256',
imageUuid=uuid,
networkMode='bridge',
startOnCreate=False)
try:
container = docker_client.wait_success(container)
assert container is not None
assert 'container' == container.type
image = super_client.reload(container).image()
assert image.instanceKind == 'container'
image_mapping = filter(
lambda m: m.storagePool().external,
image.imageStoragePoolMaps()
)
assert len(image_mapping) == 0
assert not image.isPublic
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_create_with_start(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
try:
assert container.state == 'creating'
container = super_client.wait_success(container)
assert container.state == 'running'
assert container.data.dockerContainer.Image == TEST_IMAGE
assert len(container.volumes()) == 1
image = container.volumes()[0].image()
image = super_client.reload(image)
image_mapping = filter(
lambda m: not m.storagePool().external,
image.imageStoragePoolMaps()
)
assert len(image_mapping) == 1
assert image_mapping[0].imageId == image.id
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_build(docker_client, super_client):
uuid = 'image-' + random_str()
url = 'https://github.com/rancherio/tiny-build/raw/master/build.tar'
container = docker_client.create_container(imageUuid='docker:' + uuid,
networkMode='bridge',
build={
'context': url,
})
try:
assert container.state == 'creating'
container = super_client.wait_success(container)
# This builds tianon/true which just dies
assert container.state == 'running' or container.state == 'stopped'
assert container.transitioning == 'no'
assert container.data.dockerContainer.Image == uuid
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_create_with_start_using_docker_io(docker_client, super_client):
image = 'docker.io/' + TEST_IMAGE
uuid = 'docker:' + image
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
container = super_client.wait_success(container)
assert container.state == 'running'
assert container.data.dockerContainer.Image == image
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_command(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
command=['sleep', '42'])
try:
container = super_client.wait_success(container)
assert container.data.dockerInspect.Config.Cmd == ['sleep', '42']
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_command_args(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
command=['sleep', '1', '2',
'3'])
try:
container = super_client.wait_success(container)
assert container.data.dockerInspect.Config.Cmd == ['sleep', '1', '2',
'3']
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_short_lived_container(docker_client, super_client):
container = docker_client.create_container(imageUuid="docker:tianon/true",
networkMode='bridge')
container = wait_for_condition(
docker_client, container,
lambda x: x.state == 'stopped',
lambda x: 'State is: ' + x.state)
assert container.state == 'stopped'
assert container.transitioning == 'no'
@if_docker
def test_docker_stop(docker_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
assert container.state == 'creating'
container = docker_client.wait_success(container)
assert container.state == 'running'
start = time.time()
container = container.stop(timeout=0)
assert container.state == 'stopping'
container = docker_client.wait_success(container)
delta = time.time() - start
assert container.state == 'stopped'
assert delta < 10
@if_docker
def test_docker_purge(docker_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
assert container.state == 'creating'
container = docker_client.wait_success(container)
assert container.state == 'running'
container = container.stop(timeout=0)
assert container.state == 'stopping'
container = docker_client.wait_success(container)
assert container.state == 'stopped'
docker_client.delete(container)
container = docker_client.wait_success(container)
assert container.removed is not None
safe_purge(container, docker_client)
volumes = container.volumes()
assert len(volumes) == 0
def safe_purge(c, docker_client):
try:
c.purge()
except (ApiError, AttributeError):
# It's possible for the container to already have been purged
pass
c = docker_client.wait_success(c)
assert c.state == 'purged'
return c
@if_docker
def test_docker_image_format(docker_client, super_client):
uuid = TEST_IMAGE_UUID
container = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
try:
container = docker_client.wait_success(container)
container = super_client.reload(container)
assert container.image().format == 'docker'
assert container.volumes()[0].image().format == 'docker'
assert container.volumes()[0].format == 'docker'
finally:
if container is not None:
docker_client.delete(container)
@if_docker
def test_docker_ports_from_container_publish_all(docker_client):
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(networkMode='bridge',
publishAllPorts=True,
imageUuid=uuid)
c = docker_client.wait_success(c)
assert c.state == 'running'
ports = c.ports_link()
assert len(ports) == 1
port = ports[0]
assert port.publicPort is not None
assert port.privatePort == 8080
assert port.publicIpAddressId is not None
assert port.kind == 'imagePort'
docker_client.delete(c)
@if_docker
def test_docker_ports_from_container_no_publish(docker_client):
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(imageUuid=uuid,
networkMode='bridge')
c = docker_client.wait_success(c)
assert c.state == 'running'
ports = c.ports_link()
assert len(ports) == 1
port = ports[0]
assert port.publicPort is None
assert port.privatePort == 8080
assert port.publicIpAddressId is not None
assert port.kind == 'imagePort'
docker_client.delete(c)
@if_docker
def test_docker_ports_from_container(docker_client, super_client):
def reload(x):
return super_client.reload(x)
_ = reload
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(networkMode='bridge',
startOnCreate=False,
publishAllPorts=True,
imageUuid=uuid,
ports=[
'8081',
'8082/tcp',
'8083/udp'])
c = docker_client.wait_success(c)
assert c.state == 'stopped'
count = 0
for port in c.ports_link():
count += 1
assert port.kind == 'userPort'
assert port.publicPort is None
assert port.privateIpAddressId is None
assert port.publicIpAddressId is None
if port.privatePort == 8081:
assert port.protocol == 'tcp'
elif port.privatePort == 8082:
assert port.protocol == 'tcp'
elif port.privatePort == 8083:
assert port.protocol == 'udp'
else:
assert False
assert count == 3
c = docker_client.wait_success(c.start())
assert c.state == 'running'
count = 0
ip = None
privateIp = None
for port in c.ports_link():
count += 1
assert port.privateIpAddressId is not None
privateIp = port.privateIpAddress()
assert privateIp.kind == 'docker'
assert _(privateIp).subnetId is None
assert port.publicPort is not None
assert port.publicIpAddressId is not None
if ip is None:
ip = port.publicIpAddressId
assert port.publicIpAddressId == ip
if port.privatePort == 8081:
assert port.kind == 'userPort'
assert port.protocol == 'tcp'
elif port.privatePort == 8082:
assert port.kind == 'userPort'
assert port.protocol == 'tcp'
elif port.privatePort == 8083:
assert port.kind == 'userPort'
assert port.protocol == 'udp'
elif port.privatePort == 8080:
assert port.kind == 'imagePort'
else:
assert False
assert count == 4
assert c.primaryIpAddress == privateIp.address
c = docker_client.wait_success(c.stop(timeout=0))
assert c.state == 'stopped'
count = 0
for nic in _(c).nics():
for ip in nic.ipAddresses():
count += 1
assert ip.kind == 'docker'
assert ip.state == 'inactive'
assert ip.address is None
assert count == 1
c = docker_client.wait_success(c.start())
if c.state != 'running':
super_c = super_client.reload(c)
print 'DEBUG Container not running: %s' % super_c
assert c.state == 'running'
count = 0
for nic in _(c).nics():
for ip in nic.ipAddresses():
count += 1
assert ip.kind == 'docker'
assert ip.state == 'active'
assert ip.address is not None
assert count == 1
docker_client.delete(c)
@if_docker
def test_no_port_override(docker_client, super_client):
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
ports=['8083:8080'])
try:
c = super_client.wait_success(c, timeout=240)
assert c.state == 'running'
ports = c.ports_link()
assert len(ports) == 1
assert ports[0].kind == 'userPort'
assert ports[0].publicPort == 8083
assert ports[0].privatePort == 8080
finally:
if c is not None:
super_client.delete(c)
@if_docker
def test_docker_volumes(docker_client, super_client):
def reload(x):
return super_client.reload(x)
_ = reload
uuid = TEST_IMAGE_UUID
bind_mount_uuid = py_uuid.uuid4().hex
bar_host_path = '/tmp/bar%s' % bind_mount_uuid
bar_bind_mount = '%s:/bar' % bar_host_path
c = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
startOnCreate=False,
dataVolumes=['/foo',
bar_bind_mount])
c = docker_client.wait_success(c)
assert len(c.dataVolumes) == 2
assert set(c.dataVolumes) == set(['/foo', bar_bind_mount])
c = super_client.wait_success(c.start())
volumes = c.volumes()
assert len(volumes) == 1
mounts = c.mounts_link()
assert len(mounts) == 2
foo_mount, bar_mount = None, None
foo_vol, bar_vol = None, None
for mount in mounts:
assert mount.instance().id == c.id
if mount.path == '/foo':
foo_mount = mount
foo_vol = mount.volume()
elif mount.path == '/bar':
bar_mount = mount
bar_vol = mount.volume()
foo_vol = wait_for_condition(
docker_client, foo_vol, lambda x: x.state == 'active')
assert foo_mount is not None
assert foo_mount.permissions == 'rw'
assert foo_vol is not None
assert not foo_vol.isHostPath
assert _(foo_vol).attachedState == 'inactive'
bar_vol = wait_for_condition(
docker_client, bar_vol, lambda x: x.state == 'active')
assert bar_mount is not None
assert bar_mount.permissions == 'rw'
assert bar_vol is not None
assert _(bar_vol).attachedState == 'inactive'
assert bar_vol.isHostPath
# We use 'in' instead of '==' because Docker uses the fully qualified
# non-linked path and it might look something like: /mnt/sda1/<path>
assert bar_host_path in bar_vol.uri
c2 = docker_client.create_container(name="volumes_from_test",
networkMode='bridge',
imageUuid=uuid,
startOnCreate=False,
dataVolumesFrom=[c.id])
c2 = docker_client.wait_success(c2)
assert len(c2.dataVolumesFrom) == 1
assert set(c2.dataVolumesFrom) == set([c.id])
c2 = super_client.wait_success(c2.start())
c2_mounts = c2.mounts_link()
assert len(c2_mounts) == 2
for mount in c2_mounts:
assert mount.instance().id == c2.id
if mount.path == '/foo':
assert mount.volumeId == foo_vol.id
elif mount.path == '/bar':
assert mount.volumeId == bar_vol.id
c = docker_client.wait_success(c.stop(remove=True, timeout=0))
c2 = docker_client.wait_success(c2.stop(remove=True, timeout=0))
    # set it as false because we delete the volume as soon as we delete the container
_check_path(foo_vol, False, docker_client, super_client)
_check_path(bar_vol, True, docker_client, super_client)
@if_docker
def test_volumes_from_more_than_one_container(docker_client):
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
dataVolumes=['/foo'])
docker_client.wait_success(c)
c2 = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
dataVolumes=['/bar'])
docker_client.wait_success(c2)
c3 = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
dataVolumesFrom=[c.id, c2.id])
c3 = docker_client.wait_success(c3)
mounts = c3.mounts_link()
assert len(mounts) == 2
paths = ['/foo', '/bar']
for m in mounts:
assert m.path in paths
@if_docker
def test_stack_volume_delete(docker_client, super_client):
stack = docker_client.create_stack(name=random_str())
stack = docker_client.wait_success(stack)
docker_client.create_volumeTemplate(name="foo", stackId=stack.id)
# create service
launch_config = {"imageUuid": "docker:debian", "dataVolumes": "foo:/bar",
"networkMode": "none",
"labels": {"io.rancher.container.start_once": "true"},
"command": ["mkdir", "/bar/touched"]}
svc1 = docker_client.create_service(name=random_str(), stackId=stack.id,
launchConfig=launch_config, scale=1)
svc1 = docker_client.wait_success(svc1)
docker_client.wait_success(svc1.activate())
c = _validate_compose_instance_stopped(docker_client, svc1, stack, "1")
mounts = check_mounts(docker_client, c, 1)
vol = mounts[0].volume()
# remove stack, validate its volume is removed on the host
docker_client.wait_success(stack.remove())
_check_path(vol, False, docker_client, super_client,
["%s:/test" % vol.name], "/test/touched")
def _validate_compose_instance_stopped(client, service, env,
number, launch_config_name=None):
cn = launch_config_name + "-" if launch_config_name is not None else ""
name = env.name + "-" + service.name + "-" + cn + number
def wait_for_map_count(service):
instances = client.list_container(name=name, state="stopped")
return len(instances) == 1
wait_for(lambda: wait_for_condition(client, service, wait_for_map_count))
instances = client.list_container(name=name, state="stopped")
return instances[0]
@if_docker
def test_container_fields(docker_client, super_client):
caps = ["SYS_MODULE", "SYS_RAWIO", "SYS_PACCT", "SYS_ADMIN",
"SYS_NICE", "SYS_RESOURCE", "SYS_TIME", "SYS_TTY_CONFIG",
"MKNOD", "AUDIT_WRITE", "AUDIT_CONTROL", "MAC_OVERRIDE",
"MAC_ADMIN", "NET_ADMIN", "SYSLOG", "CHOWN", "NET_RAW",
"DAC_OVERRIDE", "FOWNER", "DAC_READ_SEARCH", "FSETID",
"KILL", "SETGID", "SETUID", "LINUX_IMMUTABLE",
"NET_BIND_SERVICE", "NET_BROADCAST", "IPC_LOCK",
"IPC_OWNER", "SYS_CHROOT", "SYS_PTRACE", "SYS_BOOT",
"LEASE", "SETFCAP", "WAKE_ALARM", "BLOCK_SUSPEND", "ALL"]
test_name = 'container_test'
image_uuid = 'docker:ibuildthecloud/helloworld'
restart_policy = {"maximumRetryCount": 2, "name": "on-failure"}
c = docker_client.create_container(name=test_name + random_str(),
networkMode='bridge',
imageUuid=image_uuid,
capAdd=caps,
capDrop=caps,
dnsSearch=['8.8.8.8', '1.2.3.4'],
dns=['8.8.8.8', '1.2.3.4'],
privileged=True,
domainName="rancher.io",
memory=12000000,
memorySwap=16000000,
memoryReservation=4194304,
cpuSet="0,1",
stdinOpen=True,
tty=True,
command=["true"],
entryPoint=["/bin/sh", "-c"],
cpuShares=400,
restartPolicy=restart_policy,
devices="/dev/null:/dev/xnull:rw")
c = super_client.wait_success(c)
wait_for(lambda: super_client.reload(c).data['dockerInspect'] is not None)
wait_for(lambda: super_client.
reload(c).data['dockerInspect']['HostConfig'] is not None)
assert set(c.data['dockerInspect']['HostConfig']['CapAdd']) == set(caps)
assert set(c.data['dockerInspect']['HostConfig']['CapDrop']) == set(caps)
actual_dns = c.data['dockerInspect']['HostConfig']['Dns']
# TODO: when networking is back
# assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4', '169.254.169.250'])
assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4'])
actual_dns = c.data['dockerInspect']['HostConfig']['DnsSearch']
# TODO: when networking is back
# assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4', 'rancher.internal'])
assert set(actual_dns) == set(['8.8.8.8', '1.2.3.4'])
assert c.data['dockerInspect']['HostConfig']['Privileged']
assert c.data['dockerInspect']['Config']['Domainname'] == "rancher.io"
assert c.data['dockerInspect']['HostConfig']['Memory'] == 12000000
assert c.data['dockerInspect']['HostConfig'][
'MemoryReservation'] == 4194304
# assert c.data['dockerInspect']['Config']['MemorySwap'] == 16000000
assert c.data['dockerInspect']['HostConfig']['CpusetCpus'] == "0,1"
assert c.data['dockerInspect']['Config']['Tty']
assert c.data['dockerInspect']['Config']['OpenStdin']
actual_entry_point = set(c.data['dockerInspect']['Config']['Entrypoint'])
assert actual_entry_point == set(["/bin/sh", "-c"])
assert c.data['dockerInspect']['HostConfig']['CpuShares'] == 400
act_restart_pol = c.data['dockerInspect']['HostConfig']['RestartPolicy']
assert act_restart_pol['MaximumRetryCount'] == 2
assert act_restart_pol['Name'] == "on-failure"
actual_devices = c.data['dockerInspect']['HostConfig']['Devices']
assert len(actual_devices) == 1
assert actual_devices[0]['CgroupPermissions'] == "rw"
assert actual_devices[0]['PathOnHost'] == "/dev/null"
assert actual_devices[0]['PathInContainer'] == "/dev/xnull"
@if_docker
def test_docker_newfields(docker_client, super_client):
test_name = 'container_field_test'
image_uuid = 'docker:ibuildthecloud/helloworld'
privileged = True
blkioWeight = 100
cpuPeriod = 100000
cpuQuota = 50000
cpuSetMems = "0"
kernelMemory = 10000000
memory = 10000000
groupAdd = ['root']
memorySwappiness = 50
oomScoreAdj = 500
shmSize = 67108864
tmpfs = {"/run": "rw,noexec,nosuid,size=65536k"}
uts = "host"
ipcMode = "host"
stopSignal = "SIGTERM"
stopTimeout = 10
ulimits = [{"name": "cpu", "hard": 100000, "soft": 100000}]
c = docker_client.create_container(name=test_name,
imageUuid=image_uuid,
privileged=privileged,
blkioWeight=blkioWeight,
cpuPeriod=cpuPeriod,
cpuQuota=cpuQuota,
cpuSetMems=cpuSetMems,
kernelMemory=kernelMemory,
groupAdd=groupAdd,
memory=memory,
memorySwappiness=memorySwappiness,
oomScoreAdj=oomScoreAdj,
shmSize=shmSize,
tmpfs=tmpfs,
uts=uts,
ipcMode=ipcMode,
runInit=True,
stopSignal=stopSignal,
stopTimeout=stopTimeout,
networkMode='bridge',
ulimits=ulimits)
c = super_client.wait_success(c)
wait_for(lambda: super_client.reload(c).data['dockerInspect'] is not None)
wait_for(lambda: super_client.
reload(c).data['dockerInspect']['HostConfig'] is not None)
assert c.data['dockerInspect']['HostConfig']['BlkioWeight'] == 100
assert c.data['dockerInspect']['HostConfig']['CpuPeriod'] == 100000
assert c.data['dockerInspect']['HostConfig']['CpuQuota'] == 50000
assert c.data['dockerInspect']['HostConfig']['CpusetMems'] == "0"
assert c.data['dockerInspect']['HostConfig']['KernelMemory'] == 10000000
assert c.data['dockerInspect']['HostConfig']['Memory'] == 10000000
assert c.data['dockerInspect']['HostConfig']['MemorySwappiness'] == 50
assert c.data['dockerInspect']['HostConfig']['GroupAdd'] == ['root']
assert not c.data['dockerInspect']['HostConfig']['OomKillDisable']
assert c.data['dockerInspect']['HostConfig']['OomScoreAdj'] == 500
assert c.data['dockerInspect']['HostConfig']['ShmSize'] == 67108864
run_args = "rw,noexec,nosuid,size=65536k"
assert c.data['dockerInspect']['HostConfig']['Tmpfs'] == {"/run": run_args}
assert c.data['dockerInspect']['HostConfig']['UTSMode'] == 'host'
assert c.data['dockerInspect']['HostConfig']['IpcMode'] == 'host'
assert c.data['dockerInspect']['HostConfig']['Init'] is True
host_limits = {"Name": "cpu", "Hard": 100000, "Soft": 100000}
assert c.data['dockerInspect']['HostConfig']['Ulimits'] == [host_limits]
assert c.data['dockerInspect']['Config']['StopSignal'] == 'SIGTERM'
assert c.data['dockerInspect']['Config']['StopTimeout'] == 10
@if_docker_1_12
def test_docker_extra_newfields(docker_client, super_client):
test_name = 'container_field_test'
image_uuid = 'docker:ibuildthecloud/helloworld'
sysctls = {"net.ipv4.ip_forward": "1"}
healthCmd = ["ls"]
healthInterval = 5
healthRetries = 3
healthTimeout = 60
c = docker_client.create_container(name=test_name,
imageUuid=image_uuid,
sysctls=sysctls,
healthCmd=healthCmd,
healthTimeout=healthTimeout,
healthRetries=healthRetries,
healthInterval=healthInterval)
c = super_client.wait_success(c)
wait_for(lambda: super_client.reload(c).data['dockerInspect'] is not None)
wait_for(lambda: super_client.
reload(c).data['dockerInspect']['HostConfig'] is not None)
host_sysctls = {"net.ipv4.ip_forward": "1"}
assert c.data['dockerInspect']['HostConfig']['Sysctls'] == host_sysctls
assert c.data['dockerInspect']['Config']['Healthcheck']['Test'] == ['ls']
h_interval = c.data['dockerInspect']['Config']['Healthcheck']['Interval']
assert h_interval == 5000000000
h_timeout = c.data['dockerInspect']['Config']['Healthcheck']['Timeout']
assert h_timeout == 60000000000
assert c.data['dockerInspect']['Config']['Healthcheck']['Retries'] == 3
@if_docker
def test_container_milli_cpu_reservation(docker_client, super_client):
test_name = 'container_test'
image_uuid = 'docker:ibuildthecloud/helloworld'
c = docker_client.create_container(name=test_name,
imageUuid=image_uuid,
stdinOpen=True,
tty=True,
command=["true"],
entryPoint=["/bin/sh", "-c"],
networkMode='bridge',
milliCpuReservation=2000,
cpuShares=400)
c = super_client.wait_success(c)
wait_for(lambda: super_client.reload(c).data['dockerInspect'] is not None)
wait_for(lambda: super_client.
reload(c).data['dockerInspect']['HostConfig'] is not None)
# milliCpuReservation will take precedence over cpuShares and be converted
# to a value that is (milliCpuShares / 1000) * 1024
assert c.data['dockerInspect']['HostConfig']['CpuShares'] == 2048
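    # Worked example of that conversion: milliCpuReservation=2000 above yields
    # (2000 / 1000) * 1024 = 2048, which is exactly what this assert checks.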
def get_mounts(resource):
return [x for x in resource.mounts_link() if x.state != 'inactive']
def check_mounts(client, resource, count):
def wait_for_mount_count(res):
m = get_mounts(res)
return len(m) == count
wait_for_condition(client, resource, wait_for_mount_count)
mounts = get_mounts(resource)
return mounts
def volume_cleanup_setup(docker_client, uuid, strategy=None):
labels = {}
if strategy:
labels[VOLUME_CLEANUP_LABEL] = strategy
vol_name = random_str()
c = docker_client.create_container(name="volume_cleanup_test",
imageUuid=uuid,
networkMode='bridge',
dataVolumes=['/tmp/foo',
'%s:/foo' % vol_name],
labels=labels)
c = docker_client.wait_success(c)
if strategy:
assert c.labels[VOLUME_CLEANUP_LABEL] == strategy
mounts = check_mounts(docker_client, c, 2)
v1 = mounts[0].volume()
v2 = mounts[1].volume()
wait_for_condition(docker_client, v1, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
wait_for_condition(docker_client, v2, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
named_vol = v1 if v1.name == vol_name else v2
unnamed_vol = v1 if v1.name != vol_name else v2
c = docker_client.wait_success(c.stop(remove=True, timeout=0))
safe_purge(c, docker_client)
check_mounts(docker_client, c, 0)
return c, named_vol, unnamed_vol
@if_docker
def test_cleanup_volume_strategy(docker_client):
c, named_vol, unnamed_vol = volume_cleanup_setup(docker_client,
TEST_IMAGE_UUID)
assert docker_client.wait_success(named_vol).state == 'detached'
assert docker_client.wait_success(unnamed_vol).removed is not None
c, named_vol, unnamed_vol = volume_cleanup_setup(docker_client,
TEST_IMAGE_UUID,
strategy='unnamed')
assert docker_client.wait_success(named_vol).state == 'detached'
assert docker_client.wait_success(unnamed_vol).removed is not None
c, named_vol, unnamed_vol = volume_cleanup_setup(docker_client,
TEST_IMAGE_UUID,
strategy='none')
assert docker_client.wait_success(named_vol).state == 'detached'
assert docker_client.wait_success(unnamed_vol).state == 'detached'
c, named_vol, unnamed_vol = volume_cleanup_setup(docker_client,
TEST_IMAGE_UUID,
strategy='all')
assert docker_client.wait_success(named_vol).removed is not None
assert docker_client.wait_success(unnamed_vol).removed is not None
@if_docker
def test_docker_volume_long(docker_client):
a = 'a' * 200
v = '/tmp/{}:/tmp/{}'.format(a, a)
uuid = TEST_IMAGE_UUID
c = docker_client.create_container(imageUuid=uuid,
networkMode='bridge',
dataVolumes=[v],
command=['sleep', '42'])
c = docker_client.wait_success(c)
assert c.state == 'running'
vol = c.mounts_link()[0].volume()
vol = docker_client.wait_success(vol)
assert vol.state == 'active'
@if_docker
def test_docker_mount_life_cycle(docker_client):
# Using nginx because it has a baked in volume, which is a good test case
uuid = 'docker:nginx:1.9.0'
bind_mount_uuid = py_uuid.uuid4().hex
bar_host_path = '/tmp/bar%s' % bind_mount_uuid
bar_bind_mount = '%s:/bar' % bar_host_path
c = docker_client.create_container(imageUuid=uuid,
startOnCreate=False,
networkMode='bridge',
dataVolumes=['%s:/foo' % random_str(),
bar_bind_mount])
c = docker_client.wait_success(c)
c = docker_client.wait_success(c.start())
mounts = check_mounts(docker_client, c, 3)
v1 = mounts[0].volume()
v2 = mounts[1].volume()
v3 = mounts[2].volume()
wait_for_condition(docker_client, v1, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
wait_for_condition(docker_client, v2, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
wait_for_condition(docker_client, v3, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
c = docker_client.wait_success(c.stop(timeout=0))
assert c.state == 'stopped'
wait_for_condition(docker_client, v1, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
wait_for_condition(docker_client, v2, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
wait_for_condition(docker_client, v3, lambda x: x.state == 'active',
lambda x: 'state is %s' % x)
c = docker_client.wait_success(c.remove())
check_mounts(docker_client, c, 0)
# State can be either detached or removed depending on whether c got purged
assert docker_client.wait_success(v1).state != 'active'
assert docker_client.wait_success(v2).state != 'active'
assert docker_client.wait_success(v3).state != 'active'
@if_docker
def test_docker_labels(docker_client, super_client):
# 1.8 broke this behavior where labels would come from the images
# one day maybe they will bring it back.
# image_uuid = 'docker:ranchertest/labelled:v0.1.0'
image_uuid = TEST_IMAGE_UUID
c = docker_client.create_container(name="labels_test",
imageUuid=image_uuid,
networkMode='bridge',
labels={'io.rancher.testlabel.'
'fromapi': 'yes'})
c = docker_client.wait_success(c)
def labels_callback():
labels = c.instanceLabels()
if len(labels) >= 3:
return labels
return None
labels = wait_for(labels_callback)
actual_labels = {}
for l in labels:
actual_labels[l.key] = l.value
sc = super_client.reload(c)
mac_address = sc.nics()[0].macAddress
expected_labels = {
# 'io.rancher.testlabel': 'value1',
# 'io.rancher.testlabel.space': 'value 1',
'io.rancher.testlabel.fromapi': 'yes',
'io.rancher.container.uuid': c.uuid,
'io.rancher.container.name': c.name,
'io.rancher.container.mac_address': mac_address,
}
assert actual_labels == expected_labels
docker_client.delete(c)
@if_docker
def test_container_odd_fields(super_client, docker_client):
c = docker_client.create_container(pidMode=None,
imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
logConfig={
'driver': None,
'config': None,
})
c = docker_client.wait_success(c)
assert c.state == 'running'
assert c.pidMode is None
assert c.logConfig == {'type': 'logConfig', 'driver': None, 'config': None}
c = super_client.reload(c)
assert c.data.dockerInspect.HostConfig.LogConfig['Type'] == 'json-file'
assert not c.data.dockerInspect.HostConfig.LogConfig['Config']
@if_docker
def test_container_bad_build(super_client, docker_client):
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode='bridge',
build={
'context': None,
'remote': None
})
c = docker_client.wait_success(c)
assert c.state == 'running'
assert c.pidMode is None
assert c.build == {'context': None, 'remote': None, 'type': 'dockerBuild'}
c = super_client.reload(c)
assert c.data.dockerInspect.Config.Image == TEST_IMAGE
@if_docker
def test_service_link_emu_docker_link(super_client, docker_client):
env_name = random_str()
env = docker_client.create_stack(name=env_name)
env = docker_client.wait_success(env)
assert env.state == "active"
server = docker_client.create_service(name='server', launchConfig={
'networkMode': 'bridge',
'imageUuid': TEST_IMAGE_UUID
}, stackId=env.id)
service = docker_client.create_service(name='client', launchConfig={
'networkMode': 'bridge',
'imageUuid': TEST_IMAGE_UUID
}, stackId=env.id)
service_link = {"serviceId": server.id, "name": "other"}
service.setservicelinks(serviceLinks=[service_link])
server = docker_client.wait_success(server)
service = docker_client.wait_success(service)
server = docker_client.wait_success(server.activate())
assert server.state == 'active'
service = docker_client.wait_success(service.activate())
assert service.state == 'active'
instance = find_one(service.instances)
instance = super_client.reload(instance)
link = find_one(instance.instanceLinks)
target_instance = find_one(server.instances)
assert link.targetInstanceId == target_instance.id
assert link.instanceNames == ['{}-server-1'.format(env_name)]
docker_client.delete(env)
@if_docker
def test_service_links_with_no_ports(docker_client):
env = docker_client.create_stack(name=random_str())
env = docker_client.wait_success(env)
assert env.state == "active"
server = docker_client.create_service(name='server', launchConfig={
'imageUuid': TEST_IMAGE_UUID,
'networkMode': 'bridge',
'stdinOpen': True,
'tty': True,
}, stackId=env.id)
server = docker_client.wait_success(server)
assert server.state == 'inactive'
service = docker_client.create_service(name='client', launchConfig={
'imageUuid': TEST_IMAGE_UUID,
'networkMode': 'bridge',
'stdinOpen': True,
'tty': True,
}, stackId=env.id)
service = docker_client.wait_success(service)
assert service.state == 'inactive'
service_link = {"serviceId": server.id, "name": "bb"}
service.setservicelinks(serviceLinks=[service_link])
server = docker_client.wait_success(server.activate())
assert server.state == 'active'
service = docker_client.wait_success(service.activate())
assert service.state == 'active'
@if_docker
def test_blkio_device_options(super_client, docker_client):
dev_opts = {
'/dev/nvme0n1': {
'readIops': 1000,
'writeIops': 2000,
},
'/dev/null': {
'readBps': 3000,
}
}
c = docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
networkMode=None,
blkioDeviceOptions=dev_opts)
c = docker_client.wait_success(c)
assert c.state == 'running'
super_c = super_client.reload(c)
hc = super_c.data.dockerInspect['HostConfig']
assert hc['BlkioDeviceReadIOps'] == [{
'Path': '/dev/nvme0n1',
'Rate': 1000
}]
assert hc['BlkioDeviceWriteIOps'] == [{
'Path': '/dev/nvme0n1',
'Rate': 2000
}]
assert hc['BlkioDeviceReadBps'] == [{
'Path': '/dev/null',
'Rate': 3000
}]
@if_resource_scheduler
def test_port_constraint(docker_client):
    # Tests with the above label can only be run when the external scheduler
    # is enabled. It isn't in CI, so we need to disable these tests by default.
    # They can (and should) be run locally if working on the scheduler.
containers = []
try:
c = docker_client.wait_success(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9998:81/tcp']))
containers.append(c)
# try to deploy another container with same public port + protocol
c2 = docker_client.wait_transitioning(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9998:81/tcp']))
assert c2.transitioning == 'error'
assert '9998:81/tcp' in c2.transitioningMessage
assert c2.state == 'error'
containers.append(c2)
# try different public port
c3 = docker_client.wait_success(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9999:81/tcp']))
containers.append(c3)
# try different protocol
c4 = docker_client.wait_success(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9999:81/udp']))
containers.append(c4)
# UDP is now taken
c5 = docker_client.wait_transitioning(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['9999:81/udp']))
assert c5.transitioning == 'error'
assert '9999:81/udp' in c5.transitioningMessage
assert c5.state == 'error'
containers.append(c5)
# try different bind IP
c6 = docker_client.wait_success(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['127.2.2.1:9997:81/tcp']))
containers.append(c6)
# Bind IP is now taken
c7 = docker_client.wait_transitioning(
docker_client.create_container(imageUuid=TEST_IMAGE_UUID,
ports=['127.2.2.1:9997:81/tcp']))
assert c7.transitioning == 'error'
assert '127.2.2.1:9997:81/tcp' in c7.transitioningMessage
assert c7.state == 'error'
containers.append(c7)
finally:
for c in containers:
if c is not None:
c = docker_client.wait_success(docker_client.delete(c))
c.purge()
@if_resource_scheduler
def test_conflicting_ports_in_deployment_unit(docker_client):
env = docker_client.create_stack(name=random_str())
env = docker_client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": TEST_IMAGE_UUID, "ports": ['7777:6666']}
secondary_lc = {"imageUuid": TEST_IMAGE_UUID,
"name": "secondary", "ports": ['7777:6666']}
svc = docker_client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
secondaryLaunchConfigs=[secondary_lc])
svc = docker_client.wait_success(svc)
assert svc.state == "inactive"
svc = svc.activate()
c = _wait_for_compose_instance_error(docker_client, svc, env)
assert '7777:6666/tcp' in c.transitioningMessage
env.remove()
@if_resource_scheduler
def test_simultaneous_port_allocation(docker_client):
# This test ensures if two containers are allocated simultaneously, only
# one will get the port and the other will fail to allocate.
    # By nature, this test exercises a race condition, so it isn't perfect.
env = docker_client.create_stack(name=random_str())
env = docker_client.wait_success(env)
assert env.state == "active"
launch_config = {"imageUuid": TEST_IMAGE_UUID,
"ports": ['5555:6666']}
svc = docker_client.create_service(name=random_str(),
stackId=env.id,
launchConfig=launch_config,
scale=2)
svc = docker_client.wait_success(svc)
assert svc.state == "inactive"
svc = svc.activate()
c = _wait_for_compose_instance_error(docker_client, svc, env)
assert '5555:6666/tcp' in c.transitioningMessage
@if_resource_scheduler
def test_docker_bind_address(docker_client, super_client):
c = docker_client.create_container(name='bindAddrTest',
networkMode='bridge',
imageUuid=TEST_IMAGE_UUID,
ports=['127.0.0.1:89:8999'])
c = docker_client.wait_success(c)
assert c.state == 'running'
c = super_client.reload(c)
bindings = c.data['dockerInspect']['HostConfig']['PortBindings']
assert bindings['8999/tcp'] == [{'HostIp': '127.0.0.1', 'HostPort': '89'}]
c = docker_client.create_container(name='bindAddrTest2',
networkMode='bridge',
imageUuid=TEST_IMAGE_UUID,
ports=['127.2.2.2:89:8999'])
c = docker_client.wait_success(c)
assert c.state == 'running'
c = super_client.reload(c)
bindings = c.data['dockerInspect']['HostConfig']['PortBindings']
assert bindings['8999/tcp'] == [{'HostIp': '127.2.2.2', 'HostPort': '89'}]
c = docker_client.create_container(name='bindAddrTest3',
networkMode='bridge',
imageUuid=TEST_IMAGE_UUID,
ports=['127.2.2.2:89:8999'])
c = docker_client.wait_transitioning(c)
assert c.transitioning == 'error'
assert '127.2.2.2:89:8999' in c.transitioningMessage
assert c.state == 'error'
def _wait_for_compose_instance_error(client, service, env):
name = env.name + "-" + service.name + "%"
def check():
containers = client.list_container(name_like=name, state='error')
if len(containers) > 0:
return containers[0]
container = wait_for(check)
return container
def _check_path(volume, should_exist, client, super_client, extra_vols=None,
path_to_check=None):
if path_to_check:
path = path_to_check
else:
path = _path_to_volume(volume)
print 'Checking path [%s] for volume [%s].' % (path, volume)
data_vols = ['/var/lib/docker:/host/var/lib/docker', '/tmp:/host/tmp']
if extra_vols:
data_vols.extend(extra_vols)
c = client. \
create_container(name="volume_check" + random_str(),
imageUuid="docker:ranchertest/volume-test:v0.1.0",
networkMode=None,
environment={'TEST_PATH': path},
command='/opt/tools/check_path_exists.sh',
dataVolumes=data_vols)
c = super_client.wait_success(c)
assert c.state == 'running'
c = super_client.wait_success(c.stop())
assert c.state == 'stopped'
code = c.data.dockerInspect.State.ExitCode
# Note that the test in the container is testing to see if the path is a
# directory. Code for the test is here:
# https://github.com/rancher/test-images/tree/master/images/volume-test
if should_exist:
# The exit code of the container should be a 10 if the path existed
assert code == 10
else:
# And 11 if the path did not exist
assert code == 11
c.remove()
def _path_to_volume(volume):
path = volume.uri.replace('file://', '')
mounted_path = re.sub('^.*?/var/lib/docker', '/host/var/lib/docker',
path)
if not mounted_path.startswith('/host/var/lib/docker'):
mounted_path = re.sub('^.*?/tmp', '/host/tmp',
path)
return mounted_path
| rancher/cattle | tests/integration/cattletest/core/test_docker.py | Python | apache-2.0 | 50,721 |
"""import MySQLdb
DB_HOST = '192.95.22.65'
DB_USER = 'sitursit_bot'
DB_PASS = 'RwfMXSUurWCX'
DB_NAME = 'sitursit_bot'
def run_query(query=''):
datos = [DB_HOST, DB_USER, DB_PASS, DB_NAME]
    conn = MySQLdb.connect(*datos) # Connect to the database
    cursor = conn.cursor() # Create a cursor
    cursor.execute(query) # Execute a query
    if query.upper().startswith('SELECT'):
        data = cursor.fetchall() # Fetch the results of a SELECT
    else:
        conn.commit() # Commit the written data
        data = None
    cursor.close() # Close the cursor
    conn.close() # Close the connection
return data"""
import sys
soyversion = sys.version
print(soyversion)
import MySQLdb
import time
def maindb():
# Connect to the MySQL database
db = MySQLdb.connect(host = '192.95.22.65', user = 'sitursit_bot', passwd = 'RwfMXSUurWCX', db = 'sitursit_bot')
#db = MySQLdb.connect(host = '23.96.113.148', user = 'SITUR_BOT', passwd = 'O9pIeuNfF1BMQM1W', db = 'SITUR_BOT')
cursor = db.cursor()
valorconsultado = "y aun mas consultas de acá"
fechayhora= str(time.time())
print(fechayhora)
fechayhora2="1505308957.3476646"
atractivos = "lago de tota"
municipios = 75
#sql2 = """INSERT INTO `atractivos_cons` (`ID`, `fecha_hora`, `sexo`, `edad`, `ubicacion`, `atractivo_buscado`) VALUES (NULL, NULL, 'M', '28', 'Líbano, Colombia', '"""+valorconsultado+"""')"""
# sql2 = """INSERT INTO `atractivo_ciudad_cons` (`ID`, `fecha_hora`, `sexo`, `edad`, `ubicacion`, `atractivo_buscado`, `ciudad_buscada`) VALUES (NULL, NULL, NULL, NULL, NULL, '"""+atractivos+"""','"""+str(municipios)+"""')"""
sql2 = """INSERT INTO `atractivo_ciudad_cons` (`ID`, `fecha_hora`, `sexo`, `edad`, `ubicacion`, `atractivo_buscado`, `ciudad_buscada`) VALUES (NULL, NULL, NULL, NULL, NULL, 'parquecitos','95')"""
    cursor.execute(sql2)
    db.commit()  # commit the INSERT; MySQLdb connections do not autocommit by default
    db.close()
def maindb2():
# Connect to the MySQL database
db = MySQLdb.connect(host = 'localhost', user = 'admin', passwd = 'd12190', db = 'mysql')
# Check if connection was successful
if (db):
# Carry out normal procedure
        databasemauricio = "Connection successful"
        print("connected successfully")
else:
# Terminate
databasemauricio = "Connection unsuccessful"
print("no me pude conectar")
maindb()
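# A parameterized sketch of the same INSERT (assumes the omitted columns accept
# their defaults; MySQLdb escapes the bound values itself, unlike the string
# concatenation used in the commented-out sql2 variants above):
#
#   sql = ("INSERT INTO `atractivo_ciudad_cons` "
#          "(`atractivo_buscado`, `ciudad_buscada`) VALUES (%s, %s)")
#   cursor.execute(sql, (atractivos, str(municipios)))
#   db.commit()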
| mahoobox/situr3 | escribirmysql.py | Python | apache-2.0 | 2,461 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for ComputeManager()."""
import contextlib
import time
import uuid
from cinderclient import exceptions as cinder_exception
from eventlet import event as eventlet_event
import mock
from mox3 import mox
import netaddr
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
from oslo_utils import uuidutils
import six
import nova
from nova.compute import build_results
from nova.compute import manager
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova import exception
from nova.network import api as network_api
from nova.network import model as network_model
from nova import objects
from nova.objects import block_device as block_device_obj
from nova import test
from nova.tests.unit.compute import fake_resource_tracker
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_network
from nova.tests.unit import fake_network_cache_model
from nova.tests.unit import fake_server_actions
from nova.tests.unit.objects import test_instance_fault
from nova.tests.unit.objects import test_instance_info_cache
from nova import utils
from nova.virt import driver as virt_driver
from nova.virt import event as virtevent
from nova.virt import fake as fake_driver
from nova.virt import hardware
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
class ComputeManagerUnitTestCase(test.NoDBTestCase):
def setUp(self):
super(ComputeManagerUnitTestCase, self).setUp()
self.flags(use_local=True, group='conductor')
self.compute = importutils.import_object(CONF.compute_manager)
self.context = context.RequestContext('fake', 'fake')
fake_server_actions.stub_out_action_events(self.stubs)
@mock.patch.object(manager.ComputeManager, '_get_power_state')
@mock.patch.object(manager.ComputeManager, '_sync_instance_power_state')
@mock.patch.object(objects.Instance, 'get_by_uuid')
def _test_handle_lifecycle_event(self, mock_get, mock_sync,
mock_get_power_state, transition,
event_pwr_state, current_pwr_state):
event = mock.Mock()
event.get_instance_uuid.return_value = mock.sentinel.uuid
event.get_transition.return_value = transition
mock_get_power_state.return_value = current_pwr_state
self.compute.handle_lifecycle_event(event)
mock_get.assert_called_with(mock.ANY, mock.sentinel.uuid,
expected_attrs=[])
if event_pwr_state == current_pwr_state:
mock_sync.assert_called_with(mock.ANY, mock_get.return_value,
event_pwr_state)
else:
self.assertFalse(mock_sync.called)
def test_handle_lifecycle_event(self):
event_map = {virtevent.EVENT_LIFECYCLE_STOPPED: power_state.SHUTDOWN,
virtevent.EVENT_LIFECYCLE_STARTED: power_state.RUNNING,
virtevent.EVENT_LIFECYCLE_PAUSED: power_state.PAUSED,
virtevent.EVENT_LIFECYCLE_RESUMED: power_state.RUNNING,
virtevent.EVENT_LIFECYCLE_SUSPENDED:
power_state.SUSPENDED,
}
for transition, pwr_state in six.iteritems(event_map):
self._test_handle_lifecycle_event(transition=transition,
event_pwr_state=pwr_state,
current_pwr_state=pwr_state)
def test_handle_lifecycle_event_state_mismatch(self):
self._test_handle_lifecycle_event(
transition=virtevent.EVENT_LIFECYCLE_STOPPED,
event_pwr_state=power_state.SHUTDOWN,
current_pwr_state=power_state.RUNNING)
def test_delete_instance_info_cache_delete_ordering(self):
call_tracker = mock.Mock()
call_tracker.clear_events_for_instance.return_value = None
mgr_class = self.compute.__class__
orig_delete = mgr_class._delete_instance
specd_compute = mock.create_autospec(mgr_class)
# spec out everything except for the method we really want
# to test, then use call_tracker to verify call sequence
specd_compute._delete_instance = orig_delete
mock_inst = mock.Mock()
mock_inst.uuid = 'inst-1'
mock_inst.save = mock.Mock()
mock_inst.destroy = mock.Mock()
mock_inst.system_metadata = mock.Mock()
def _mark_notify(*args, **kwargs):
call_tracker._notify_about_instance_usage(*args, **kwargs)
def _mark_shutdown(*args, **kwargs):
call_tracker._shutdown_instance(*args, **kwargs)
specd_compute.instance_events = call_tracker
specd_compute._notify_about_instance_usage = _mark_notify
specd_compute._shutdown_instance = _mark_shutdown
mock_inst.info_cache = call_tracker
specd_compute._delete_instance(specd_compute,
self.context,
mock_inst,
mock.Mock(),
mock.Mock())
methods_called = [n for n, a, k in call_tracker.mock_calls]
self.assertEqual(['clear_events_for_instance',
'_notify_about_instance_usage',
'_shutdown_instance', 'delete'],
methods_called)
@mock.patch.object(manager.ComputeManager, '_get_resource_tracker')
@mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
@mock.patch.object(manager.ComputeManager, '_get_compute_nodes_in_db')
def test_update_available_resource(self, get_db_nodes, get_avail_nodes,
get_rt):
info = {'cn_id': 1}
def _make_compute_node(hyp_hostname):
cn = mock.Mock(spec_set=['hypervisor_hostname', 'id',
'destroy'])
cn.id = info['cn_id']
info['cn_id'] += 1
cn.hypervisor_hostname = hyp_hostname
return cn
def _make_rt(node):
n = mock.Mock(spec_set=['update_available_resource',
'nodename'])
n.nodename = node
return n
ctxt = mock.Mock()
db_nodes = [_make_compute_node('node1'),
_make_compute_node('node2'),
_make_compute_node('node3'),
_make_compute_node('node4')]
avail_nodes = set(['node2', 'node3', 'node4', 'node5'])
avail_nodes_l = list(avail_nodes)
rts = [_make_rt(node) for node in avail_nodes_l]
# Make the 2nd and 3rd ones raise
exc = exception.ComputeHostNotFound(host='fake')
rts[1].update_available_resource.side_effect = exc
exc = test.TestingException()
rts[2].update_available_resource.side_effect = exc
rts_iter = iter(rts)
def _get_rt_side_effect(*args, **kwargs):
return next(rts_iter)
expected_rt_dict = {avail_nodes_l[0]: rts[0],
avail_nodes_l[2]: rts[2],
avail_nodes_l[3]: rts[3]}
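        # avail_nodes_l[1] is deliberately absent: its tracker raises
        # ComputeHostNotFound, which drops that node from the cached
        # _resource_tracker_dict, while the generic TestingException on
        # avail_nodes_l[2] leaves its entry in place.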
get_db_nodes.return_value = db_nodes
get_avail_nodes.return_value = avail_nodes
get_rt.side_effect = _get_rt_side_effect
self.compute.update_available_resource(ctxt)
get_db_nodes.assert_called_once_with(ctxt, use_slave=True)
self.assertEqual([mock.call(node) for node in avail_nodes],
get_rt.call_args_list)
for rt in rts:
rt.update_available_resource.assert_called_once_with(ctxt)
self.assertEqual(expected_rt_dict,
self.compute._resource_tracker_dict)
# First node in set should have been removed from DB
for db_node in db_nodes:
if db_node.hypervisor_hostname == 'node1':
db_node.destroy.assert_called_once_with()
else:
self.assertFalse(db_node.destroy.called)
def test_delete_instance_without_info_cache(self):
instance = fake_instance.fake_instance_obj(
self.context,
uuid='fake',
vm_state=vm_states.ERROR,
host=self.compute.host,
expected_attrs=['system_metadata'])
quotas = mock.create_autospec(objects.Quotas, spec_set=True)
with contextlib.nested(
mock.patch.object(self.compute, '_notify_about_instance_usage'),
mock.patch.object(self.compute, '_shutdown_instance'),
mock.patch.object(instance, 'obj_load_attr'),
mock.patch.object(instance, 'save'),
mock.patch.object(instance, 'destroy')
) as (
            compute_notify_about_instance_usage, compute_shutdown_instance,
instance_obj_load_attr, instance_save, instance_destroy
):
instance.info_cache = None
self.compute._delete_instance(self.context, instance, [], quotas)
@mock.patch.object(network_api.API, 'allocate_for_instance')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(time, 'sleep')
def test_allocate_network_succeeds_after_retries(
self, mock_sleep, mock_save, mock_allocate_for_instance):
self.flags(network_allocate_retries=8)
instance = fake_instance.fake_instance_obj(
self.context, expected_attrs=['system_metadata'])
is_vpn = 'fake-is-vpn'
req_networks = 'fake-req-networks'
macs = 'fake-macs'
sec_groups = 'fake-sec-groups'
final_result = 'meow'
dhcp_options = None
mock_allocate_for_instance.side_effect = [
test.TestingException()] * 7 + [final_result]
expected_sleep_times = [1, 2, 4, 8, 16, 30, 30, 30]
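        # Seven failed attempts followed by a success stay within the
        # network_allocate_retries=8 budget; the sleep values encode the
        # expected exponential backoff capped at 30 seconds.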
res = self.compute._allocate_network_async(self.context, instance,
req_networks,
macs,
sec_groups,
is_vpn,
dhcp_options)
mock_sleep.has_calls(expected_sleep_times)
self.assertEqual(final_result, res)
        # Ensure save is not called while allocating networks; the instance
        # is saved after the allocation.
self.assertFalse(mock_save.called)
self.assertEqual('True', instance.system_metadata['network_allocated'])
def test_allocate_network_fails(self):
self.flags(network_allocate_retries=0)
nwapi = self.compute.network_api
self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
instance = {}
is_vpn = 'fake-is-vpn'
req_networks = 'fake-req-networks'
macs = 'fake-macs'
sec_groups = 'fake-sec-groups'
dhcp_options = None
nwapi.allocate_for_instance(
self.context, instance, vpn=is_vpn,
requested_networks=req_networks, macs=macs,
security_groups=sec_groups,
dhcp_options=dhcp_options).AndRaise(test.TestingException())
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.compute._allocate_network_async,
self.context, instance, req_networks, macs,
sec_groups, is_vpn, dhcp_options)
def test_allocate_network_neg_conf_value_treated_as_zero(self):
self.flags(network_allocate_retries=-1)
nwapi = self.compute.network_api
self.mox.StubOutWithMock(nwapi, 'allocate_for_instance')
instance = {}
is_vpn = 'fake-is-vpn'
req_networks = 'fake-req-networks'
macs = 'fake-macs'
sec_groups = 'fake-sec-groups'
dhcp_options = None
# Only attempted once.
nwapi.allocate_for_instance(
self.context, instance, vpn=is_vpn,
requested_networks=req_networks, macs=macs,
security_groups=sec_groups,
dhcp_options=dhcp_options).AndRaise(test.TestingException())
self.mox.ReplayAll()
self.assertRaises(test.TestingException,
self.compute._allocate_network_async,
self.context, instance, req_networks, macs,
sec_groups, is_vpn, dhcp_options)
@mock.patch.object(network_api.API, 'allocate_for_instance')
@mock.patch.object(manager.ComputeManager, '_instance_update')
@mock.patch.object(time, 'sleep')
def test_allocate_network_with_conf_value_is_one(
self, sleep, _instance_update, allocate_for_instance):
self.flags(network_allocate_retries=1)
instance = fake_instance.fake_instance_obj(
self.context, expected_attrs=['system_metadata'])
is_vpn = 'fake-is-vpn'
req_networks = 'fake-req-networks'
macs = 'fake-macs'
sec_groups = 'fake-sec-groups'
dhcp_options = None
final_result = 'zhangtralon'
allocate_for_instance.side_effect = [test.TestingException(),
final_result]
res = self.compute._allocate_network_async(self.context, instance,
req_networks,
macs,
sec_groups,
is_vpn,
dhcp_options)
self.assertEqual(final_result, res)
self.assertEqual(1, sleep.call_count)
@mock.patch('nova.utils.spawn_n')
@mock.patch('nova.compute.manager.ComputeManager.'
'_do_build_and_run_instance')
def _test_max_concurrent_builds(self, mock_dbari, mock_spawn):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
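        # Run the "spawned" build synchronously so the semaphore enter/exit
        # bookkeeping below can be asserted in-line.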
with mock.patch.object(self.compute,
'_build_semaphore') as mock_sem:
instance = objects.Instance(uuid=str(uuid.uuid4()))
for i in (1, 2, 3):
self.compute.build_and_run_instance(self.context, instance,
mock.sentinel.image,
mock.sentinel.request_spec,
{})
self.assertEqual(3, mock_sem.__enter__.call_count)
def test_max_concurrent_builds_limited(self):
self.flags(max_concurrent_builds=2)
self._test_max_concurrent_builds()
def test_max_concurrent_builds_unlimited(self):
self.flags(max_concurrent_builds=0)
self._test_max_concurrent_builds()
def test_max_concurrent_builds_semaphore_limited(self):
self.flags(max_concurrent_builds=123)
self.assertEqual(123,
manager.ComputeManager()._build_semaphore.balance)
def test_max_concurrent_builds_semaphore_unlimited(self):
self.flags(max_concurrent_builds=0)
compute = manager.ComputeManager()
self.assertEqual(0, compute._build_semaphore.balance)
self.assertIsInstance(compute._build_semaphore,
compute_utils.UnlimitedSemaphore)
def test_nil_out_inst_obj_host_and_node_sets_nil(self):
instance = fake_instance.fake_instance_obj(self.context,
uuid='foo-uuid',
host='foo-host',
node='foo-node')
self.assertIsNotNone(instance.host)
self.assertIsNotNone(instance.node)
self.compute._nil_out_instance_obj_host_and_node(instance)
self.assertIsNone(instance.host)
self.assertIsNone(instance.node)
def test_init_host(self):
our_host = self.compute.host
inst = fake_instance.fake_db_instance(
vm_state=vm_states.ACTIVE,
info_cache=dict(test_instance_info_cache.fake_info_cache,
network_info=None),
security_groups=None)
startup_instances = [inst, inst, inst]
def _do_mock_calls(defer_iptables_apply):
self.compute.driver.init_host(host=our_host)
context.get_admin_context().AndReturn(self.context)
db.instance_get_all_by_host(
self.context, our_host,
columns_to_join=['info_cache', 'metadata'],
use_slave=False
).AndReturn(startup_instances)
if defer_iptables_apply:
self.compute.driver.filter_defer_apply_on()
self.compute._destroy_evacuated_instances(self.context)
self.compute._init_instance(self.context,
mox.IsA(objects.Instance))
self.compute._init_instance(self.context,
mox.IsA(objects.Instance))
self.compute._init_instance(self.context,
mox.IsA(objects.Instance))
if defer_iptables_apply:
self.compute.driver.filter_defer_apply_off()
self.mox.StubOutWithMock(self.compute.driver, 'init_host')
self.mox.StubOutWithMock(self.compute.driver,
'filter_defer_apply_on')
self.mox.StubOutWithMock(self.compute.driver,
'filter_defer_apply_off')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(self.compute,
'_destroy_evacuated_instances')
self.mox.StubOutWithMock(self.compute,
'_init_instance')
# Test with defer_iptables_apply
self.flags(defer_iptables_apply=True)
_do_mock_calls(True)
self.mox.ReplayAll()
self.compute.init_host()
self.mox.VerifyAll()
# Test without defer_iptables_apply
self.mox.ResetAll()
self.flags(defer_iptables_apply=False)
_do_mock_calls(False)
self.mox.ReplayAll()
self.compute.init_host()
# tearDown() uses context.get_admin_context(), so we have
# to do the verification here and unstub it.
self.mox.VerifyAll()
self.mox.UnsetStubs()
@mock.patch('nova.objects.InstanceList')
@mock.patch('nova.objects.MigrationList.get_by_filters')
def test_cleanup_host(self, mock_miglist_get, mock_instance_list):
        # Just test that the cleanup_host method, when fired, invokes the
        # underlying driver's equivalent method.
mock_miglist_get.return_value = []
mock_instance_list.get_by_host.return_value = []
with mock.patch.object(self.compute, 'driver') as mock_driver:
self.compute.init_host()
mock_driver.init_host.assert_called_once_with(host='fake-mini')
self.compute.cleanup_host()
# register_event_listener is called on startup (init_host) and
# in cleanup_host
mock_driver.register_event_listener.assert_has_calls([
mock.call(self.compute.handle_events), mock.call(None)])
mock_driver.cleanup_host.assert_called_once_with(host='fake-mini')
def test_init_virt_events_disabled(self):
self.flags(handle_virt_lifecycle_events=False, group='workarounds')
with mock.patch.object(self.compute.driver,
'register_event_listener') as mock_register:
self.compute.init_virt_events()
self.assertFalse(mock_register.called)
@mock.patch('nova.objects.MigrationList.get_by_filters')
@mock.patch('nova.objects.Migration.save')
def test_init_host_with_evacuated_instance(self, mock_save, mock_mig_get):
our_host = self.compute.host
not_our_host = 'not-' + our_host
deleted_instance = fake_instance.fake_instance_obj(
self.context, host=not_our_host, uuid='fake-uuid')
migration = objects.Migration(instance_uuid=deleted_instance.uuid)
mock_mig_get.return_value = [migration]
self.mox.StubOutWithMock(self.compute.driver, 'init_host')
self.mox.StubOutWithMock(self.compute.driver, 'destroy')
self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
self.mox.StubOutWithMock(context, 'get_admin_context')
self.mox.StubOutWithMock(self.compute, 'init_virt_events')
self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
self.mox.StubOutWithMock(self.compute, '_init_instance')
self.mox.StubOutWithMock(self.compute.network_api,
'get_instance_nw_info')
self.compute.driver.init_host(host=our_host)
context.get_admin_context().AndReturn(self.context)
db.instance_get_all_by_host(self.context, our_host,
columns_to_join=['info_cache', 'metadata'],
use_slave=False
).AndReturn([])
self.compute.init_virt_events()
# simulate failed instance
self.compute._get_instances_on_driver(
self.context, {'deleted': False}).AndReturn([deleted_instance])
self.compute.network_api.get_instance_nw_info(
self.context, deleted_instance).AndRaise(
exception.InstanceNotFound(instance_id=deleted_instance['uuid']))
# ensure driver.destroy is called so that driver may
# clean up any dangling files
self.compute.driver.destroy(self.context, deleted_instance,
mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.mox.ReplayAll()
self.compute.init_host()
# tearDown() uses context.get_admin_context(), so we have
# to do the verification here and unstub it.
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_init_instance_with_binding_failed_vif_type(self):
# this instance will plug a 'binding_failed' vif
instance = fake_instance.fake_instance_obj(
self.context,
uuid='fake-uuid',
info_cache=None,
power_state=power_state.RUNNING,
vm_state=vm_states.ACTIVE,
task_state=None,
host=self.compute.host,
expected_attrs=['info_cache'])
with contextlib.nested(
mock.patch.object(context, 'get_admin_context',
return_value=self.context),
mock.patch.object(compute_utils, 'get_nw_info_for_instance',
return_value=network_model.NetworkInfo()),
mock.patch.object(self.compute.driver, 'plug_vifs',
side_effect=exception.VirtualInterfacePlugException(
"Unexpected vif_type=binding_failed")),
mock.patch.object(self.compute, '_set_instance_obj_error_state')
) as (get_admin_context, get_nw_info, plug_vifs, set_error_state):
self.compute._init_instance(self.context, instance)
set_error_state.assert_called_once_with(self.context, instance)
def test__get_power_state_InstanceNotFound(self):
instance = fake_instance.fake_instance_obj(
self.context,
power_state=power_state.RUNNING)
with mock.patch.object(self.compute.driver,
'get_info',
side_effect=exception.InstanceNotFound(instance_id=1)):
self.assertEqual(self.compute._get_power_state(self.context,
instance),
power_state.NOSTATE)
def test__get_power_state_NotFound(self):
instance = fake_instance.fake_instance_obj(
self.context,
power_state=power_state.RUNNING)
with mock.patch.object(self.compute.driver,
'get_info',
side_effect=exception.NotFound()):
self.assertRaises(exception.NotFound,
self.compute._get_power_state,
self.context, instance)
def test_init_instance_failed_resume_sets_error(self):
instance = fake_instance.fake_instance_obj(
self.context,
uuid='fake-uuid',
info_cache=None,
power_state=power_state.RUNNING,
vm_state=vm_states.ACTIVE,
task_state=None,
host=self.compute.host,
expected_attrs=['info_cache'])
self.flags(resume_guests_state_on_host_boot=True)
self.mox.StubOutWithMock(self.compute, '_get_power_state')
self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
self.mox.StubOutWithMock(self.compute.driver,
'resume_state_on_host_boot')
self.mox.StubOutWithMock(self.compute,
'_get_instance_block_device_info')
self.mox.StubOutWithMock(self.compute,
'_set_instance_obj_error_state')
self.compute._get_power_state(mox.IgnoreArg(),
instance).AndReturn(power_state.SHUTDOWN)
self.compute._get_power_state(mox.IgnoreArg(),
instance).AndReturn(power_state.SHUTDOWN)
self.compute._get_power_state(mox.IgnoreArg(),
instance).AndReturn(power_state.SHUTDOWN)
self.compute.driver.plug_vifs(instance, mox.IgnoreArg())
self.compute._get_instance_block_device_info(mox.IgnoreArg(),
instance).AndReturn('fake-bdm')
self.compute.driver.resume_state_on_host_boot(mox.IgnoreArg(),
instance, mox.IgnoreArg(),
'fake-bdm').AndRaise(test.TestingException)
self.compute._set_instance_obj_error_state(mox.IgnoreArg(), instance)
self.mox.ReplayAll()
self.compute._init_instance('fake-context', instance)
@mock.patch.object(objects.BlockDeviceMapping, 'destroy')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(objects.Instance, 'destroy')
@mock.patch.object(objects.Instance, 'obj_load_attr')
@mock.patch.object(objects.quotas.Quotas, 'commit')
@mock.patch.object(objects.quotas.Quotas, 'reserve')
@mock.patch.object(objects.quotas, 'ids_from_instance')
def test_init_instance_complete_partial_deletion(
self, mock_ids_from_instance, mock_reserve, mock_commit,
mock_inst_destroy, mock_obj_load_attr, mock_get_by_instance_uuid,
mock_bdm_destroy):
"""Test to complete deletion for instances in DELETED status but not
marked as deleted in the DB
"""
instance = fake_instance.fake_instance_obj(
self.context,
project_id='fake',
uuid='fake-uuid',
vcpus=1,
memory_mb=64,
power_state=power_state.SHUTDOWN,
vm_state=vm_states.DELETED,
host=self.compute.host,
task_state=None,
deleted=False,
deleted_at=None,
metadata={},
system_metadata={},
expected_attrs=['metadata', 'system_metadata'])
# Make sure instance vm_state is marked as 'DELETED' but instance is
# not destroyed from db.
self.assertEqual(vm_states.DELETED, instance.vm_state)
self.assertFalse(instance.deleted)
deltas = {'instances': -1,
'cores': -instance.vcpus,
'ram': -instance.memory_mb}
def fake_inst_destroy():
instance.deleted = True
instance.deleted_at = timeutils.utcnow()
mock_ids_from_instance.return_value = (instance.project_id,
instance.user_id)
mock_inst_destroy.side_effect = fake_inst_destroy()
self.compute._init_instance(self.context, instance)
# Make sure that instance.destroy method was called and
# instance was deleted from db.
self.assertTrue(mock_reserve.called)
self.assertTrue(mock_commit.called)
self.assertNotEqual(0, instance.deleted)
mock_reserve.assert_called_once_with(project_id=instance.project_id,
user_id=instance.user_id,
**deltas)
@mock.patch('nova.compute.manager.LOG')
def test_init_instance_complete_partial_deletion_raises_exception(
self, mock_log):
instance = fake_instance.fake_instance_obj(
self.context,
project_id='fake',
uuid='fake-uuid',
vcpus=1,
memory_mb=64,
power_state=power_state.SHUTDOWN,
vm_state=vm_states.DELETED,
host=self.compute.host,
task_state=None,
deleted=False,
deleted_at=None,
metadata={},
system_metadata={},
expected_attrs=['metadata', 'system_metadata'])
with mock.patch.object(self.compute,
'_complete_partial_deletion') as mock_deletion:
mock_deletion.side_effect = test.TestingException()
            self.compute._init_instance(self.context, instance)
msg = u'Failed to complete a deletion'
mock_log.exception.assert_called_once_with(msg, instance=instance)
def test_init_instance_stuck_in_deleting(self):
instance = fake_instance.fake_instance_obj(
self.context,
project_id='fake',
uuid='fake-uuid',
vcpus=1,
memory_mb=64,
power_state=power_state.RUNNING,
vm_state=vm_states.ACTIVE,
host=self.compute.host,
task_state=task_states.DELETING)
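        # An instance found stuck in the DELETING task state at init time
        # should have its deletion completed via _delete_instance().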
self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
self.mox.StubOutWithMock(self.compute, '_delete_instance')
self.mox.StubOutWithMock(instance, 'obj_load_attr')
self.mox.StubOutWithMock(self.compute, '_create_reservations')
bdms = []
quotas = objects.quotas.Quotas(self.context)
instance.obj_load_attr('metadata')
instance.obj_load_attr('system_metadata')
objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, instance.uuid).AndReturn(bdms)
self.compute._create_reservations(self.context, instance,
instance.project_id,
instance.user_id).AndReturn(quotas)
self.compute._delete_instance(self.context, instance, bdms,
mox.IgnoreArg())
self.mox.ReplayAll()
self.compute._init_instance(self.context, instance)
@mock.patch.object(objects.Instance, 'get_by_uuid')
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
def test_init_instance_stuck_in_deleting_raises_exception(
self, mock_get_by_instance_uuid, mock_get_by_uuid):
instance = fake_instance.fake_instance_obj(
self.context,
project_id='fake',
uuid='fake-uuid',
vcpus=1,
memory_mb=64,
metadata={},
system_metadata={},
host=self.compute.host,
vm_state=vm_states.ACTIVE,
task_state=task_states.DELETING,
expected_attrs=['metadata', 'system_metadata'])
bdms = []
reservations = ['fake-resv']
def _create_patch(name, attr):
patcher = mock.patch.object(name, attr)
mocked_obj = patcher.start()
self.addCleanup(patcher.stop)
return mocked_obj
mock_delete_instance = _create_patch(self.compute, '_delete_instance')
mock_set_instance_error_state = _create_patch(
self.compute, '_set_instance_obj_error_state')
mock_create_reservations = _create_patch(self.compute,
'_create_reservations')
mock_create_reservations.return_value = reservations
mock_get_by_instance_uuid.return_value = bdms
mock_get_by_uuid.return_value = instance
mock_delete_instance.side_effect = test.TestingException('test')
self.compute._init_instance(self.context, instance)
mock_set_instance_error_state.assert_called_once_with(
self.context, instance)
def _test_init_instance_reverts_crashed_migrations(self,
old_vm_state=None):
power_on = True if (not old_vm_state or
old_vm_state == vm_states.ACTIVE) else False
sys_meta = {
'old_vm_state': old_vm_state
}
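        # finish_revert_migration() is expected to power the instance back on
        # only when the pre-resize vm_state was ACTIVE (or was never recorded).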
instance = fake_instance.fake_instance_obj(
self.context,
uuid='foo',
vm_state=vm_states.ERROR,
task_state=task_states.RESIZE_MIGRATING,
power_state=power_state.SHUTDOWN,
system_metadata=sys_meta,
host=self.compute.host,
expected_attrs=['system_metadata'])
self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
self.mox.StubOutWithMock(self.compute.driver,
'finish_revert_migration')
self.mox.StubOutWithMock(self.compute,
'_get_instance_block_device_info')
self.mox.StubOutWithMock(self.compute.driver, 'get_info')
self.mox.StubOutWithMock(instance, 'save')
self.mox.StubOutWithMock(self.compute, '_retry_reboot')
self.compute._retry_reboot(self.context, instance).AndReturn(
(False, None))
compute_utils.get_nw_info_for_instance(instance).AndReturn(
network_model.NetworkInfo())
self.compute.driver.plug_vifs(instance, [])
self.compute._get_instance_block_device_info(
self.context, instance).AndReturn([])
self.compute.driver.finish_revert_migration(self.context, instance,
[], [], power_on)
instance.save()
self.compute.driver.get_info(instance).AndReturn(
hardware.InstanceInfo(state=power_state.SHUTDOWN))
self.compute.driver.get_info(instance).AndReturn(
hardware.InstanceInfo(state=power_state.SHUTDOWN))
self.mox.ReplayAll()
self.compute._init_instance(self.context, instance)
self.assertIsNone(instance.task_state)
def test_init_instance_reverts_crashed_migration_from_active(self):
self._test_init_instance_reverts_crashed_migrations(
old_vm_state=vm_states.ACTIVE)
def test_init_instance_reverts_crashed_migration_from_stopped(self):
self._test_init_instance_reverts_crashed_migrations(
old_vm_state=vm_states.STOPPED)
def test_init_instance_reverts_crashed_migration_no_old_state(self):
self._test_init_instance_reverts_crashed_migrations(old_vm_state=None)
def test_init_instance_resets_crashed_live_migration(self):
instance = fake_instance.fake_instance_obj(
self.context,
uuid='foo',
vm_state=vm_states.ACTIVE,
host=self.compute.host,
task_state=task_states.MIGRATING)
with contextlib.nested(
mock.patch.object(instance, 'save'),
mock.patch('nova.compute.utils.get_nw_info_for_instance',
return_value=network_model.NetworkInfo())
) as (save, get_nw_info):
self.compute._init_instance(self.context, instance)
save.assert_called_once_with(expected_task_state=['migrating'])
get_nw_info.assert_called_once_with(instance)
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
def _test_init_instance_sets_building_error(self, vm_state,
task_state=None):
instance = fake_instance.fake_instance_obj(
self.context,
uuid='foo',
vm_state=vm_state,
host=self.compute.host,
task_state=task_state)
with mock.patch.object(instance, 'save') as save:
self.compute._init_instance(self.context, instance)
save.assert_called_once_with()
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.ERROR, instance.vm_state)
def test_init_instance_sets_building_error(self):
self._test_init_instance_sets_building_error(vm_states.BUILDING)
def test_init_instance_sets_rebuilding_errors(self):
tasks = [task_states.REBUILDING,
task_states.REBUILD_BLOCK_DEVICE_MAPPING,
task_states.REBUILD_SPAWNING]
vms = [vm_states.ACTIVE, vm_states.STOPPED]
for vm_state in vms:
for task_state in tasks:
self._test_init_instance_sets_building_error(
vm_state, task_state)
def _test_init_instance_sets_building_tasks_error(self, instance):
instance.host = self.compute.host
with mock.patch.object(instance, 'save') as save:
self.compute._init_instance(self.context, instance)
save.assert_called_once_with()
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.ERROR, instance.vm_state)
def test_init_instance_sets_building_tasks_error_scheduling(self):
instance = fake_instance.fake_instance_obj(
self.context,
uuid='foo',
vm_state=None,
task_state=task_states.SCHEDULING)
self._test_init_instance_sets_building_tasks_error(instance)
def test_init_instance_sets_building_tasks_error_block_device(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = None
instance.task_state = task_states.BLOCK_DEVICE_MAPPING
self._test_init_instance_sets_building_tasks_error(instance)
def test_init_instance_sets_building_tasks_error_networking(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = None
instance.task_state = task_states.NETWORKING
self._test_init_instance_sets_building_tasks_error(instance)
def test_init_instance_sets_building_tasks_error_spawning(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = None
instance.task_state = task_states.SPAWNING
self._test_init_instance_sets_building_tasks_error(instance)
def _test_init_instance_cleans_image_states(self, instance):
with mock.patch.object(instance, 'save') as save:
self.compute._get_power_state = mock.Mock()
self.compute.driver.post_interrupted_snapshot_cleanup = mock.Mock()
instance.info_cache = None
instance.power_state = power_state.RUNNING
instance.host = self.compute.host
self.compute._init_instance(self.context, instance)
save.assert_called_once_with()
self.compute.driver.post_interrupted_snapshot_cleanup.\
assert_called_once_with(self.context, instance)
self.assertIsNone(instance.task_state)
@mock.patch('nova.compute.manager.ComputeManager._get_power_state',
return_value=power_state.RUNNING)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
def _test_init_instance_cleans_task_states(self, powerstate, state,
mock_get_uuid, mock_get_power_state):
instance = objects.Instance(self.context)
instance.uuid = 'fake-uuid'
instance.info_cache = None
instance.power_state = power_state.RUNNING
instance.vm_state = vm_states.ACTIVE
instance.task_state = state
instance.host = self.compute.host
mock_get_power_state.return_value = powerstate
self.compute._init_instance(self.context, instance)
return instance
def test_init_instance_cleans_image_state_pending_upload(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_PENDING_UPLOAD
self._test_init_instance_cleans_image_states(instance)
def test_init_instance_cleans_image_state_uploading(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_UPLOADING
self._test_init_instance_cleans_image_states(instance)
def test_init_instance_cleans_image_state_snapshot(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_SNAPSHOT
self._test_init_instance_cleans_image_states(instance)
def test_init_instance_cleans_image_state_snapshot_pending(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
self._test_init_instance_cleans_image_states(instance)
@mock.patch.object(objects.Instance, 'save')
def test_init_instance_cleans_running_pausing(self, mock_save):
instance = self._test_init_instance_cleans_task_states(
power_state.RUNNING, task_states.PAUSING)
mock_save.assert_called_once_with()
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
@mock.patch.object(objects.Instance, 'save')
def test_init_instance_cleans_running_unpausing(self, mock_save):
instance = self._test_init_instance_cleans_task_states(
power_state.RUNNING, task_states.UNPAUSING)
mock_save.assert_called_once_with()
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
@mock.patch('nova.compute.manager.ComputeManager.unpause_instance')
def test_init_instance_cleans_paused_unpausing(self, mock_unpause):
def fake_unpause(context, instance):
instance.task_state = None
mock_unpause.side_effect = fake_unpause
instance = self._test_init_instance_cleans_task_states(
power_state.PAUSED, task_states.UNPAUSING)
mock_unpause.assert_called_once_with(self.context, instance)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
def test_init_instance_errors_when_not_migrating(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ERROR
instance.task_state = task_states.IMAGE_UPLOADING
instance.host = self.compute.host
self.mox.StubOutWithMock(compute_utils, 'get_nw_info_for_instance')
self.mox.ReplayAll()
self.compute._init_instance(self.context, instance)
self.mox.VerifyAll()
def test_init_instance_deletes_error_deleting_instance(self):
instance = fake_instance.fake_instance_obj(
self.context,
project_id='fake',
uuid='fake-uuid',
vcpus=1,
memory_mb=64,
vm_state=vm_states.ERROR,
host=self.compute.host,
task_state=task_states.DELETING)
self.mox.StubOutWithMock(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
self.mox.StubOutWithMock(self.compute, '_delete_instance')
self.mox.StubOutWithMock(instance, 'obj_load_attr')
self.mox.StubOutWithMock(objects.quotas, 'ids_from_instance')
self.mox.StubOutWithMock(self.compute, '_create_reservations')
bdms = []
quotas = objects.quotas.Quotas(self.context)
instance.obj_load_attr('metadata')
instance.obj_load_attr('system_metadata')
objects.BlockDeviceMappingList.get_by_instance_uuid(
self.context, instance.uuid).AndReturn(bdms)
objects.quotas.ids_from_instance(self.context, instance).AndReturn(
(instance.project_id, instance.user_id))
self.compute._create_reservations(self.context, instance,
instance.project_id,
instance.user_id).AndReturn(quotas)
self.compute._delete_instance(self.context, instance, bdms,
mox.IgnoreArg())
self.mox.ReplayAll()
self.compute._init_instance(self.context, instance)
self.mox.VerifyAll()
def test_init_instance_resize_prep(self):
instance = fake_instance.fake_instance_obj(
self.context,
uuid='fake',
vm_state=vm_states.ACTIVE,
host=self.compute.host,
task_state=task_states.RESIZE_PREP,
power_state=power_state.RUNNING)
with contextlib.nested(
mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING),
mock.patch.object(compute_utils, 'get_nw_info_for_instance'),
mock.patch.object(instance, 'save', autospec=True)
) as (mock_get_power_state, mock_nw_info, mock_instance_save):
self.compute._init_instance(self.context, instance)
mock_instance_save.assert_called_once_with()
self.assertIsNone(instance.task_state)
@mock.patch('nova.context.RequestContext.elevated')
@mock.patch('nova.compute.utils.get_nw_info_for_instance')
@mock.patch(
'nova.compute.manager.ComputeManager._get_instance_block_device_info')
@mock.patch('nova.virt.driver.ComputeDriver.destroy')
@mock.patch('nova.virt.driver.ComputeDriver.get_volume_connector')
def _test_shutdown_instance_exception(self, exc, mock_connector,
mock_destroy, mock_blk_device_info, mock_nw_info, mock_elevated):
mock_connector.side_effect = exc
mock_elevated.return_value = self.context
instance = fake_instance.fake_instance_obj(
self.context,
uuid='fake',
vm_state=vm_states.ERROR,
task_state=task_states.DELETING)
bdms = [mock.Mock(id=1, is_volume=True)]
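        # The exception raised by get_volume_connector should not propagate;
        # _shutdown_instance is expected to swallow it and finish the teardown.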
self.compute._shutdown_instance(self.context, instance, bdms,
notify=False, try_deallocate_networks=False)
def test_shutdown_instance_endpoint_not_found(self):
exc = cinder_exception.EndpointNotFound
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_client_exception(self):
exc = cinder_exception.ClientException
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_volume_not_found(self):
exc = exception.VolumeNotFound
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_disk_not_found(self):
exc = exception.DiskNotFound
self._test_shutdown_instance_exception(exc)
def test_shutdown_instance_other_exception(self):
exc = Exception('some other exception')
self._test_shutdown_instance_exception(exc)
def _test_init_instance_retries_reboot(self, instance, reboot_type,
return_power_state):
instance.host = self.compute.host
with contextlib.nested(
mock.patch.object(self.compute, '_get_power_state',
return_value=return_power_state),
mock.patch.object(self.compute, 'reboot_instance'),
mock.patch.object(compute_utils, 'get_nw_info_for_instance')
) as (
_get_power_state,
reboot_instance,
get_nw_info_for_instance
):
self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance, block_device_info=None,
reboot_type=reboot_type)
reboot_instance.assert_has_calls([call])
def test_init_instance_retries_reboot_pending(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_PENDING
for state in vm_states.ALLOW_SOFT_REBOOT:
instance.vm_state = state
self._test_init_instance_retries_reboot(instance, 'SOFT',
power_state.RUNNING)
def test_init_instance_retries_reboot_pending_hard(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_PENDING_HARD
for state in vm_states.ALLOW_HARD_REBOOT:
# NOTE(dave-mcnally) while a reboot of a vm in error state is
# possible we don't attempt to recover an error during init
if state == vm_states.ERROR:
continue
instance.vm_state = state
self._test_init_instance_retries_reboot(instance, 'HARD',
power_state.RUNNING)
def test_init_instance_retries_reboot_pending_soft_became_hard(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_PENDING
for state in vm_states.ALLOW_HARD_REBOOT:
# NOTE(dave-mcnally) while a reboot of a vm in error state is
# possible we don't attempt to recover an error during init
if state == vm_states.ERROR:
continue
instance.vm_state = state
self._test_init_instance_retries_reboot(instance, 'HARD',
power_state.SHUTDOWN)
self.assertEqual(task_states.REBOOT_PENDING_HARD,
instance.task_state)
def test_init_instance_retries_reboot_started(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.REBOOT_STARTED
self._test_init_instance_retries_reboot(instance, 'HARD',
power_state.NOSTATE)
def test_init_instance_retries_reboot_started_hard(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.REBOOT_STARTED_HARD
self._test_init_instance_retries_reboot(instance, 'HARD',
power_state.NOSTATE)
def _test_init_instance_cleans_reboot_state(self, instance):
instance.host = self.compute.host
with contextlib.nested(
mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING),
mock.patch.object(instance, 'save', autospec=True),
mock.patch.object(compute_utils, 'get_nw_info_for_instance')
) as (
_get_power_state,
instance_save,
get_nw_info_for_instance
):
self.compute._init_instance(self.context, instance)
instance_save.assert_called_once_with()
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
def test_init_instance_cleans_image_state_reboot_started(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.REBOOT_STARTED
instance.power_state = power_state.RUNNING
self._test_init_instance_cleans_reboot_state(instance)
def test_init_instance_cleans_image_state_reboot_started_hard(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.REBOOT_STARTED_HARD
instance.power_state = power_state.RUNNING
self._test_init_instance_cleans_reboot_state(instance)
def test_init_instance_retries_power_off(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.id = 1
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.host = self.compute.host
with mock.patch.object(self.compute, 'stop_instance'):
self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance, True)
self.compute.stop_instance.assert_has_calls([call])
def test_init_instance_retries_power_on(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.id = 1
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_ON
instance.host = self.compute.host
with mock.patch.object(self.compute, 'start_instance'):
self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance)
self.compute.start_instance.assert_has_calls([call])
def test_init_instance_retries_power_on_silent_exception(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.id = 1
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_ON
instance.host = self.compute.host
        with mock.patch.object(self.compute, 'start_instance',
                               side_effect=Exception):
init_return = self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance)
self.compute.start_instance.assert_has_calls([call])
self.assertIsNone(init_return)
def test_init_instance_retries_power_off_silent_exception(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.id = 1
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.POWERING_OFF
instance.host = self.compute.host
        with mock.patch.object(self.compute, 'stop_instance',
                               side_effect=Exception):
init_return = self.compute._init_instance(self.context, instance)
call = mock.call(self.context, instance, True)
self.compute.stop_instance.assert_has_calls([call])
self.assertIsNone(init_return)
def test_get_instances_on_driver(self):
driver_instances = []
for x in range(10):
driver_instances.append(fake_instance.fake_db_instance())
self.mox.StubOutWithMock(self.compute.driver,
'list_instance_uuids')
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
self.compute.driver.list_instance_uuids().AndReturn(
[inst['uuid'] for inst in driver_instances])
db.instance_get_all_by_filters(
self.context,
{'uuid': [inst['uuid'] for
inst in driver_instances]},
'created_at', 'desc', columns_to_join=None,
limit=None, marker=None,
use_slave=True).AndReturn(
driver_instances)
self.mox.ReplayAll()
result = self.compute._get_instances_on_driver(self.context)
self.assertEqual([x['uuid'] for x in driver_instances],
[x['uuid'] for x in result])
@mock.patch('nova.virt.driver.ComputeDriver.list_instance_uuids')
@mock.patch('nova.db.api.instance_get_all_by_filters')
def test_get_instances_on_driver_empty(self, mock_list, mock_db):
mock_list.return_value = []
result = self.compute._get_instances_on_driver(self.context)
# instance_get_all_by_filters should not be called
self.assertEqual(0, mock_db.call_count)
self.assertEqual([],
[x['uuid'] for x in result])
def test_get_instances_on_driver_fallback(self):
# Test getting instances when driver doesn't support
# 'list_instance_uuids'
self.compute.host = 'host'
filters = {'host': self.compute.host}
self.flags(instance_name_template='inst-%i')
all_instances = []
driver_instances = []
for x in range(10):
instance = fake_instance.fake_db_instance(name='inst-%i' % x,
id=x)
if x % 2:
driver_instances.append(instance)
all_instances.append(instance)
self.mox.StubOutWithMock(self.compute.driver,
'list_instance_uuids')
self.mox.StubOutWithMock(self.compute.driver,
'list_instances')
self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
self.compute.driver.list_instance_uuids().AndRaise(
NotImplementedError())
self.compute.driver.list_instances().AndReturn(
[inst['name'] for inst in driver_instances])
db.instance_get_all_by_filters(
self.context, filters,
'created_at', 'desc', columns_to_join=None,
limit=None, marker=None,
use_slave=True).AndReturn(all_instances)
self.mox.ReplayAll()
result = self.compute._get_instances_on_driver(self.context, filters)
self.assertEqual([x['uuid'] for x in driver_instances],
[x['uuid'] for x in result])
def test_instance_usage_audit(self):
instances = [objects.Instance(uuid='foo')]
@classmethod
def fake_task_log(*a, **k):
pass
@classmethod
def fake_get(*a, **k):
return instances
self.flags(instance_usage_audit=True)
self.stubs.Set(objects.TaskLog, 'get', fake_task_log)
self.stubs.Set(objects.InstanceList,
'get_active_by_window_joined', fake_get)
self.stubs.Set(objects.TaskLog, 'begin_task', fake_task_log)
self.stubs.Set(objects.TaskLog, 'end_task', fake_task_log)
self.mox.StubOutWithMock(compute_utils, 'notify_usage_exists')
compute_utils.notify_usage_exists(self.compute.notifier,
self.context, instances[0], ignore_missing_network_data=False)
self.mox.ReplayAll()
self.compute._instance_usage_audit(self.context)
@mock.patch.object(objects.InstanceList, 'get_by_host')
def test_sync_power_states(self, mock_get):
instance = mock.Mock()
mock_get.return_value = [instance]
with mock.patch.object(self.compute._sync_power_pool,
'spawn_n') as mock_spawn:
self.compute._sync_power_states(mock.sentinel.context)
mock_get.assert_called_with(mock.sentinel.context,
self.compute.host, expected_attrs=[],
use_slave=True)
mock_spawn.assert_called_once_with(mock.ANY, instance)
def _get_sync_instance(self, power_state, vm_state, task_state=None,
shutdown_terminate=False):
instance = objects.Instance()
instance.uuid = 'fake-uuid'
instance.power_state = power_state
instance.vm_state = vm_state
instance.host = self.compute.host
instance.task_state = task_state
instance.shutdown_terminate = shutdown_terminate
self.mox.StubOutWithMock(instance, 'refresh')
self.mox.StubOutWithMock(instance, 'save')
return instance
def test_sync_instance_power_state_match(self):
instance = self._get_sync_instance(power_state.RUNNING,
vm_states.ACTIVE)
instance.refresh(use_slave=False)
self.mox.ReplayAll()
self.compute._sync_instance_power_state(self.context, instance,
power_state.RUNNING)
def test_sync_instance_power_state_running_stopped(self):
instance = self._get_sync_instance(power_state.RUNNING,
vm_states.ACTIVE)
instance.refresh(use_slave=False)
instance.save()
self.mox.ReplayAll()
self.compute._sync_instance_power_state(self.context, instance,
power_state.SHUTDOWN)
self.assertEqual(instance.power_state, power_state.SHUTDOWN)
def _test_sync_to_stop(self, power_state, vm_state, driver_power_state,
stop=True, force=False, shutdown_terminate=False):
instance = self._get_sync_instance(
power_state, vm_state, shutdown_terminate=shutdown_terminate)
instance.refresh(use_slave=False)
instance.save()
self.mox.StubOutWithMock(self.compute.compute_api, 'stop')
self.mox.StubOutWithMock(self.compute.compute_api, 'delete')
self.mox.StubOutWithMock(self.compute.compute_api, 'force_stop')
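        # shutdown_terminate instances are deleted outright; otherwise the
        # compute API is asked to stop (or force-stop) the instance, depending
        # on the flags passed in.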
if shutdown_terminate:
self.compute.compute_api.delete(self.context, instance)
elif stop:
if force:
self.compute.compute_api.force_stop(self.context, instance)
else:
self.compute.compute_api.stop(self.context, instance)
self.mox.ReplayAll()
self.compute._sync_instance_power_state(self.context, instance,
driver_power_state)
self.mox.VerifyAll()
self.mox.UnsetStubs()
def test_sync_instance_power_state_to_stop(self):
for ps in (power_state.SHUTDOWN, power_state.CRASHED,
power_state.SUSPENDED):
self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps)
for ps in (power_state.SHUTDOWN, power_state.CRASHED):
self._test_sync_to_stop(power_state.PAUSED, vm_states.PAUSED, ps,
force=True)
self._test_sync_to_stop(power_state.SHUTDOWN, vm_states.STOPPED,
power_state.RUNNING, force=True)
def test_sync_instance_power_state_to_terminate(self):
self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE,
power_state.SHUTDOWN,
force=False, shutdown_terminate=True)
def test_sync_instance_power_state_to_no_stop(self):
for ps in (power_state.PAUSED, power_state.NOSTATE):
self._test_sync_to_stop(power_state.RUNNING, vm_states.ACTIVE, ps,
stop=False)
for vs in (vm_states.SOFT_DELETED, vm_states.DELETED):
for ps in (power_state.NOSTATE, power_state.SHUTDOWN):
self._test_sync_to_stop(power_state.RUNNING, vs, ps,
stop=False)
@mock.patch('nova.compute.manager.ComputeManager.'
'_sync_instance_power_state')
def test_query_driver_power_state_and_sync_pending_task(
self, mock_sync_power_state):
with mock.patch.object(self.compute.driver,
'get_info') as mock_get_info:
db_instance = objects.Instance(uuid='fake-uuid',
task_state=task_states.POWERING_OFF)
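            # An instance with a pending task state is skipped entirely:
            # neither the driver nor the power-state sync should be called.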
self.compute._query_driver_power_state_and_sync(self.context,
db_instance)
self.assertFalse(mock_get_info.called)
self.assertFalse(mock_sync_power_state.called)
@mock.patch('nova.compute.manager.ComputeManager.'
'_sync_instance_power_state')
def test_query_driver_power_state_and_sync_not_found_driver(
self, mock_sync_power_state):
error = exception.InstanceNotFound(instance_id=1)
with mock.patch.object(self.compute.driver,
'get_info', side_effect=error) as mock_get_info:
db_instance = objects.Instance(uuid='fake-uuid', task_state=None)
self.compute._query_driver_power_state_and_sync(self.context,
db_instance)
mock_get_info.assert_called_once_with(db_instance)
mock_sync_power_state.assert_called_once_with(self.context,
db_instance,
power_state.NOSTATE,
use_slave=True)
def test_run_pending_deletes(self):
self.flags(instance_delete_interval=10)
class FakeInstance(object):
def __init__(self, uuid, name, smd):
self.uuid = uuid
self.name = name
self.system_metadata = smd
self.cleaned = False
def __getitem__(self, name):
return getattr(self, name)
def save(self):
pass
a = FakeInstance('123', 'apple', {'clean_attempts': '100'})
b = FakeInstance('456', 'orange', {'clean_attempts': '3'})
c = FakeInstance('789', 'banana', {})
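        # 'a' has already exhausted its clean attempts and is skipped, 'b' is
        # cleaned successfully on this pass, and 'c' fails and records its
        # first attempt.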
self.mox.StubOutWithMock(objects.InstanceList,
'get_by_filters')
objects.InstanceList.get_by_filters(
{'read_deleted': 'yes'},
{'deleted': True, 'soft_deleted': False, 'host': 'fake-mini',
'cleaned': False},
expected_attrs=['info_cache', 'security_groups',
'system_metadata'],
use_slave=True).AndReturn([a, b, c])
self.mox.StubOutWithMock(self.compute.driver, 'delete_instance_files')
self.compute.driver.delete_instance_files(
mox.IgnoreArg()).AndReturn(True)
self.compute.driver.delete_instance_files(
mox.IgnoreArg()).AndReturn(False)
self.mox.ReplayAll()
self.compute._run_pending_deletes({})
self.assertFalse(a.cleaned)
self.assertEqual('100', a.system_metadata['clean_attempts'])
self.assertTrue(b.cleaned)
self.assertEqual('4', b.system_metadata['clean_attempts'])
self.assertFalse(c.cleaned)
self.assertEqual('1', c.system_metadata['clean_attempts'])
@mock.patch.object(objects.Migration, 'obj_as_admin')
@mock.patch.object(objects.Migration, 'save')
@mock.patch.object(objects.MigrationList, 'get_by_filters')
@mock.patch.object(objects.InstanceList, 'get_by_filters')
def _test_cleanup_incomplete_migrations(self, inst_host,
mock_inst_get_by_filters,
mock_migration_get_by_filters,
mock_save, mock_obj_as_admin):
def fake_inst(context, uuid, host):
inst = objects.Instance(context)
inst.uuid = uuid
inst.host = host
return inst
def fake_migration(uuid, status, inst_uuid, src_host, dest_host):
migration = objects.Migration()
migration.uuid = uuid
migration.status = status
migration.instance_uuid = inst_uuid
migration.source_compute = src_host
migration.dest_compute = dest_host
return migration
fake_instances = [fake_inst(self.context, '111', inst_host),
fake_inst(self.context, '222', inst_host)]
fake_migrations = [fake_migration('123', 'error', '111',
'fake-host', 'fake-mini'),
fake_migration('456', 'error', '222',
'fake-host', 'fake-mini')]
mock_migration_get_by_filters.return_value = fake_migrations
mock_inst_get_by_filters.return_value = fake_instances
with mock.patch.object(self.compute.driver, 'delete_instance_files'):
self.compute._cleanup_incomplete_migrations(self.context)
        # Ensure that the migration status is set to 'failed' after the
        # instance files are deleted, for those instances whose instance.host
        # does not match the compute host where the periodic task is running.
for inst in fake_instances:
if inst.host != CONF.host:
for mig in fake_migrations:
if inst.uuid == mig.instance_uuid:
self.assertEqual('failed', mig.status)
def test_cleanup_incomplete_migrations_dest_node(self):
"""Test to ensure instance files are deleted from destination node.
If instance gets deleted during resizing/revert-resizing operation,
in that case instance files gets deleted from instance.host (source
host here), but there is possibility that instance files could be
present on destination node.
This test ensures that `_cleanup_incomplete_migration` periodic
task deletes orphaned instance files from destination compute node.
"""
self.flags(host='fake-mini')
self._test_cleanup_incomplete_migrations('fake-host')
def test_cleanup_incomplete_migrations_source_node(self):
"""Test to ensure instance files are deleted from source node.
If instance gets deleted during resizing/revert-resizing operation,
in that case instance files gets deleted from instance.host (dest
host here), but there is possibility that instance files could be
present on source node.
This test ensures that `_cleanup_incomplete_migration` periodic
task deletes orphaned instance files from source compute node.
"""
self.flags(host='fake-host')
self._test_cleanup_incomplete_migrations('fake-mini')
def test_attach_interface_failure(self):
# Test that the fault methods are invoked when an attach fails
db_instance = fake_instance.fake_db_instance()
f_instance = objects.Instance._from_db_object(self.context,
objects.Instance(),
db_instance)
e = exception.InterfaceAttachFailed(instance_uuid=f_instance.uuid)
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(self.compute.network_api,
'allocate_port_for_instance',
side_effect=e)
@mock.patch.object(self.compute, '_instance_update',
side_effect=lambda *a, **k: {})
def do_test(update, meth, add_fault):
self.assertRaises(exception.InterfaceAttachFailed,
self.compute.attach_interface,
self.context, f_instance, 'net_id', 'port_id',
None)
add_fault.assert_has_calls([
mock.call(self.context, f_instance, e,
mock.ANY)])
do_test()
def test_detach_interface_failure(self):
# Test that the fault methods are invoked when a detach fails
# Build test data that will cause a PortNotFound exception
f_instance = mock.MagicMock()
f_instance.info_cache = mock.MagicMock()
f_instance.info_cache.network_info = []
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
@mock.patch.object(self.compute, '_set_instance_obj_error_state')
def do_test(meth, add_fault):
self.assertRaises(exception.PortNotFound,
self.compute.detach_interface,
self.context, f_instance, 'port_id')
add_fault.assert_has_calls(
[mock.call(self.context, f_instance, mock.ANY, mock.ANY)])
do_test()
def test_swap_volume_volume_api_usage(self):
# This test ensures that volume_id arguments are passed to volume_api
# and that volume states are OK
volumes = {}
old_volume_id = uuidutils.generate_uuid()
volumes[old_volume_id] = {'id': old_volume_id,
'display_name': 'old_volume',
'status': 'detaching',
'size': 1}
new_volume_id = uuidutils.generate_uuid()
volumes[new_volume_id] = {'id': new_volume_id,
'display_name': 'new_volume',
'status': 'available',
'size': 2}
def fake_vol_api_roll_detaching(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
if volumes[volume_id]['status'] == 'detaching':
volumes[volume_id]['status'] = 'in-use'
fake_bdm = fake_block_device.FakeDbBlockDeviceDict(
{'device_name': '/dev/vdb', 'source_type': 'volume',
'destination_type': 'volume', 'instance_uuid': 'fake',
'connection_info': '{"foo": "bar"}'})
def fake_vol_api_func(context, volume, *args):
self.assertTrue(uuidutils.is_uuid_like(volume))
return {}
def fake_vol_get(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
return volumes[volume_id]
def fake_vol_unreserve(context, volume_id):
self.assertTrue(uuidutils.is_uuid_like(volume_id))
if volumes[volume_id]['status'] == 'attaching':
volumes[volume_id]['status'] = 'available'
def fake_vol_migrate_volume_completion(context, old_volume_id,
new_volume_id, error=False):
self.assertTrue(uuidutils.is_uuid_like(old_volume_id))
self.assertTrue(uuidutils.is_uuid_like(new_volume_id))
volumes[old_volume_id]['status'] = 'in-use'
return {'save_volume_id': new_volume_id}
def fake_func_exc(*args, **kwargs):
raise AttributeError # Random exception
def fake_swap_volume(old_connection_info, new_connection_info,
instance, mountpoint, resize_to):
self.assertEqual(resize_to, 2)
def fake_block_device_mapping_update(ctxt, id, updates, legacy):
self.assertEqual(2, updates['volume_size'])
return fake_bdm
self.stubs.Set(self.compute.volume_api, 'roll_detaching',
fake_vol_api_roll_detaching)
self.stubs.Set(self.compute.volume_api, 'get', fake_vol_get)
self.stubs.Set(self.compute.volume_api, 'initialize_connection',
fake_vol_api_func)
self.stubs.Set(self.compute.volume_api, 'unreserve_volume',
fake_vol_unreserve)
self.stubs.Set(self.compute.volume_api, 'terminate_connection',
fake_vol_api_func)
self.stubs.Set(db, 'block_device_mapping_get_by_volume_id',
lambda x, y, z: fake_bdm)
self.stubs.Set(self.compute.driver, 'get_volume_connector',
lambda x: {})
self.stubs.Set(self.compute.driver, 'swap_volume',
fake_swap_volume)
self.stubs.Set(self.compute.volume_api, 'migrate_volume_completion',
fake_vol_migrate_volume_completion)
self.stubs.Set(db, 'block_device_mapping_update',
fake_block_device_mapping_update)
self.stubs.Set(db,
'instance_fault_create',
lambda x, y:
test_instance_fault.fake_faults['fake-uuid'][0])
self.stubs.Set(self.compute, '_instance_update',
lambda c, u, **k: {})
# Good path
self.compute.swap_volume(self.context, old_volume_id, new_volume_id,
fake_instance.fake_instance_obj(
self.context, **{'uuid': 'fake'}))
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
# Error paths
volumes[old_volume_id]['status'] = 'detaching'
volumes[new_volume_id]['status'] = 'attaching'
self.stubs.Set(self.compute.driver, 'swap_volume', fake_func_exc)
self.assertRaises(AttributeError, self.compute.swap_volume,
self.context, old_volume_id, new_volume_id,
fake_instance.fake_instance_obj(
self.context, **{'uuid': 'fake'}))
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
volumes[old_volume_id]['status'] = 'detaching'
volumes[new_volume_id]['status'] = 'attaching'
self.stubs.Set(self.compute.volume_api, 'initialize_connection',
fake_func_exc)
self.assertRaises(AttributeError, self.compute.swap_volume,
self.context, old_volume_id, new_volume_id,
fake_instance.fake_instance_obj(
self.context, **{'uuid': 'fake'}))
self.assertEqual(volumes[old_volume_id]['status'], 'in-use')
self.assertEqual(volumes[new_volume_id]['status'], 'available')
@mock.patch.object(compute_utils, 'EventReporter')
def test_check_can_live_migrate_source(self, event_mock):
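        # The source check should pass refreshed block device info and the
        # is_volume_backed flag to the driver and report a
        # compute_check_can_live_migrate_source event.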
is_volume_backed = 'volume_backed'
dest_check_data = dict(foo='bar')
db_instance = fake_instance.fake_db_instance()
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), db_instance)
expected_dest_check_data = dict(dest_check_data,
is_volume_backed=is_volume_backed)
self.mox.StubOutWithMock(self.compute.compute_api,
'is_volume_backed_instance')
self.mox.StubOutWithMock(self.compute,
'_get_instance_block_device_info')
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_source')
self.compute.compute_api.is_volume_backed_instance(
self.context, instance).AndReturn(is_volume_backed)
self.compute._get_instance_block_device_info(
self.context, instance, refresh_conn_info=True
).AndReturn({'block_device_mapping': 'fake'})
self.compute.driver.check_can_live_migrate_source(
self.context, instance, expected_dest_check_data,
{'block_device_mapping': 'fake'})
self.mox.ReplayAll()
self.compute.check_can_live_migrate_source(
self.context, instance=instance,
dest_check_data=dest_check_data)
event_mock.assert_called_once_with(
self.context, 'compute_check_can_live_migrate_source',
instance.uuid)
@mock.patch.object(compute_utils, 'EventReporter')
def _test_check_can_live_migrate_destination(self, event_mock,
do_raise=False,
has_mig_data=False):
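        # Shared code for the destination-check tests: the driver and source
        # RPC checks are chained, cleanup always runs, and migrate_data from
        # the destination check data is merged into the result.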
db_instance = fake_instance.fake_db_instance(host='fake-host')
instance = objects.Instance._from_db_object(
self.context, objects.Instance(), db_instance)
instance.host = 'fake-host'
block_migration = 'block_migration'
disk_over_commit = 'disk_over_commit'
src_info = 'src_info'
dest_info = 'dest_info'
dest_check_data = dict(foo='bar')
mig_data = dict(cow='moo')
expected_result = dict(mig_data)
if has_mig_data:
dest_check_data['migrate_data'] = dict(cat='meow')
expected_result.update(cat='meow')
self.mox.StubOutWithMock(self.compute, '_get_compute_info')
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_destination')
self.mox.StubOutWithMock(self.compute.compute_rpcapi,
'check_can_live_migrate_source')
self.mox.StubOutWithMock(self.compute.driver,
'check_can_live_migrate_destination_cleanup')
self.compute._get_compute_info(self.context,
'fake-host').AndReturn(src_info)
self.compute._get_compute_info(self.context,
CONF.host).AndReturn(dest_info)
self.compute.driver.check_can_live_migrate_destination(
self.context, instance, src_info, dest_info,
block_migration, disk_over_commit).AndReturn(dest_check_data)
mock_meth = self.compute.compute_rpcapi.check_can_live_migrate_source(
self.context, instance, dest_check_data)
if do_raise:
mock_meth.AndRaise(test.TestingException())
self.mox.StubOutWithMock(db, 'instance_fault_create')
db.instance_fault_create(
self.context, mox.IgnoreArg()).AndReturn(
test_instance_fault.fake_faults['fake-uuid'][0])
else:
mock_meth.AndReturn(mig_data)
self.compute.driver.check_can_live_migrate_destination_cleanup(
self.context, dest_check_data)
self.mox.ReplayAll()
result = self.compute.check_can_live_migrate_destination(
self.context, instance=instance,
block_migration=block_migration,
disk_over_commit=disk_over_commit)
self.assertEqual(expected_result, result)
event_mock.assert_called_once_with(
self.context, 'compute_check_can_live_migrate_destination',
instance.uuid)
def test_check_can_live_migrate_destination_success(self):
self._test_check_can_live_migrate_destination()
def test_check_can_live_migrate_destination_success_w_mig_data(self):
self._test_check_can_live_migrate_destination(has_mig_data=True)
def test_check_can_live_migrate_destination_fail(self):
self.assertRaises(
test.TestingException,
self._test_check_can_live_migrate_destination,
do_raise=True)
@mock.patch('nova.compute.manager.InstanceEvents._lock_name')
def test_prepare_for_instance_event(self, lock_name_mock):
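        # Preparing for an event registers a sendable event object under the
        # instance uuid and event name.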
inst_obj = objects.Instance(uuid='foo')
result = self.compute.instance_events.prepare_for_instance_event(
inst_obj, 'test-event')
self.assertIn('foo', self.compute.instance_events._events)
self.assertIn('test-event',
self.compute.instance_events._events['foo'])
self.assertEqual(
result,
self.compute.instance_events._events['foo']['test-event'])
self.assertTrue(hasattr(result, 'send'))
lock_name_mock.assert_called_once_with(inst_obj)
@mock.patch('nova.compute.manager.InstanceEvents._lock_name')
def test_pop_instance_event(self, lock_name_mock):
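        # Popping an instance event returns the eventlet event stored for
        # that instance and event name.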
event = eventlet_event.Event()
self.compute.instance_events._events = {
'foo': {
'network-vif-plugged': event,
}
}
inst_obj = objects.Instance(uuid='foo')
event_obj = objects.InstanceExternalEvent(name='network-vif-plugged',
tag=None)
result = self.compute.instance_events.pop_instance_event(inst_obj,
event_obj)
self.assertEqual(result, event)
lock_name_mock.assert_called_once_with(inst_obj)
@mock.patch('nova.compute.manager.InstanceEvents._lock_name')
def test_clear_events_for_instance(self, lock_name_mock):
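        # Clearing events returns everything registered for the instance.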
event = eventlet_event.Event()
self.compute.instance_events._events = {
'foo': {
'test-event': event,
}
}
inst_obj = objects.Instance(uuid='foo')
result = self.compute.instance_events.clear_events_for_instance(
inst_obj)
self.assertEqual(result, {'test-event': event})
lock_name_mock.assert_called_once_with(inst_obj)
def test_instance_events_lock_name(self):
inst_obj = objects.Instance(uuid='foo')
result = self.compute.instance_events._lock_name(inst_obj)
self.assertEqual(result, 'foo-events')
def test_prepare_for_instance_event_again(self):
inst_obj = objects.Instance(uuid='foo')
self.compute.instance_events.prepare_for_instance_event(
inst_obj, 'test-event')
# A second attempt will avoid creating a new list; make sure we
# get the current list
result = self.compute.instance_events.prepare_for_instance_event(
inst_obj, 'test-event')
self.assertIn('foo', self.compute.instance_events._events)
self.assertIn('test-event',
self.compute.instance_events._events['foo'])
self.assertEqual(
result,
self.compute.instance_events._events['foo']['test-event'])
self.assertTrue(hasattr(result, 'send'))
def test_process_instance_event(self):
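        # Processing an external event sends it to the waiter and removes it
        # from the registry.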
event = eventlet_event.Event()
self.compute.instance_events._events = {
'foo': {
'network-vif-plugged': event,
}
}
inst_obj = objects.Instance(uuid='foo')
event_obj = objects.InstanceExternalEvent(name='network-vif-plugged',
tag=None)
self.compute._process_instance_event(inst_obj, event_obj)
self.assertTrue(event.ready())
self.assertEqual(event_obj, event.wait())
self.assertEqual({}, self.compute.instance_events._events)
def test_process_instance_vif_deleted_event(self):
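        # A network-vif-deleted event should detach the interface and refresh
        # the network cache without the deleted VIF.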
vif1 = fake_network_cache_model.new_vif()
vif1['id'] = '1'
vif2 = fake_network_cache_model.new_vif()
vif2['id'] = '2'
nw_info = network_model.NetworkInfo([vif1, vif2])
info_cache = objects.InstanceInfoCache(network_info=nw_info,
instance_uuid='uuid')
inst_obj = objects.Instance(id=3, uuid='uuid', info_cache=info_cache)
@mock.patch.object(manager.base_net_api,
'update_instance_cache_with_nw_info')
@mock.patch.object(self.compute.driver, 'detach_interface')
def do_test(detach_interface, update_instance_cache_with_nw_info):
self.compute._process_instance_vif_deleted_event(self.context,
inst_obj,
vif2['id'])
update_instance_cache_with_nw_info.assert_called_once_with(
self.compute.network_api,
self.context,
inst_obj,
nw_info=[vif1])
detach_interface.assert_called_once_with(inst_obj, vif2)
do_test()
def test_external_instance_event(self):
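        # Each event type is dispatched to its handler: network-changed
        # refreshes network info, network-vif-plugged is processed as an
        # instance event and network-vif-deleted detaches the VIF.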
instances = [
objects.Instance(id=1, uuid='uuid1'),
objects.Instance(id=2, uuid='uuid2'),
objects.Instance(id=3, uuid='uuid3')]
events = [
objects.InstanceExternalEvent(name='network-changed',
tag='tag1',
instance_uuid='uuid1'),
objects.InstanceExternalEvent(name='network-vif-plugged',
instance_uuid='uuid2',
tag='tag2'),
objects.InstanceExternalEvent(name='network-vif-deleted',
instance_uuid='uuid3',
tag='tag3')]
@mock.patch.object(self.compute, '_process_instance_vif_deleted_event')
@mock.patch.object(self.compute.network_api, 'get_instance_nw_info')
@mock.patch.object(self.compute, '_process_instance_event')
def do_test(_process_instance_event, get_instance_nw_info,
_process_instance_vif_deleted_event):
self.compute.external_instance_event(self.context,
instances, events)
get_instance_nw_info.assert_called_once_with(self.context,
instances[0])
_process_instance_event.assert_called_once_with(instances[1],
events[1])
_process_instance_vif_deleted_event.assert_called_once_with(
self.context, instances[2], events[2].tag)
do_test()
def test_external_instance_event_with_exception(self):
vif1 = fake_network_cache_model.new_vif()
vif1['id'] = '1'
vif2 = fake_network_cache_model.new_vif()
vif2['id'] = '2'
nw_info = network_model.NetworkInfo([vif1, vif2])
info_cache = objects.InstanceInfoCache(network_info=nw_info,
instance_uuid='uuid2')
instances = [
objects.Instance(id=1, uuid='uuid1'),
objects.Instance(id=2, uuid='uuid2', info_cache=info_cache),
objects.Instance(id=3, uuid='uuid3')]
events = [
objects.InstanceExternalEvent(name='network-changed',
tag='tag1',
instance_uuid='uuid1'),
objects.InstanceExternalEvent(name='network-vif-deleted',
instance_uuid='uuid2',
tag='2'),
objects.InstanceExternalEvent(name='network-vif-plugged',
instance_uuid='uuid3',
tag='tag3')]
        # Make sure all three events are handled despite the exceptions raised
        # while processing events 1 and 2.
@mock.patch.object(manager.base_net_api,
'update_instance_cache_with_nw_info')
@mock.patch.object(self.compute.driver, 'detach_interface',
side_effect=exception.NovaException)
@mock.patch.object(self.compute.network_api, 'get_instance_nw_info',
side_effect=exception.InstanceInfoCacheNotFound(
instance_uuid='uuid1'))
@mock.patch.object(self.compute, '_process_instance_event')
def do_test(_process_instance_event, get_instance_nw_info,
detach_interface, update_instance_cache_with_nw_info):
self.compute.external_instance_event(self.context,
instances, events)
get_instance_nw_info.assert_called_once_with(self.context,
instances[0])
update_instance_cache_with_nw_info.assert_called_once_with(
self.compute.network_api,
self.context,
instances[1],
nw_info=[vif1])
detach_interface.assert_called_once_with(instances[1], vif2)
_process_instance_event.assert_called_once_with(instances[2],
events[2])
do_test()
def test_cancel_all_events(self):
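        # Cancelling all events sends a failed event to every pending waiter,
        # splitting the stored key back into name and tag.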
inst = objects.Instance(uuid='uuid')
fake_eventlet_event = mock.MagicMock()
self.compute.instance_events._events = {
inst.uuid: {
'network-vif-plugged-bar': fake_eventlet_event,
}
}
self.compute.instance_events.cancel_all_events()
self.assertTrue(fake_eventlet_event.send.called)
event = fake_eventlet_event.send.call_args_list[0][0][0]
self.assertEqual('network-vif-plugged', event.name)
self.assertEqual('bar', event.tag)
self.assertEqual('failed', event.status)
def test_cleanup_cancels_all_events(self):
with mock.patch.object(self.compute, 'instance_events') as mock_ev:
self.compute.cleanup_host()
mock_ev.cancel_all_events.assert_called_once_with()
def test_cleanup_blocks_new_events(self):
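        # Once events have been cancelled, wait_for_instance_event still runs
        # the body but reports the event via the error callback.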
instance = objects.Instance(uuid='uuid')
self.compute.instance_events.cancel_all_events()
callback = mock.MagicMock()
body = mock.MagicMock()
with self.compute.virtapi.wait_for_instance_event(
instance, ['network-vif-plugged-bar'],
error_callback=callback):
body()
self.assertTrue(body.called)
callback.assert_called_once_with('network-vif-plugged-bar', instance)
def test_pop_events_fails_gracefully(self):
inst = objects.Instance(uuid='uuid')
event = mock.MagicMock()
self.compute.instance_events._events = None
self.assertIsNone(
self.compute.instance_events.pop_instance_event(inst, event))
def test_clear_events_fails_gracefully(self):
inst = objects.Instance(uuid='uuid')
self.compute.instance_events._events = None
self.assertEqual(
self.compute.instance_events.clear_events_for_instance(inst), {})
def test_retry_reboot_pending_soft(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_PENDING
instance.vm_state = vm_states.ACTIVE
with mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING):
allow_reboot, reboot_type = self.compute._retry_reboot(
context, instance)
self.assertTrue(allow_reboot)
self.assertEqual(reboot_type, 'SOFT')
def test_retry_reboot_pending_hard(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_PENDING_HARD
instance.vm_state = vm_states.ACTIVE
with mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING):
allow_reboot, reboot_type = self.compute._retry_reboot(
context, instance)
self.assertTrue(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
def test_retry_reboot_starting_soft_off(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_STARTED
with mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.NOSTATE):
allow_reboot, reboot_type = self.compute._retry_reboot(
context, instance)
self.assertTrue(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
def test_retry_reboot_starting_hard_off(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_STARTED_HARD
with mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.NOSTATE):
allow_reboot, reboot_type = self.compute._retry_reboot(
context, instance)
self.assertTrue(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
def test_retry_reboot_starting_hard_on(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = task_states.REBOOT_STARTED_HARD
with mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING):
allow_reboot, reboot_type = self.compute._retry_reboot(
context, instance)
self.assertFalse(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
def test_retry_reboot_no_reboot(self):
instance = objects.Instance(self.context)
instance.uuid = 'foo'
instance.task_state = 'bar'
with mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING):
allow_reboot, reboot_type = self.compute._retry_reboot(
context, instance)
self.assertFalse(allow_reboot)
self.assertEqual(reboot_type, 'HARD')
@mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_id')
@mock.patch('nova.compute.manager.ComputeManager._driver_detach_volume')
@mock.patch('nova.objects.Instance._from_db_object')
def test_remove_volume_connection(self, inst_from_db, detach, bdm_get):
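        # remove_volume_connection looks up the BDM by volume id and
        # delegates the detach to _driver_detach_volume.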
bdm = mock.sentinel.bdm
inst_obj = mock.sentinel.inst_obj
bdm_get.return_value = bdm
inst_from_db.return_value = inst_obj
with mock.patch.object(self.compute, 'volume_api'):
self.compute.remove_volume_connection(self.context, 'vol',
inst_obj)
detach.assert_called_once_with(self.context, inst_obj, bdm)
def test_detach_volume(self):
self._test_detach_volume()
def test_detach_volume_not_destroy_bdm(self):
self._test_detach_volume(destroy_bdm=False)
@mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_id')
@mock.patch('nova.compute.manager.ComputeManager._driver_detach_volume')
@mock.patch('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage')
def _test_detach_volume(self, notify_inst_usage, detach,
bdm_get, destroy_bdm=True):
volume_id = '123'
inst_obj = mock.sentinel.inst_obj
bdm = mock.MagicMock(spec=objects.BlockDeviceMapping)
bdm.device_name = 'vdb'
bdm_get.return_value = bdm
detach.return_value = {}
with mock.patch.object(self.compute, 'volume_api') as volume_api:
with mock.patch.object(self.compute, 'driver') as driver:
connector_sentinel = mock.sentinel.connector
driver.get_volume_connector.return_value = connector_sentinel
self.compute._detach_volume(self.context, volume_id,
inst_obj,
destroy_bdm=destroy_bdm)
detach.assert_called_once_with(self.context, inst_obj, bdm)
driver.get_volume_connector.assert_called_once_with(inst_obj)
volume_api.terminate_connection.assert_called_once_with(
self.context, volume_id, connector_sentinel)
volume_api.detach.assert_called_once_with(mock.ANY, volume_id)
notify_inst_usage.assert_called_once_with(
self.context, inst_obj, "volume.detach",
extra_usage_info={'volume_id': volume_id}
)
if destroy_bdm:
bdm.destroy.assert_called_once_with()
else:
self.assertFalse(bdm.destroy.called)
def test_detach_volume_evacuate(self):
"""For evacuate, terminate_connection is called with original host."""
expected_connector = {'host': 'evacuated-host'}
conn_info_str = '{"connector": {"host": "evacuated-host"}}'
self._test_detach_volume_evacuate(conn_info_str,
expected=expected_connector)
def test_detach_volume_evacuate_legacy(self):
"""Test coverage for evacuate with legacy attachments.
In this case, legacy means the volume was attached to the instance
before nova stashed the connector in connection_info. The connector
sent to terminate_connection will still be for the local host in this
case because nova does not have the info to get the connector for the
original (evacuated) host.
"""
conn_info_str = '{"foo": "bar"}' # Has no 'connector'.
self._test_detach_volume_evacuate(conn_info_str)
def test_detach_volume_evacuate_mismatch(self):
"""Test coverage for evacuate with connector mismatch.
For evacuate, if the stashed connector also has the wrong host,
then log it and stay with the local connector.
"""
conn_info_str = '{"connector": {"host": "other-host"}}'
self._test_detach_volume_evacuate(conn_info_str)
@mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_id')
@mock.patch('nova.compute.manager.ComputeManager.'
'_notify_about_instance_usage')
def _test_detach_volume_evacuate(self, conn_info_str, notify_inst_usage,
bdm_get, expected=None):
"""Re-usable code for detach volume evacuate test cases.
:param conn_info_str: String form of the stashed connector.
:param expected: Dict of the connector that is expected in the
terminate call (optional). Default is to expect the
local connector to be used.
"""
volume_id = 'vol_id'
instance = fake_instance.fake_instance_obj(self.context,
host='evacuated-host')
bdm = mock.Mock()
bdm.connection_info = conn_info_str
bdm_get.return_value = bdm
local_connector = {'host': 'local-connector-host'}
expected_connector = local_connector if not expected else expected
with mock.patch.object(self.compute, 'volume_api') as volume_api:
with mock.patch.object(self.compute, 'driver') as driver:
driver.get_volume_connector.return_value = local_connector
self.compute._detach_volume(self.context,
volume_id,
instance,
destroy_bdm=False)
driver.get_volume_connector.assert_called_once_with(instance)
volume_api.terminate_connection.assert_called_once_with(
self.context, volume_id, expected_connector)
volume_api.detach.assert_called_once_with(mock.ANY, volume_id)
notify_inst_usage.assert_called_once_with(
self.context, instance, "volume.detach",
extra_usage_info={'volume_id': volume_id}
)
def test__driver_detach_volume_return(self):
"""_driver_detach_volume returns the connection_info from loads()."""
with mock.patch.object(jsonutils, 'loads') as loads:
conn_info_str = 'test-expected-loads-param'
bdm = mock.Mock()
bdm.connection_info = conn_info_str
loads.return_value = {'test-loads-key': 'test loads return value'}
instance = fake_instance.fake_instance_obj(self.context)
ret = self.compute._driver_detach_volume(self.context,
instance,
bdm)
self.assertEqual(loads.return_value, ret)
loads.assert_called_once_with(conn_info_str)
def _test_rescue(self, clean_shutdown=True):
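        # Shared rescue path: verifies the start/end notifications, power off,
        # driver rescue call and the resulting RESCUED vm_state.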
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.ACTIVE)
fake_nw_info = network_model.NetworkInfo()
rescue_image_meta = {'id': 'fake', 'name': 'fake'}
with contextlib.nested(
mock.patch.object(self.context, 'elevated',
return_value=self.context),
mock.patch.object(self.compute.network_api, 'get_instance_nw_info',
return_value=fake_nw_info),
mock.patch.object(self.compute, '_get_rescue_image',
return_value=rescue_image_meta),
mock.patch.object(self.compute, '_notify_about_instance_usage'),
mock.patch.object(self.compute, '_power_off_instance'),
mock.patch.object(self.compute.driver, 'rescue'),
mock.patch.object(compute_utils, 'notify_usage_exists'),
mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING),
mock.patch.object(instance, 'save')
) as (
elevated_context, get_nw_info,
get_rescue_image, notify_instance_usage, power_off_instance,
driver_rescue, notify_usage_exists, get_power_state, instance_save
):
self.compute.rescue_instance(
self.context, instance, rescue_password='verybadpass',
rescue_image_ref=None, clean_shutdown=clean_shutdown)
# assert the field values on the instance object
self.assertEqual(vm_states.RESCUED, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertEqual(power_state.RUNNING, instance.power_state)
self.assertIsNotNone(instance.launched_at)
# assert our mock calls
get_nw_info.assert_called_once_with(self.context, instance)
get_rescue_image.assert_called_once_with(
self.context, instance, None)
extra_usage_info = {'rescue_image_name': 'fake'}
notify_calls = [
mock.call(self.context, instance, "rescue.start",
extra_usage_info=extra_usage_info,
network_info=fake_nw_info),
mock.call(self.context, instance, "rescue.end",
extra_usage_info=extra_usage_info,
network_info=fake_nw_info)
]
notify_instance_usage.assert_has_calls(notify_calls)
power_off_instance.assert_called_once_with(self.context, instance,
clean_shutdown)
driver_rescue.assert_called_once_with(
self.context, instance, fake_nw_info, rescue_image_meta,
'verybadpass')
notify_usage_exists.assert_called_once_with(self.compute.notifier,
self.context, instance, current_period=True)
instance_save.assert_called_once_with(
expected_task_state=task_states.RESCUING)
def test_rescue(self):
self._test_rescue()
def test_rescue_forced_shutdown(self):
self._test_rescue(clean_shutdown=False)
def test_unrescue(self):
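        # Unrescue calls the driver, sends start/end notifications and puts
        # the instance back into ACTIVE.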
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.RESCUED)
fake_nw_info = network_model.NetworkInfo()
with contextlib.nested(
mock.patch.object(self.context, 'elevated',
return_value=self.context),
mock.patch.object(self.compute.network_api, 'get_instance_nw_info',
return_value=fake_nw_info),
mock.patch.object(self.compute, '_notify_about_instance_usage'),
mock.patch.object(self.compute.driver, 'unrescue'),
mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.RUNNING),
mock.patch.object(instance, 'save')
) as (
elevated_context, get_nw_info,
notify_instance_usage, driver_unrescue, get_power_state,
instance_save
):
self.compute.unrescue_instance(self.context, instance)
# assert the field values on the instance object
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
self.assertEqual(power_state.RUNNING, instance.power_state)
# assert our mock calls
get_nw_info.assert_called_once_with(self.context, instance)
notify_calls = [
mock.call(self.context, instance, "unrescue.start",
network_info=fake_nw_info),
mock.call(self.context, instance, "unrescue.end",
network_info=fake_nw_info)
]
notify_instance_usage.assert_has_calls(notify_calls)
driver_unrescue.assert_called_once_with(instance, fake_nw_info)
instance_save.assert_called_once_with(
expected_task_state=task_states.UNRESCUING)
@mock.patch('nova.compute.manager.ComputeManager._get_power_state',
return_value=power_state.RUNNING)
@mock.patch.object(objects.Instance, 'save')
@mock.patch('nova.utils.generate_password', return_value='fake-pass')
def test_set_admin_password(self, gen_password_mock,
instance_save_mock, power_state_mock):
# Ensure instance can have its admin password set.
instance = fake_instance.fake_instance_obj(
self.context,
vm_state=vm_states.ACTIVE,
task_state=task_states.UPDATING_PASSWORD)
@mock.patch.object(self.context, 'elevated', return_value=self.context)
@mock.patch.object(self.compute.driver, 'set_admin_password')
def do_test(driver_mock, elevated_mock):
# call the manager method
self.compute.set_admin_password(self.context, instance, None)
# make our assertions
self.assertEqual(vm_states.ACTIVE, instance.vm_state)
self.assertIsNone(instance.task_state)
power_state_mock.assert_called_once_with(self.context, instance)
driver_mock.assert_called_once_with(instance, 'fake-pass')
instance_save_mock.assert_called_once_with(
expected_task_state=task_states.UPDATING_PASSWORD)
do_test()
@mock.patch('nova.compute.manager.ComputeManager._get_power_state',
return_value=power_state.NOSTATE)
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
def test_set_admin_password_bad_state(self, add_fault_mock,
instance_save_mock,
update_mock,
power_state_mock):
# Test setting password while instance is rebuilding.
instance = fake_instance.fake_instance_obj(self.context)
with mock.patch.object(self.context, 'elevated',
return_value=self.context):
# call the manager method
self.assertRaises(exception.InstancePasswordSetFailed,
self.compute.set_admin_password,
self.context, instance, None)
# make our assertions
power_state_mock.assert_called_once_with(self.context, instance)
instance_save_mock.assert_called_once_with(
expected_task_state=task_states.UPDATING_PASSWORD)
add_fault_mock.assert_called_once_with(
self.context, instance, mock.ANY, mock.ANY)
@mock.patch('nova.utils.generate_password', return_value='fake-pass')
@mock.patch('nova.compute.manager.ComputeManager._get_power_state',
return_value=power_state.RUNNING)
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
def _do_test_set_admin_password_driver_error(self, exc,
expected_vm_state,
expected_task_state,
expected_exception,
add_fault_mock,
instance_save_mock,
update_mock,
power_state_mock,
gen_password_mock):
# Ensure expected exception is raised if set_admin_password fails.
instance = fake_instance.fake_instance_obj(
self.context,
vm_state=vm_states.ACTIVE,
task_state=task_states.UPDATING_PASSWORD)
@mock.patch.object(self.context, 'elevated', return_value=self.context)
@mock.patch.object(self.compute.driver, 'set_admin_password',
side_effect=exc)
def do_test(driver_mock, elevated_mock):
# error raised from the driver should not reveal internal
# information so a new error is raised
self.assertRaises(expected_exception,
self.compute.set_admin_password,
self.context,
instance=instance,
new_pass=None)
if expected_exception == NotImplementedError:
instance_save_mock.assert_called_once_with(
expected_task_state=task_states.UPDATING_PASSWORD)
else:
# setting the instance to error state
instance_save_mock.assert_called_once_with()
self.assertEqual(expected_vm_state, instance.vm_state)
# check revert_task_state decorator
update_mock.assert_called_once_with(
self.context, instance, task_state=expected_task_state)
# check wrap_instance_fault decorator
add_fault_mock.assert_called_once_with(
self.context, instance, mock.ANY, mock.ANY)
do_test()
def test_set_admin_password_driver_not_authorized(self):
# Ensure expected exception is raised if set_admin_password not
# authorized.
exc = exception.Forbidden('Internal error')
expected_exception = exception.InstancePasswordSetFailed
self._do_test_set_admin_password_driver_error(
exc, vm_states.ERROR, None, expected_exception)
def test_set_admin_password_driver_not_implemented(self):
# Ensure expected exception is raised if set_admin_password not
# implemented by driver.
exc = NotImplementedError()
expected_exception = NotImplementedError
self._do_test_set_admin_password_driver_error(
exc, vm_states.ACTIVE, None, expected_exception)
def test_destroy_evacuated_instances(self):
our_host = self.compute.host
instance_1 = objects.Instance(self.context)
instance_1.uuid = 'foo'
instance_1.task_state = None
instance_1.vm_state = vm_states.ACTIVE
instance_1.host = 'not-' + our_host
instance_2 = objects.Instance(self.context)
instance_2.uuid = 'bar'
instance_2.task_state = None
instance_2.vm_state = vm_states.ACTIVE
instance_2.host = 'not-' + our_host
# Only instance 2 has a migration record
migration = objects.Migration(instance_uuid=instance_2.uuid)
# Consider the migration successful
migration.status = 'done'
with contextlib.nested(
mock.patch.object(self.compute, '_get_instances_on_driver',
return_value=[instance_1,
instance_2]),
mock.patch.object(self.compute.network_api, 'get_instance_nw_info',
return_value=None),
mock.patch.object(self.compute, '_get_instance_block_device_info',
return_value={}),
mock.patch.object(self.compute, '_is_instance_storage_shared',
return_value=False),
mock.patch.object(self.compute.driver, 'destroy'),
mock.patch('nova.objects.MigrationList.get_by_filters'),
mock.patch('nova.objects.Migration.save')
) as (_get_instances_on_driver, get_instance_nw_info,
_get_instance_block_device_info, _is_instance_storage_shared,
destroy, migration_list, migration_save):
migration_list.return_value = [migration]
self.compute._destroy_evacuated_instances(self.context)
# Only instance 2 should be deleted. Instance 1 is still running
# here, but no migration from our host exists, so ignore it
destroy.assert_called_once_with(self.context, instance_2, None,
{}, True)
@mock.patch('nova.compute.manager.ComputeManager.'
'_destroy_evacuated_instances')
@mock.patch('nova.compute.manager.LOG')
def test_init_host_foreign_instance(self, mock_log, mock_destroy):
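        # An instance that is not owned by this host is skipped with a
        # warning and never saved.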
inst = mock.MagicMock()
inst.host = self.compute.host + '-alt'
self.compute._init_instance(mock.sentinel.context, inst)
self.assertFalse(inst.save.called)
self.assertTrue(mock_log.warning.called)
msg = mock_log.warning.call_args_list[0]
self.assertIn('appears to not be owned by this host', msg[0][0])
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
def test_error_out_instance_on_exception_not_implemented_err(self,
inst_update_mock):
instance = fake_instance.fake_instance_obj(self.context)
def do_test():
with self.compute._error_out_instance_on_exception(
self.context, instance, instance_state=vm_states.STOPPED):
raise NotImplementedError('test')
self.assertRaises(NotImplementedError, do_test)
inst_update_mock.assert_called_once_with(
self.context, instance,
vm_state=vm_states.STOPPED, task_state=None)
@mock.patch('nova.compute.manager.ComputeManager._instance_update')
def test_error_out_instance_on_exception_inst_fault_rollback(self,
inst_update_mock):
instance = fake_instance.fake_instance_obj(self.context)
def do_test():
with self.compute._error_out_instance_on_exception(self.context,
instance):
raise exception.InstanceFaultRollback(
inner_exception=test.TestingException('test'))
self.assertRaises(test.TestingException, do_test)
inst_update_mock.assert_called_once_with(
self.context, instance,
vm_state=vm_states.ACTIVE, task_state=None)
@mock.patch('nova.compute.manager.ComputeManager.'
'_set_instance_obj_error_state')
def test_error_out_instance_on_exception_unknown_with_quotas(self,
set_error):
instance = fake_instance.fake_instance_obj(self.context)
quotas = mock.create_autospec(objects.Quotas, spec_set=True)
def do_test():
with self.compute._error_out_instance_on_exception(
self.context, instance, quotas):
raise test.TestingException('test')
self.assertRaises(test.TestingException, do_test)
self.assertEqual(1, len(quotas.method_calls))
self.assertEqual(mock.call.rollback(), quotas.method_calls[0])
set_error.assert_called_once_with(self.context, instance)
def test_cleanup_volumes(self):
instance = fake_instance.fake_instance_obj(self.context)
bdm_do_not_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'fake-id1', 'source_type': 'image',
'delete_on_termination': False})
bdm_delete_dict = fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'fake-id2', 'source_type': 'image',
'delete_on_termination': True})
bdms = block_device_obj.block_device_make_list(self.context,
[bdm_do_not_delete_dict, bdm_delete_dict])
with mock.patch.object(self.compute.volume_api,
'delete') as volume_delete:
self.compute._cleanup_volumes(self.context, instance.uuid, bdms)
volume_delete.assert_called_once_with(self.context,
bdms[1].volume_id)
def test_cleanup_volumes_exception_do_not_raise(self):
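        # With raise_exc=False a failure to delete one volume must not stop
        # the remaining volumes from being deleted.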
instance = fake_instance.fake_instance_obj(self.context)
bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'fake-id1', 'source_type': 'image',
'delete_on_termination': True})
bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'fake-id2', 'source_type': 'image',
'delete_on_termination': True})
bdms = block_device_obj.block_device_make_list(self.context,
[bdm_dict1, bdm_dict2])
with mock.patch.object(self.compute.volume_api,
'delete',
side_effect=[test.TestingException(), None]) as volume_delete:
self.compute._cleanup_volumes(self.context, instance.uuid, bdms,
raise_exc=False)
calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms]
self.assertEqual(calls, volume_delete.call_args_list)
def test_cleanup_volumes_exception_raise(self):
instance = fake_instance.fake_instance_obj(self.context)
bdm_dict1 = fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'fake-id1', 'source_type': 'image',
'delete_on_termination': True})
bdm_dict2 = fake_block_device.FakeDbBlockDeviceDict(
{'volume_id': 'fake-id2', 'source_type': 'image',
'delete_on_termination': True})
bdms = block_device_obj.block_device_make_list(self.context,
[bdm_dict1, bdm_dict2])
with mock.patch.object(self.compute.volume_api,
'delete',
side_effect=[test.TestingException(), None]) as volume_delete:
self.assertRaises(test.TestingException,
self.compute._cleanup_volumes, self.context, instance.uuid,
bdms)
calls = [mock.call(self.context, bdm.volume_id) for bdm in bdms]
self.assertEqual(calls, volume_delete.call_args_list)
def test_stop_instance_task_state_none_power_state_shutdown(self):
# Tests that stop_instance doesn't puke when the instance power_state
# is shutdown and the task_state is None.
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.ACTIVE,
task_state=None, power_state=power_state.SHUTDOWN)
@mock.patch.object(self.compute, '_get_power_state',
return_value=power_state.SHUTDOWN)
@mock.patch.object(self.compute, '_notify_about_instance_usage')
@mock.patch.object(self.compute, '_power_off_instance')
@mock.patch.object(instance, 'save')
def do_test(save_mock, power_off_mock, notify_mock, get_state_mock):
# run the code
self.compute.stop_instance(self.context, instance, True)
# assert the calls
self.assertEqual(2, get_state_mock.call_count)
notify_mock.assert_has_calls([
mock.call(self.context, instance, 'power_off.start'),
mock.call(self.context, instance, 'power_off.end')
])
power_off_mock.assert_called_once_with(
self.context, instance, True)
save_mock.assert_called_once_with(
expected_task_state=[task_states.POWERING_OFF, None])
self.assertEqual(power_state.SHUTDOWN, instance.power_state)
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.STOPPED, instance.vm_state)
do_test()
def test_reset_network_driver_not_implemented(self):
instance = fake_instance.fake_instance_obj(self.context)
@mock.patch.object(self.compute.driver, 'reset_network',
side_effect=NotImplementedError())
@mock.patch.object(compute_utils, 'add_instance_fault_from_exc')
def do_test(mock_add_fault, mock_reset):
self.assertRaises(messaging.ExpectedException,
self.compute.reset_network,
self.context,
instance)
self.compute = utils.ExceptionHelper(self.compute)
self.assertRaises(NotImplementedError,
self.compute.reset_network,
self.context,
instance)
do_test()
def test_rebuild_default_impl(self):
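        # The default rebuild powers off the instance and detaches block
        # devices before destroy, then re-attaches them and passes the
        # resulting block device info to spawn.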
def _detach(context, bdms):
            # NOTE(rpodolyaka): check that the instance has been powered off
            # by the time we detach block devices; the exact call arguments
            # are checked below
self.assertTrue(mock_power_off.called)
self.assertFalse(mock_destroy.called)
def _attach(context, instance, bdms, do_check_attach=True):
return {'block_device_mapping': 'shared_block_storage'}
def _spawn(context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
self.assertEqual(block_device_info['block_device_mapping'],
'shared_block_storage')
with contextlib.nested(
mock.patch.object(self.compute.driver, 'destroy',
return_value=None),
mock.patch.object(self.compute.driver, 'spawn',
side_effect=_spawn),
mock.patch.object(objects.Instance, 'save',
return_value=None),
mock.patch.object(self.compute, '_power_off_instance',
return_value=None)
        ) as (
mock_destroy,
mock_spawn,
mock_save,
mock_power_off
):
instance = fake_instance.fake_instance_obj(self.context)
instance.migration_context = None
instance.numa_topology = None
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[task_states.REBUILDING])
self.compute._rebuild_default_impl(self.context,
instance,
None,
[],
admin_password='new_pass',
bdms=[],
detach_block_devices=_detach,
attach_block_devices=_attach,
network_info=None,
recreate=False,
block_device_info=None,
preserve_ephemeral=False)
self.assertTrue(mock_save.called)
self.assertTrue(mock_spawn.called)
mock_destroy.assert_called_once_with(
self.context, instance,
network_info=None, block_device_info=None)
mock_power_off.assert_called_once_with(
self.context, instance, clean_shutdown=True)
@mock.patch.object(utils, 'last_completed_audit_period',
return_value=(0, 0))
@mock.patch.object(time, 'time', side_effect=[10, 20, 21])
@mock.patch.object(objects.InstanceList, 'get_by_host', return_value=[])
@mock.patch.object(objects.BandwidthUsage, 'get_by_instance_uuid_and_mac')
@mock.patch.object(db, 'bw_usage_update')
def test_poll_bandwidth_usage(self, bw_usage_update, get_by_uuid_mac,
get_by_host, time, last_completed_audit):
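        # Bandwidth deltas from the driver counters are added to the stored
        # usage totals and written out via bw_usage_update.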
bw_counters = [{'uuid': 'fake-uuid', 'mac_address': 'fake-mac',
'bw_in': 1, 'bw_out': 2}]
usage = objects.BandwidthUsage()
usage.bw_in = 3
usage.bw_out = 4
usage.last_ctr_in = 0
usage.last_ctr_out = 0
self.flags(bandwidth_poll_interval=1)
get_by_uuid_mac.return_value = usage
_time = timeutils.utcnow()
bw_usage_update.return_value = {'uuid': '', 'mac': '',
'start_period': _time, 'last_refreshed': _time, 'bw_in': 0,
'bw_out': 0, 'last_ctr_in': 0, 'last_ctr_out': 0, 'deleted': 0,
'created_at': _time, 'updated_at': _time, 'deleted_at': _time}
with mock.patch.object(self.compute.driver,
'get_all_bw_counters', return_value=bw_counters):
self.compute._poll_bandwidth_usage(self.context)
get_by_uuid_mac.assert_called_once_with(self.context, 'fake-uuid',
'fake-mac', start_period=0, use_slave=True)
            # NOTE(sdague): bw_usage_update happens at some point in
            # the future, so the exact value of last_refreshed is irrelevant.
bw_usage_update.assert_called_once_with(self.context, 'fake-uuid',
'fake-mac', 0, 4, 6, 1, 2,
last_refreshed=mock.ANY,
update_cells=False)
def test_reverts_task_state_instance_not_found(self):
# Tests that the reverts_task_state decorator in the compute manager
# will not trace when an InstanceNotFound is raised.
instance = objects.Instance(uuid='fake')
instance_update_mock = mock.Mock(
side_effect=exception.InstanceNotFound(instance_id=instance.uuid))
self.compute._instance_update = instance_update_mock
log_mock = mock.Mock()
manager.LOG = log_mock
@manager.reverts_task_state
def fake_function(self, context, instance):
raise test.TestingException()
self.assertRaises(test.TestingException, fake_function,
self, self.context, instance)
self.assertFalse(log_mock.called)
@mock.patch.object(nova.scheduler.client.SchedulerClient,
'update_instance_info')
def test_update_scheduler_instance_info(self, mock_update):
instance = objects.Instance(uuid='fake')
self.compute._update_scheduler_instance_info(self.context, instance)
self.assertEqual(mock_update.call_count, 1)
args = mock_update.call_args[0]
self.assertNotEqual(args[0], self.context)
self.assertIsInstance(args[0], self.context.__class__)
self.assertEqual(args[1], self.compute.host)
# Send a single instance; check that the method converts to an
# InstanceList
self.assertIsInstance(args[2], objects.InstanceList)
self.assertEqual(args[2].objects[0], instance)
@mock.patch.object(nova.scheduler.client.SchedulerClient,
'delete_instance_info')
def test_delete_scheduler_instance_info(self, mock_delete):
self.compute._delete_scheduler_instance_info(self.context,
mock.sentinel.inst_uuid)
self.assertEqual(mock_delete.call_count, 1)
args = mock_delete.call_args[0]
self.assertNotEqual(args[0], self.context)
self.assertIsInstance(args[0], self.context.__class__)
self.assertEqual(args[1], self.compute.host)
self.assertEqual(args[2], mock.sentinel.inst_uuid)
@mock.patch.object(nova.context.RequestContext, 'elevated')
@mock.patch.object(nova.objects.InstanceList, 'get_by_host')
@mock.patch.object(nova.scheduler.client.SchedulerClient,
'sync_instance_info')
def test_sync_scheduler_instance_info(self, mock_sync, mock_get_by_host,
mock_elevated):
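        # All instances on the host are fetched with an elevated context and
        # their uuids are sent to the scheduler in a single sync call.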
inst1 = objects.Instance(uuid='fake1')
inst2 = objects.Instance(uuid='fake2')
inst3 = objects.Instance(uuid='fake3')
exp_uuids = [inst.uuid for inst in [inst1, inst2, inst3]]
mock_get_by_host.return_value = objects.InstanceList(
objects=[inst1, inst2, inst3])
fake_elevated = context.get_admin_context()
mock_elevated.return_value = fake_elevated
self.compute._sync_scheduler_instance_info(self.context)
mock_get_by_host.assert_called_once_with(
fake_elevated, self.compute.host, expected_attrs=[],
use_slave=True)
mock_sync.assert_called_once_with(fake_elevated, self.compute.host,
exp_uuids)
@mock.patch.object(nova.scheduler.client.SchedulerClient,
'sync_instance_info')
@mock.patch.object(nova.scheduler.client.SchedulerClient,
'delete_instance_info')
@mock.patch.object(nova.scheduler.client.SchedulerClient,
'update_instance_info')
def test_scheduler_info_updates_off(self, mock_update, mock_delete,
mock_sync):
mgr = self.compute
mgr.send_instance_updates = False
mgr._update_scheduler_instance_info(self.context,
mock.sentinel.instance)
mgr._delete_scheduler_instance_info(self.context,
mock.sentinel.instance_uuid)
mgr._sync_scheduler_instance_info(self.context)
# None of the calls should have been made
self.assertFalse(mock_update.called)
self.assertFalse(mock_delete.called)
self.assertFalse(mock_sync.called)
def test_refresh_instance_security_rules_takes_non_object(self):
inst = fake_instance.fake_db_instance()
with mock.patch.object(self.compute.driver,
'refresh_instance_security_rules') as mock_r:
self.compute.refresh_instance_security_rules(self.context, inst)
self.assertIsInstance(mock_r.call_args_list[0][0][0],
objects.Instance)
def test_set_instance_obj_error_state_with_clean_task_state(self):
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.BUILDING, task_state=task_states.SPAWNING)
with mock.patch.object(instance, 'save'):
self.compute._set_instance_obj_error_state(self.context, instance,
clean_task_state=True)
self.assertEqual(vm_states.ERROR, instance.vm_state)
self.assertIsNone(instance.task_state)
def test_set_instance_obj_error_state_by_default(self):
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.BUILDING, task_state=task_states.SPAWNING)
with mock.patch.object(instance, 'save'):
self.compute._set_instance_obj_error_state(self.context, instance)
self.assertEqual(vm_states.ERROR, instance.vm_state)
self.assertEqual(task_states.SPAWNING, instance.task_state)
@mock.patch.object(objects.Instance, 'save')
def test_instance_update(self, mock_save):
instance = objects.Instance(task_state=task_states.SCHEDULING,
vm_state=vm_states.BUILDING)
updates = {'task_state': None, 'vm_state': vm_states.ERROR}
with mock.patch.object(self.compute,
'_update_resource_tracker') as mock_rt:
self.compute._instance_update(self.context, instance, **updates)
self.assertIsNone(instance.task_state)
self.assertEqual(vm_states.ERROR, instance.vm_state)
mock_save.assert_called_once_with()
mock_rt.assert_called_once_with(self.context, instance)
@mock.patch('nova.objects.BlockDeviceMappingList.get_by_instance_uuid')
@mock.patch('nova.compute.manager.ComputeManager._delete_instance')
def test_terminate_instance_no_bdm_volume_id(self, mock_delete_instance,
mock_bdm_get_by_inst):
# Tests that we refresh the bdm list if a volume bdm does not have the
# volume_id set.
instance = fake_instance.fake_instance_obj(
self.context, vm_state=vm_states.ERROR,
task_state=task_states.DELETING)
bdm = fake_block_device.FakeDbBlockDeviceDict(
{'source_type': 'snapshot', 'destination_type': 'volume',
'instance_uuid': instance.uuid, 'device_name': '/dev/vda'})
bdms = block_device_obj.block_device_make_list(self.context, [bdm])
# since the bdms passed in don't have a volume_id, we'll go back to the
# database looking for updated versions
mock_bdm_get_by_inst.return_value = bdms
self.compute.terminate_instance(self.context, instance, bdms, [])
mock_bdm_get_by_inst.assert_called_once_with(
self.context, instance.uuid)
mock_delete_instance.assert_called_once_with(
self.context, instance, bdms, mock.ANY)
class ComputeManagerBuildInstanceTestCase(test.NoDBTestCase):
def setUp(self):
super(ComputeManagerBuildInstanceTestCase, self).setUp()
self.compute = importutils.import_object(CONF.compute_manager)
self.context = context.RequestContext('fake', 'fake')
self.instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE,
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
self.admin_pass = 'pass'
self.injected_files = []
self.image = {}
self.node = 'fake-node'
self.limits = {}
self.requested_networks = []
self.security_groups = []
self.block_device_mapping = []
self.filter_properties = {'retry': {'num_attempts': 1,
'hosts': [[self.compute.host,
'fake-node']]}}
def fake_network_info():
return network_model.NetworkInfo([{'address': '1.2.3.4'}])
self.network_info = network_model.NetworkInfoAsyncWrapper(
fake_network_info)
self.block_device_info = self.compute._prep_block_device(context,
self.instance, self.block_device_mapping)
# override tracker with a version that doesn't need the database:
fake_rt = fake_resource_tracker.FakeResourceTracker(self.compute.host,
self.compute.driver, self.node)
self.compute._resource_tracker_dict[self.node] = fake_rt
def _do_build_instance_update(self, reschedule_update=False):
self.mox.StubOutWithMock(self.instance, 'save')
self.instance.save(
expected_task_state=(task_states.SCHEDULING, None)).AndReturn(
self.instance)
if reschedule_update:
self.instance.save().AndReturn(self.instance)
def _build_and_run_instance_update(self):
self.mox.StubOutWithMock(self.instance, 'save')
self._build_resources_instance_update(stub=False)
self.instance.save(expected_task_state=
task_states.BLOCK_DEVICE_MAPPING).AndReturn(self.instance)
def _build_resources_instance_update(self, stub=True):
if stub:
self.mox.StubOutWithMock(self.instance, 'save')
self.instance.save().AndReturn(self.instance)
def _notify_about_instance_usage(self, event, stub=True, **kwargs):
if stub:
self.mox.StubOutWithMock(self.compute,
'_notify_about_instance_usage')
self.compute._notify_about_instance_usage(self.context, self.instance,
event, **kwargs)
def _instance_action_events(self):
self.mox.StubOutWithMock(objects.InstanceActionEvent, 'event_start')
self.mox.StubOutWithMock(objects.InstanceActionEvent,
'event_finish_with_failure')
objects.InstanceActionEvent.event_start(
self.context, self.instance.uuid, mox.IgnoreArg(),
want_result=False)
objects.InstanceActionEvent.event_finish_with_failure(
self.context, self.instance.uuid, mox.IgnoreArg(),
exc_val=mox.IgnoreArg(), exc_tb=mox.IgnoreArg(),
want_result=False)
@staticmethod
def _assert_build_instance_hook_called(mock_hooks, result):
# NOTE(coreywright): we want to test the return value of
# _do_build_and_run_instance, but it doesn't bubble all the way up, so
# mock the hooking, which allows us to test that too, though a little
# too intimately
mock_hooks.setdefault().run_post.assert_called_once_with(
'build_instance', result, mock.ANY, mock.ANY, f=None)
@mock.patch('nova.hooks._HOOKS')
@mock.patch('nova.utils.spawn_n')
def test_build_and_run_instance_called_with_proper_args(self, mock_spawn,
mock_hooks):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self._do_build_instance_update()
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.ACTIVE)
    # This test ensures that when an icehouse-compatible RPC call is sent to
    # a juno compute node, the NetworkRequest object can be loaded from a
    # three-item tuple.
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.compute.manager.ComputeManager._build_and_run_instance')
@mock.patch('nova.utils.spawn_n')
def test_build_and_run_instance_with_icehouse_requested_network(
self, mock_spawn, mock_build_and_run, mock_save):
fake_server_actions.stub_out_action_events(self.stubs)
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
mock_save.return_value = self.instance
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=[objects.NetworkRequest(
network_id='fake_network_id',
address='10.0.0.1',
port_id='fake_port_id')],
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
requested_network = mock_build_and_run.call_args[0][5][0]
self.assertEqual('fake_network_id', requested_network.network_id)
self.assertEqual('10.0.0.1', str(requested_network.address))
self.assertEqual('fake_port_id', requested_network.port_id)
@mock.patch('nova.hooks._HOOKS')
@mock.patch('nova.utils.spawn_n')
def test_build_abort_exception(self, mock_spawn, mock_hooks):
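        # A BuildAbortException cleans up networks and volumes, records a
        # fault and puts the instance into ERROR; the hook reports FAILED.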
def fake_spawn(f, *args, **kwargs):
# NOTE(danms): Simulate the detached nature of spawn so that
# we confirm that the inner task has the fault logic
try:
return f(*args, **kwargs)
except Exception:
pass
mock_spawn.side_effect = fake_spawn
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
self._do_build_instance_update()
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(
exception.BuildAbortException(reason='',
instance_uuid=self.instance.uuid))
self.compute._cleanup_allocated_networks(self.context, self.instance,
self.requested_networks)
self.compute._cleanup_volumes(self.context, self.instance.uuid,
self.block_device_mapping, raise_exc=False)
compute_utils.add_instance_fault_from_exc(self.context,
self.instance, mox.IgnoreArg(), mox.IgnoreArg())
self.compute._nil_out_instance_obj_host_and_node(self.instance)
self.compute._set_instance_obj_error_state(self.context, self.instance,
clean_task_state=True)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.FAILED)
@mock.patch('nova.hooks._HOOKS')
@mock.patch('nova.utils.spawn_n')
def test_rescheduled_exception(self, mock_spawn, mock_hooks):
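        # A RescheduledException with retry info cleans up the instance
        # network on this host and hands the build back to conductor; the
        # hook reports RESCHEDULED.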
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self.mox.StubOutWithMock(self.compute.network_api,
'cleanup_instance_network_on_host')
self._do_build_instance_update(reschedule_update=True)
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(
exception.RescheduledException(reason='',
instance_uuid=self.instance.uuid))
self.compute.network_api.cleanup_instance_network_on_host(self.context,
self.instance, self.compute.host)
self.compute._nil_out_instance_obj_host_and_node(self.instance)
self.compute.compute_task_api.build_instances(self.context,
[self.instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
self.security_groups, self.block_device_mapping)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.RESCHEDULED)
def test_rescheduled_exception_with_non_ascii_exception(self):
exc = exception.NovaException(u's\xe9quence')
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
self.network_info)
self.compute._shutdown_instance(self.context, self.instance,
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
self.compute.driver.spawn(self.context, self.instance, self.image,
self.injected_files, self.admin_pass,
network_info=self.network_info,
block_device_info=self.block_device_info).AndRaise(exc)
self._notify_about_instance_usage('create.error',
fault=exc, stub=False)
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'save') as mock_save:
self.assertRaises(exception.RescheduledException,
self.compute._build_and_run_instance,
self.context, self.instance, self.image,
self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node,
self.limits, self.filter_properties)
mock_save.assert_has_calls([
mock.call(),
mock.call(),
mock.call(expected_task_state='block_device_mapping'),
])
@mock.patch.object(manager.ComputeManager, '_build_and_run_instance')
@mock.patch.object(conductor_api.ComputeTaskAPI, 'build_instances')
@mock.patch.object(network_api.API, 'cleanup_instance_network_on_host')
@mock.patch.object(objects.Instance, 'save')
@mock.patch.object(objects.InstanceActionEvent, 'event_start')
@mock.patch.object(objects.InstanceActionEvent,
'event_finish_with_failure')
@mock.patch.object(virt_driver.ComputeDriver, 'macs_for_instance')
def test_rescheduled_exception_with_network_allocated(self,
mock_macs_for_instance, mock_event_finish,
mock_event_start, mock_ins_save, mock_cleanup_network,
mock_build_ins, mock_build_and_run):
instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE,
system_metadata={'network_allocated': 'True'},
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
mock_ins_save.return_value = instance
mock_macs_for_instance.return_value = []
mock_build_and_run.side_effect = exception.RescheduledException(
reason='', instance_uuid=self.instance.uuid)
self.compute._do_build_and_run_instance(self.context, instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
mock_build_and_run.assert_called_once_with(self.context,
instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties)
mock_cleanup_network.assert_called_once_with(
self.context, instance, self.compute.host)
mock_build_ins.assert_called_once_with(self.context,
[instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
self.security_groups, self.block_device_mapping)
@mock.patch('nova.hooks._HOOKS')
@mock.patch('nova.utils.spawn_n')
def test_rescheduled_exception_without_retry(self, mock_spawn, mock_hooks):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
self.mox.StubOutWithMock(self.compute, '_set_instance_obj_error_state')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self._do_build_instance_update()
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
{}).AndRaise(
exception.RescheduledException(reason='',
instance_uuid=self.instance.uuid))
self.compute._cleanup_allocated_networks(self.context, self.instance,
self.requested_networks)
compute_utils.add_instance_fault_from_exc(self.context, self.instance,
mox.IgnoreArg(), mox.IgnoreArg())
self.compute._nil_out_instance_obj_host_and_node(self.instance)
self.compute._set_instance_obj_error_state(self.context, self.instance,
clean_task_state=True)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties={},
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.FAILED)
@mock.patch('nova.hooks._HOOKS')
@mock.patch('nova.utils.spawn_n')
def test_rescheduled_exception_do_not_deallocate_network(self, mock_spawn,
mock_hooks):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute.driver,
'deallocate_networks_on_reschedule')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
self.mox.StubOutWithMock(self.compute.network_api,
'cleanup_instance_network_on_host')
self._do_build_instance_update(reschedule_update=True)
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(
exception.RescheduledException(reason='',
instance_uuid=self.instance.uuid))
self.compute.driver.deallocate_networks_on_reschedule(
self.instance).AndReturn(False)
self.compute.network_api.cleanup_instance_network_on_host(
self.context, self.instance, self.compute.host)
self.compute._nil_out_instance_obj_host_and_node(self.instance)
self.compute.compute_task_api.build_instances(self.context,
[self.instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
self.security_groups, self.block_device_mapping)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.RESCHEDULED)
@mock.patch('nova.hooks._HOOKS')
@mock.patch('nova.utils.spawn_n')
def test_rescheduled_exception_deallocate_network(self, mock_spawn,
mock_hooks):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute.driver,
'deallocate_networks_on_reschedule')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
self._do_build_instance_update(reschedule_update=True)
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(
exception.RescheduledException(reason='',
instance_uuid=self.instance.uuid))
self.compute.driver.deallocate_networks_on_reschedule(
self.instance).AndReturn(True)
self.compute._cleanup_allocated_networks(self.context, self.instance,
self.requested_networks)
self.compute._nil_out_instance_obj_host_and_node(self.instance)
self.compute.compute_task_api.build_instances(self.context,
[self.instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
self.security_groups, self.block_device_mapping)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.RESCHEDULED)
def _test_build_and_run_exceptions(self, exc, set_error=False,
cleanup_volumes=False, nil_out_host_and_node=False):
self.mox.StubOutWithMock(self.compute, '_build_and_run_instance')
self.mox.StubOutWithMock(self.compute, '_cleanup_allocated_networks')
self.mox.StubOutWithMock(self.compute, '_cleanup_volumes')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
self._do_build_instance_update()
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties).AndRaise(exc)
self.compute._cleanup_allocated_networks(self.context, self.instance,
self.requested_networks)
if cleanup_volumes:
self.compute._cleanup_volumes(self.context, self.instance.uuid,
self.block_device_mapping, raise_exc=False)
if nil_out_host_and_node:
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self.compute._nil_out_instance_obj_host_and_node(self.instance)
if set_error:
self.mox.StubOutWithMock(self.compute,
'_set_instance_obj_error_state')
self.mox.StubOutWithMock(compute_utils,
'add_instance_fault_from_exc')
compute_utils.add_instance_fault_from_exc(self.context,
self.instance, mox.IgnoreArg(), mox.IgnoreArg())
self.compute._set_instance_obj_error_state(self.context,
self.instance, clean_task_state=True)
self._instance_action_events()
self.mox.ReplayAll()
with contextlib.nested(
mock.patch('nova.utils.spawn_n'),
mock.patch('nova.hooks._HOOKS')
) as (
mock_spawn,
mock_hooks
):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
self._assert_build_instance_hook_called(mock_hooks,
build_results.FAILED)
def test_build_and_run_notfound_exception(self):
self._test_build_and_run_exceptions(exception.InstanceNotFound(
instance_id=''))
def test_build_and_run_unexpecteddeleting_exception(self):
self._test_build_and_run_exceptions(
exception.UnexpectedDeletingTaskStateError(
instance_uuid='fake_uuid', expected={}, actual={}))
def test_build_and_run_buildabort_exception(self):
self._test_build_and_run_exceptions(
exception.BuildAbortException(instance_uuid='', reason=''),
set_error=True, cleanup_volumes=True, nil_out_host_and_node=True)
def test_build_and_run_unhandled_exception(self):
self._test_build_and_run_exceptions(test.TestingException(),
set_error=True, cleanup_volumes=True,
nil_out_host_and_node=True)
def test_instance_not_found(self):
exc = exception.InstanceNotFound(instance_id=1)
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
self.network_info)
self.compute._shutdown_instance(self.context, self.instance,
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
self.compute.driver.spawn(self.context, self.instance, self.image,
self.injected_files, self.admin_pass,
network_info=self.network_info,
block_device_info=self.block_device_info).AndRaise(exc)
self._notify_about_instance_usage('create.end',
fault=exc, stub=False)
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'save') as mock_save:
self.assertRaises(exception.InstanceNotFound,
self.compute._build_and_run_instance,
self.context, self.instance, self.image,
self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node,
self.limits, self.filter_properties)
mock_save.assert_has_calls([
mock.call(),
mock.call(),
mock.call(expected_task_state='block_device_mapping'),
])
def test_reschedule_on_exception(self):
self.mox.StubOutWithMock(self.compute.driver, 'spawn')
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
self.network_info)
self.compute._shutdown_instance(self.context, self.instance,
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
exc = test.TestingException()
self.compute.driver.spawn(self.context, self.instance, self.image,
self.injected_files, self.admin_pass,
network_info=self.network_info,
block_device_info=self.block_device_info).AndRaise(exc)
self._notify_about_instance_usage('create.error',
fault=exc, stub=False)
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'save') as mock_save:
self.assertRaises(exception.RescheduledException,
self.compute._build_and_run_instance,
self.context, self.instance, self.image,
self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node,
self.limits, self.filter_properties)
mock_save.assert_has_calls([
mock.call(),
mock.call(),
mock.call(expected_task_state='block_device_mapping'),
])
def test_spawn_network_alloc_failure(self):
# Because network allocation is asynchronous, failures may not present
# themselves until the virt spawn method is called.
self._test_build_and_run_spawn_exceptions(exception.NoMoreNetworks())
def test_build_and_run_no_more_fixedips_exception(self):
self._test_build_and_run_spawn_exceptions(
            exception.NoMoreFixedIps("error message"))
def test_build_and_run_flavor_disk_smaller_image_exception(self):
self._test_build_and_run_spawn_exceptions(
exception.FlavorDiskSmallerThanImage(
flavor_size=0, image_size=1))
def test_build_and_run_flavor_disk_smaller_min_disk(self):
self._test_build_and_run_spawn_exceptions(
exception.FlavorDiskSmallerThanMinDisk(
flavor_size=0, image_min_disk=1))
def test_build_and_run_flavor_memory_too_small_exception(self):
self._test_build_and_run_spawn_exceptions(
exception.FlavorMemoryTooSmall())
def test_build_and_run_image_not_active_exception(self):
self._test_build_and_run_spawn_exceptions(
exception.ImageNotActive(image_id=self.image.get('id')))
def test_build_and_run_image_unacceptable_exception(self):
self._test_build_and_run_spawn_exceptions(
exception.ImageUnacceptable(image_id=self.image.get('id'),
reason=""))
def test_build_and_run_invalid_disk_info_exception(self):
self._test_build_and_run_spawn_exceptions(
exception.InvalidDiskInfo(reason=""))
def _test_build_and_run_spawn_exceptions(self, exc):
with contextlib.nested(
mock.patch.object(self.compute.driver, 'spawn',
side_effect=exc),
mock.patch.object(self.instance, 'save',
side_effect=[self.instance, self.instance, self.instance]),
mock.patch.object(self.compute,
'_build_networks_for_instance',
return_value=network_model.NetworkInfo()),
mock.patch.object(self.compute,
'_notify_about_instance_usage'),
mock.patch.object(self.compute,
'_shutdown_instance'),
mock.patch.object(self.compute,
'_validate_instance_group_policy')
) as (spawn, save,
_build_networks_for_instance, _notify_about_instance_usage,
_shutdown_instance, _validate_instance_group_policy):
self.assertRaises(exception.BuildAbortException,
self.compute._build_and_run_instance, self.context,
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping, self.node,
self.limits, self.filter_properties)
_validate_instance_group_policy.assert_called_once_with(
self.context, self.instance, self.filter_properties)
_build_networks_for_instance.assert_has_calls(
[mock.call(self.context, self.instance,
self.requested_networks, self.security_groups)])
_notify_about_instance_usage.assert_has_calls([
mock.call(self.context, self.instance, 'create.start',
extra_usage_info={'image_name': self.image.get('name')}),
mock.call(self.context, self.instance, 'create.error',
fault=exc)])
save.assert_has_calls([
mock.call(),
mock.call(),
mock.call(
expected_task_state=task_states.BLOCK_DEVICE_MAPPING)])
spawn.assert_has_calls([mock.call(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
network_info=self.network_info,
block_device_info=self.block_device_info)])
_shutdown_instance.assert_called_once_with(self.context,
self.instance, self.block_device_mapping,
self.requested_networks, try_deallocate_networks=True)
@mock.patch('nova.utils.spawn_n')
def test_reschedule_on_resources_unavailable(self, mock_spawn):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
reason = 'resource unavailable'
exc = exception.ComputeResourcesUnavailable(reason=reason)
class FakeResourceTracker(object):
def instance_claim(self, context, instance, limits):
raise exc
self.mox.StubOutWithMock(self.compute, '_get_resource_tracker')
self.mox.StubOutWithMock(self.compute.compute_task_api,
'build_instances')
self.mox.StubOutWithMock(self.compute.network_api,
'cleanup_instance_network_on_host')
self.mox.StubOutWithMock(self.compute,
'_nil_out_instance_obj_host_and_node')
self.compute._get_resource_tracker(self.node).AndReturn(
FakeResourceTracker())
self._do_build_instance_update(reschedule_update=True)
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
self._notify_about_instance_usage('create.error',
fault=exc, stub=False)
self.compute.network_api.cleanup_instance_network_on_host(
self.context, self.instance, self.compute.host)
self.compute._nil_out_instance_obj_host_and_node(self.instance)
self.compute.compute_task_api.build_instances(self.context,
[self.instance], self.image, self.filter_properties,
self.admin_pass, self.injected_files, self.requested_networks,
self.security_groups, self.block_device_mapping)
self._instance_action_events()
self.mox.ReplayAll()
self.compute.build_and_run_instance(self.context, self.instance,
self.image, request_spec={},
filter_properties=self.filter_properties,
injected_files=self.injected_files,
admin_password=self.admin_pass,
requested_networks=self.requested_networks,
security_groups=self.security_groups,
block_device_mapping=self.block_device_mapping, node=self.node,
limits=self.limits)
def test_build_resources_buildabort_reraise(self):
exc = exception.BuildAbortException(
instance_uuid=self.instance.uuid, reason='')
self.mox.StubOutWithMock(self.compute, '_build_resources')
self._notify_about_instance_usage('create.start',
extra_usage_info={'image_name': self.image.get('name')})
self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups, self.image,
self.block_device_mapping).AndRaise(exc)
self._notify_about_instance_usage('create.error',
fault=exc, stub=False)
self.mox.ReplayAll()
with mock.patch.object(self.instance, 'save') as mock_save:
self.assertRaises(exception.BuildAbortException,
self.compute._build_and_run_instance,
self.context,
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping,
self.node, self.limits, self.filter_properties)
mock_save.assert_called_once_with()
def test_build_resources_reraises_on_failed_bdm_prep(self):
self.mox.StubOutWithMock(self.compute, '_prep_block_device')
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
self.network_info)
self._build_resources_instance_update()
self.compute._prep_block_device(self.context, self.instance,
self.block_device_mapping).AndRaise(test.TestingException())
self.mox.ReplayAll()
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
pass
except Exception as e:
self.assertIsInstance(e, exception.BuildAbortException)
def test_failed_bdm_prep_from_delete_raises_unexpected(self):
with contextlib.nested(
mock.patch.object(self.compute,
'_build_networks_for_instance',
return_value=self.network_info),
mock.patch.object(self.instance, 'save',
side_effect=exception.UnexpectedDeletingTaskStateError(
instance_uuid='fake_uuid',
actual={'task_state': task_states.DELETING},
expected={'task_state': None})),
) as (_build_networks_for_instance, save):
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
pass
except Exception as e:
self.assertIsInstance(e,
exception.UnexpectedDeletingTaskStateError)
_build_networks_for_instance.assert_has_calls(
[mock.call(self.context, self.instance,
self.requested_networks, self.security_groups)])
save.assert_has_calls([mock.call()])
def test_build_resources_aborts_on_failed_network_alloc(self):
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndRaise(
test.TestingException())
self.mox.ReplayAll()
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups, self.image,
self.block_device_mapping):
pass
except Exception as e:
self.assertIsInstance(e, exception.BuildAbortException)
def test_failed_network_alloc_from_delete_raises_unexpected(self):
with mock.patch.object(self.compute,
'_build_networks_for_instance') as _build_networks:
exc = exception.UnexpectedDeletingTaskStateError
_build_networks.side_effect = exc(
instance_uuid='fake_uuid',
actual={'task_state': task_states.DELETING},
expected={'task_state': None})
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
pass
except Exception as e:
self.assertIsInstance(e, exc)
_build_networks.assert_has_calls(
[mock.call(self.context, self.instance,
self.requested_networks, self.security_groups)])
def test_build_resources_with_network_info_obj_on_spawn_failure(self):
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
network_model.NetworkInfo([{'address': '1.2.3.4'}]))
self.compute._shutdown_instance(self.context, self.instance,
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
self._build_resources_instance_update()
self.mox.ReplayAll()
test_exception = test.TestingException()
def fake_spawn():
raise test_exception
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
fake_spawn()
except Exception as e:
self.assertEqual(test_exception, e)
def test_build_resources_cleans_up_and_reraises_on_spawn_failure(self):
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
self.network_info)
self.compute._shutdown_instance(self.context, self.instance,
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False)
self._build_resources_instance_update()
self.mox.ReplayAll()
test_exception = test.TestingException()
def fake_spawn():
raise test_exception
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
fake_spawn()
except Exception as e:
self.assertEqual(test_exception, e)
@mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
@mock.patch(
'nova.compute.manager.ComputeManager._build_networks_for_instance')
@mock.patch('nova.objects.Instance.save')
def test_build_resources_instance_not_found_before_yield(
self, mock_save, mock_build_network, mock_info_wait):
mock_build_network.return_value = self.network_info
expected_exc = exception.InstanceNotFound(
instance_id=self.instance.uuid)
mock_save.side_effect = expected_exc
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
raise
except Exception as e:
self.assertEqual(expected_exc, e)
mock_build_network.assert_called_once_with(self.context, self.instance,
self.requested_networks, self.security_groups)
mock_info_wait.assert_called_once_with(do_raise=False)
@mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
@mock.patch(
'nova.compute.manager.ComputeManager._build_networks_for_instance')
@mock.patch('nova.objects.Instance.save')
def test_build_resources_unexpected_task_error_before_yield(
self, mock_save, mock_build_network, mock_info_wait):
mock_build_network.return_value = self.network_info
mock_save.side_effect = exception.UnexpectedTaskStateError(
instance_uuid='fake_uuid', expected={}, actual={})
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
raise
except exception.BuildAbortException:
pass
mock_build_network.assert_called_once_with(self.context, self.instance,
self.requested_networks, self.security_groups)
mock_info_wait.assert_called_once_with(do_raise=False)
@mock.patch('nova.network.model.NetworkInfoAsyncWrapper.wait')
@mock.patch(
'nova.compute.manager.ComputeManager._build_networks_for_instance')
@mock.patch('nova.objects.Instance.save')
def test_build_resources_exception_before_yield(
self, mock_save, mock_build_network, mock_info_wait):
mock_build_network.return_value = self.network_info
mock_save.side_effect = Exception()
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
raise
except exception.BuildAbortException:
pass
mock_build_network.assert_called_once_with(self.context, self.instance,
self.requested_networks, self.security_groups)
mock_info_wait.assert_called_once_with(do_raise=False)
def test_build_resources_aborts_on_cleanup_failure(self):
self.mox.StubOutWithMock(self.compute, '_build_networks_for_instance')
self.mox.StubOutWithMock(self.compute, '_shutdown_instance')
self.compute._build_networks_for_instance(self.context, self.instance,
self.requested_networks, self.security_groups).AndReturn(
self.network_info)
self.compute._shutdown_instance(self.context, self.instance,
self.block_device_mapping, self.requested_networks,
try_deallocate_networks=False).AndRaise(
test.TestingException())
self._build_resources_instance_update()
self.mox.ReplayAll()
def fake_spawn():
raise test.TestingException()
try:
with self.compute._build_resources(self.context, self.instance,
self.requested_networks, self.security_groups,
self.image, self.block_device_mapping):
fake_spawn()
except Exception as e:
self.assertIsInstance(e, exception.BuildAbortException)
def test_build_networks_if_not_allocated(self):
instance = fake_instance.fake_instance_obj(self.context,
system_metadata={},
expected_attrs=['system_metadata'])
self.mox.StubOutWithMock(self.compute.network_api,
'get_instance_nw_info')
self.mox.StubOutWithMock(self.compute, '_allocate_network')
self.compute._allocate_network(self.context, instance,
self.requested_networks, None, self.security_groups, None)
self.mox.ReplayAll()
self.compute._build_networks_for_instance(self.context, instance,
self.requested_networks, self.security_groups)
def test_build_networks_if_allocated_false(self):
instance = fake_instance.fake_instance_obj(self.context,
system_metadata=dict(network_allocated='False'),
expected_attrs=['system_metadata'])
self.mox.StubOutWithMock(self.compute.network_api,
'get_instance_nw_info')
self.mox.StubOutWithMock(self.compute, '_allocate_network')
self.compute._allocate_network(self.context, instance,
self.requested_networks, None, self.security_groups, None)
self.mox.ReplayAll()
self.compute._build_networks_for_instance(self.context, instance,
self.requested_networks, self.security_groups)
def test_return_networks_if_found(self):
instance = fake_instance.fake_instance_obj(self.context,
system_metadata=dict(network_allocated='True'),
expected_attrs=['system_metadata'])
def fake_network_info():
return network_model.NetworkInfo([{'address': '123.123.123.123'}])
self.mox.StubOutWithMock(self.compute.network_api,
'get_instance_nw_info')
self.mox.StubOutWithMock(self.compute, '_allocate_network')
self.mox.StubOutWithMock(self.compute.network_api,
'setup_instance_network_on_host')
self.compute.network_api.setup_instance_network_on_host(
self.context, instance, instance.host)
self.compute.network_api.get_instance_nw_info(
self.context, instance).AndReturn(
network_model.NetworkInfoAsyncWrapper(fake_network_info))
self.mox.ReplayAll()
self.compute._build_networks_for_instance(self.context, instance,
self.requested_networks, self.security_groups)
def test_cleanup_allocated_networks_instance_not_found(self):
with contextlib.nested(
mock.patch.object(self.compute, '_deallocate_network'),
mock.patch.object(self.instance, 'save',
side_effect=exception.InstanceNotFound(instance_id=''))
) as (_deallocate_network, save):
            # Testing that this doesn't raise an exception
self.compute._cleanup_allocated_networks(self.context,
self.instance, self.requested_networks)
save.assert_called_once_with()
self.assertEqual('False',
self.instance.system_metadata['network_allocated'])
@mock.patch.object(manager.ComputeManager, '_instance_update')
def test_launched_at_in_create_end_notification(self,
mock_instance_update):
def fake_notify(*args, **kwargs):
if args[2] == 'create.end':
# Check that launched_at is set on the instance
self.assertIsNotNone(args[1].launched_at)
with contextlib.nested(
mock.patch.object(self.compute,
'_update_scheduler_instance_info'),
mock.patch.object(self.compute.driver, 'spawn'),
mock.patch.object(self.compute,
'_build_networks_for_instance', return_value=[]),
mock.patch.object(self.instance, 'save'),
mock.patch.object(self.compute, '_notify_about_instance_usage',
side_effect=fake_notify)
) as (mock_upd, mock_spawn, mock_networks, mock_save, mock_notify):
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties)
expected_call = mock.call(self.context, self.instance,
'create.end', extra_usage_info={'message': u'Success'},
network_info=[])
create_end_call = mock_notify.call_args_list[
mock_notify.call_count - 1]
self.assertEqual(expected_call, create_end_call)
def test_access_ip_set_when_instance_set_to_active(self):
self.flags(default_access_ip_network_name='test1')
instance = fake_instance.fake_db_instance()
@mock.patch.object(db, 'instance_update_and_get_original',
return_value=({}, instance))
@mock.patch.object(self.compute.driver, 'spawn')
@mock.patch.object(self.compute, '_build_networks_for_instance',
return_value=fake_network.fake_get_instance_nw_info(
self.stubs))
@mock.patch.object(db, 'instance_extra_update_by_uuid')
@mock.patch.object(self.compute, '_notify_about_instance_usage')
def _check_access_ip(mock_notify, mock_extra, mock_networks,
mock_spawn, mock_db_update):
self.compute._build_and_run_instance(self.context, self.instance,
self.image, self.injected_files, self.admin_pass,
self.requested_networks, self.security_groups,
self.block_device_mapping, self.node, self.limits,
self.filter_properties)
updates = {'vm_state': u'active', 'access_ip_v6':
netaddr.IPAddress('2001:db8:0:1:dcad:beff:feef:1'),
'access_ip_v4': netaddr.IPAddress('192.168.1.100'),
'power_state': 0, 'task_state': None, 'launched_at':
mock.ANY, 'expected_task_state': 'spawning'}
expected_call = mock.call(self.context, self.instance.uuid,
updates, columns_to_join=['metadata', 'system_metadata',
'info_cache'])
last_update_call = mock_db_update.call_args_list[
mock_db_update.call_count - 1]
self.assertEqual(expected_call, last_update_call)
_check_access_ip()
@mock.patch.object(manager.ComputeManager, '_instance_update')
def test_create_end_on_instance_delete(self, mock_instance_update):
def fake_notify(*args, **kwargs):
if args[2] == 'create.end':
# Check that launched_at is set on the instance
self.assertIsNotNone(args[1].launched_at)
exc = exception.InstanceNotFound(instance_id='')
with contextlib.nested(
mock.patch.object(self.compute.driver, 'spawn'),
mock.patch.object(self.compute,
'_build_networks_for_instance', return_value=[]),
mock.patch.object(self.instance, 'save',
side_effect=[None, None, None, exc]),
mock.patch.object(self.compute, '_notify_about_instance_usage',
side_effect=fake_notify)
) as (mock_spawn, mock_networks, mock_save, mock_notify):
self.assertRaises(exception.InstanceNotFound,
self.compute._build_and_run_instance, self.context,
self.instance, self.image, self.injected_files,
self.admin_pass, self.requested_networks,
self.security_groups, self.block_device_mapping, self.node,
self.limits, self.filter_properties)
expected_call = mock.call(self.context, self.instance,
'create.end', fault=exc)
create_end_call = mock_notify.call_args_list[
mock_notify.call_count - 1]
self.assertEqual(expected_call, create_end_call)
class ComputeManagerMigrationTestCase(test.NoDBTestCase):
def setUp(self):
super(ComputeManagerMigrationTestCase, self).setUp()
self.compute = importutils.import_object(CONF.compute_manager)
self.context = context.RequestContext('fake', 'fake')
self.image = {}
self.instance = fake_instance.fake_instance_obj(self.context,
vm_state=vm_states.ACTIVE,
expected_attrs=['metadata', 'system_metadata', 'info_cache'])
self.migration = objects.Migration(context=self.context.elevated(),
new_instance_type_id=7)
self.migration.status = 'migrating'
fake_server_actions.stub_out_action_events(self.stubs)
@mock.patch.object(objects.Migration, 'save')
@mock.patch.object(objects.Migration, 'obj_as_admin')
def test_errors_out_migration_decorator(self, mock_save,
mock_obj_as_admin):
        # Tests that the errors_out_migration decorator in the compute
        # manager sets the migration status to 'error' when an exception is
        # raised from the decorated method.
instance = fake_instance.fake_instance_obj(self.context)
migration = objects.Migration()
migration.instance_uuid = instance.uuid
migration.status = 'migrating'
migration.id = 0
@manager.errors_out_migration
def fake_function(self, context, instance, migration):
raise test.TestingException()
mock_obj_as_admin.return_value = mock.MagicMock()
self.assertRaises(test.TestingException, fake_function,
self, self.context, instance, migration)
self.assertEqual('error', migration.status)
mock_save.assert_called_once_with()
mock_obj_as_admin.assert_called_once_with()
def test_finish_resize_failure(self):
with contextlib.nested(
mock.patch.object(self.compute, '_finish_resize',
side_effect=exception.ResizeError(reason='')),
mock.patch.object(db, 'instance_fault_create'),
mock.patch.object(self.compute, '_instance_update'),
mock.patch.object(self.instance, 'save'),
mock.patch.object(self.migration, 'save'),
mock.patch.object(self.migration, 'obj_as_admin',
return_value=mock.MagicMock())
) as (meth, fault_create, instance_update, instance_save,
migration_save, migration_obj_as_admin):
fault_create.return_value = (
test_instance_fault.fake_faults['fake-uuid'][0])
self.assertRaises(
exception.ResizeError, self.compute.finish_resize,
context=self.context, disk_info=[], image=self.image,
instance=self.instance, reservations=[],
migration=self.migration
)
self.assertEqual("error", self.migration.status)
migration_save.assert_called_once_with()
migration_obj_as_admin.assert_called_once_with()
def test_resize_instance_failure(self):
self.migration.dest_host = None
with contextlib.nested(
mock.patch.object(self.compute.driver,
'migrate_disk_and_power_off',
side_effect=exception.ResizeError(reason='')),
mock.patch.object(db, 'instance_fault_create'),
mock.patch.object(self.compute, '_instance_update'),
mock.patch.object(self.migration, 'save'),
mock.patch.object(self.migration, 'obj_as_admin',
return_value=mock.MagicMock()),
mock.patch.object(self.compute.network_api, 'get_instance_nw_info',
return_value=None),
mock.patch.object(self.instance, 'save'),
mock.patch.object(self.compute, '_notify_about_instance_usage'),
mock.patch.object(self.compute,
'_get_instance_block_device_info',
return_value=None),
mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid',
return_value=None),
mock.patch.object(objects.Flavor,
'get_by_id',
return_value=None)
) as (meth, fault_create, instance_update,
migration_save, migration_obj_as_admin, nw_info, save_inst,
notify, vol_block_info, bdm, flavor):
fault_create.return_value = (
test_instance_fault.fake_faults['fake-uuid'][0])
self.assertRaises(
exception.ResizeError, self.compute.resize_instance,
context=self.context, instance=self.instance, image=self.image,
reservations=[], migration=self.migration,
instance_type='type', clean_shutdown=True)
self.assertEqual("error", self.migration.status)
self.assertEqual([mock.call(), mock.call()],
migration_save.mock_calls)
self.assertEqual([mock.call(), mock.call()],
migration_obj_as_admin.mock_calls)
def _test_revert_resize_instance_destroy_disks(self, is_shared=False):
# This test asserts that _is_instance_storage_shared() is called from
# revert_resize() and the return value is passed to driver.destroy().
# Otherwise we could regress this.
@mock.patch.object(self.instance, 'revert_migration_context')
@mock.patch.object(self.compute.network_api, 'get_instance_nw_info')
@mock.patch.object(self.compute, '_is_instance_storage_shared')
@mock.patch.object(self.compute, 'finish_revert_resize')
@mock.patch.object(self.compute, '_instance_update')
@mock.patch.object(self.compute, '_get_resource_tracker')
@mock.patch.object(self.compute.driver, 'destroy')
@mock.patch.object(self.compute.network_api, 'setup_networks_on_host')
@mock.patch.object(self.compute.network_api, 'migrate_instance_start')
@mock.patch.object(compute_utils, 'notify_usage_exists')
@mock.patch.object(self.migration, 'save')
@mock.patch.object(objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def do_test(get_by_instance_uuid,
migration_save,
notify_usage_exists,
migrate_instance_start,
setup_networks_on_host,
destroy,
_get_resource_tracker,
_instance_update,
finish_revert_resize,
_is_instance_storage_shared,
get_instance_nw_info,
revert_migration_context):
self.migration.source_compute = self.instance['host']
# Inform compute that instance uses non-shared or shared storage
_is_instance_storage_shared.return_value = is_shared
self.compute.revert_resize(context=self.context,
migration=self.migration,
instance=self.instance,
reservations=None)
_is_instance_storage_shared.assert_called_once_with(
self.context, self.instance,
host=self.migration.source_compute)
            # If the instance storage is shared, the driver's destroy method
            # should not destroy the disks; otherwise it should.
destroy.assert_called_once_with(self.context, self.instance,
mock.ANY, mock.ANY, not is_shared)
do_test()
def test_revert_resize_instance_destroy_disks_shared_storage(self):
self._test_revert_resize_instance_destroy_disks(is_shared=True)
def test_revert_resize_instance_destroy_disks_non_shared_storage(self):
self._test_revert_resize_instance_destroy_disks(is_shared=False)
def test_consoles_enabled(self):
self.flags(enabled=False, group='vnc')
self.flags(enabled=False, group='spice')
self.flags(enabled=False, group='rdp')
self.flags(enabled=False, group='serial_console')
self.assertFalse(self.compute._consoles_enabled())
self.flags(enabled=True, group='vnc')
self.assertTrue(self.compute._consoles_enabled())
self.flags(enabled=False, group='vnc')
for console in ['spice', 'rdp', 'serial_console']:
self.flags(enabled=True, group=console)
self.assertTrue(self.compute._consoles_enabled())
self.flags(enabled=False, group=console)
@mock.patch('nova.utils.spawn_n')
@mock.patch('nova.compute.manager.ComputeManager.'
'_do_live_migration')
def _test_max_concurrent_live(self, mock_lm, mock_spawn):
mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
@mock.patch('nova.objects.Migration.save')
def _do_it(mock_mig_save):
instance = objects.Instance(uuid=str(uuid.uuid4()))
migration = objects.Migration()
self.compute.live_migration(self.context,
mock.sentinel.dest,
instance,
mock.sentinel.block_migration,
migration,
mock.sentinel.migrate_data)
self.assertEqual('queued', migration.status)
migration.save.assert_called_once_with()
with mock.patch.object(self.compute,
'_live_migration_semaphore') as mock_sem:
for i in (1, 2, 3):
_do_it()
self.assertEqual(3, mock_sem.__enter__.call_count)
def test_max_concurrent_live_limited(self):
self.flags(max_concurrent_live_migrations=2)
self._test_max_concurrent_live()
def test_max_concurrent_live_unlimited(self):
self.flags(max_concurrent_live_migrations=0)
self._test_max_concurrent_live()
def test_max_concurrent_live_semaphore_limited(self):
self.flags(max_concurrent_live_migrations=123)
self.assertEqual(
123,
manager.ComputeManager()._live_migration_semaphore.balance)
def test_max_concurrent_live_semaphore_unlimited(self):
self.flags(max_concurrent_live_migrations=0)
compute = manager.ComputeManager()
self.assertEqual(0, compute._live_migration_semaphore.balance)
self.assertIsInstance(compute._live_migration_semaphore,
compute_utils.UnlimitedSemaphore)
def test_max_concurrent_live_semaphore_negative(self):
self.flags(max_concurrent_live_migrations=-2)
compute = manager.ComputeManager()
self.assertEqual(0, compute._live_migration_semaphore.balance)
self.assertIsInstance(compute._live_migration_semaphore,
compute_utils.UnlimitedSemaphore)
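# A minimal sketch (an assumption for illustration, not ComputeManager's real
# constructor code) of the semaphore selection contract asserted by the tests
# above: a positive max_concurrent_live_migrations bounds concurrency, while
# zero or a negative value falls back to compute_utils.UnlimitedSemaphore.
def _example_live_migration_semaphore(max_concurrent):
    import eventlet.semaphore
    if max_concurrent > 0:
        return eventlet.semaphore.Semaphore(max_concurrent)
    return compute_utils.UnlimitedSemaphore()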
| Francis-Liu/animated-broccoli | nova/tests/unit/compute/test_compute_mgr.py | Python | apache-2.0 | 209,299 |
# =========================================================================
# Copyright 2012-present Yunify, Inc.
# -------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
from qingcloud.cli.misc.utils import explode_array
from qingcloud.cli.iaas_client.actions.base import BaseAction
class DeleteInstanceGroupsAction(BaseAction):
action = 'DeleteInstanceGroups'
command = 'delete-instance-groups'
    usage = '%(prog)s -g "instance_group_id,..." [-f <conf_file>]'
@classmethod
def add_ext_arguments(cls, parser):
parser.add_argument('-g', '--instance_groups', dest='instance_groups',
action='store', type=str, default=None,
help='the IDs of groups you want to delete.')
return parser
@classmethod
def build_directive(cls, options):
instance_groups = explode_array(options.instance_groups)
        required_params = {
            'instance_groups': instance_groups
        }
for param in required_params:
if required_params[param] is None or required_params[param] == '':
print('error: [%s] should be specified' % param)
return None
return {
'instance_groups': instance_groups,
}
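# Hedged usage sketch (the binary name and config path below are assumptions,
# not taken from this file): once registered, the action would typically be
# invoked as
#
#     qingcloud iaas delete-instance-groups \
#         -g "instance_group_id1,instance_group_id2" -f ~/.qingcloud/config.yaml
#
# build_directive() then explodes the comma-separated IDs into the
# 'instance_groups' list passed to the DeleteInstanceGroups API action.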
| yunify/qingcloud-cli | qingcloud/cli/iaas_client/actions/instance_groups/delete_instance_groups.py | Python | apache-2.0 | 1,929 |
# Copyright (c) 2016 Rackspace Hosting Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron import context
from neutron_lib import exceptions as n_exc
from quark import plugin
from quark.tests.functional.base import BaseFunctionalTest
class QuarkPluginNegativeTest(BaseFunctionalTest):
def setUp(self):
super(QuarkPluginNegativeTest, self).setUp()
self.plugin = plugin.Plugin()
self.blank_context = context.Context(None, None, is_admin=True)
self.with_tenant_id = {"thing": {"tenant_id": "stuff"}}
self.without_tenant_id = {"thing": {"attr": "stuff"}}
class QuarkPluginTenantlessNegativeTests(QuarkPluginNegativeTest):
def test_tenant_check_no_raise(self):
ret = self.plugin._fix_missing_tenant_id(
self.blank_context, self.with_tenant_id, "thing")
self.assertEqual(None, ret)
def test_tenant_check_raises_if_no_tenant(self):
with self.assertRaises(n_exc.BadRequest):
self.plugin._fix_missing_tenant_id(
self.blank_context, self.without_tenant_id, "thing")
def test_tenant_check_no_raise_if_tenant_in_context(self):
self.plugin._fix_missing_tenant_id(
self.context, self.without_tenant_id, "thing")
def test_tenant_check_raises_missing_body(self):
with self.assertRaises(n_exc.BadRequest):
self.plugin._fix_missing_tenant_id(
self.blank_context, {}, "thing")
with self.assertRaises(n_exc.BadRequest):
self.plugin._fix_missing_tenant_id(
self.blank_context, None, "thing")
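# A minimal sketch (an assumption, not quark's actual plugin code) of the
# contract the tests above exercise: fill tenant_id from the request context
# when the body omits it, and raise BadRequest when neither supplies one.
def _example_fix_missing_tenant_id(ctx, body, resource):
    if not body:
        raise n_exc.BadRequest(resource=resource, msg="Body is malformed")
    body_resource = body.get(resource)
    if body_resource is None:
        raise n_exc.BadRequest(resource=resource, msg="Body is malformed")
    if not body_resource.get("tenant_id") and not ctx.tenant_id:
        raise n_exc.BadRequest(resource=resource, msg="Tenant id is required")
    body_resource.setdefault("tenant_id", ctx.tenant_id)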
| roaet/quark | quark/tests/functional/test_plugin.py | Python | apache-2.0 | 2,112 |
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test helpers for the transformation module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
from scipy import stats
from six.moves import range
import tensorflow.compat.v2 as tf
from tensorflow_graphics.geometry.transformation import axis_angle
from tensorflow_graphics.geometry.transformation import quaternion
from tensorflow_graphics.geometry.transformation import rotation_matrix_2d
from tensorflow_graphics.geometry.transformation import rotation_matrix_3d
def generate_preset_test_euler_angles(dimensions=3):
"""Generates a permutation with duplicate of some classic euler angles."""
permutations = itertools.product(
[0., np.pi, np.pi / 2., np.pi / 3., np.pi / 4., np.pi / 6.],
repeat=dimensions)
return np.array(list(permutations))
def generate_preset_test_translations(dimensions=3):
"""Generates a set of translations."""
permutations = itertools.product([0.1, -0.2, 0.5, 0.7, 0.4, -0.1],
repeat=dimensions)
return np.array(list(permutations))
def generate_preset_test_rotation_matrices_3d():
"""Generates pre-set test 3d rotation matrices."""
angles = generate_preset_test_euler_angles()
preset_rotation_matrix = rotation_matrix_3d.from_euler(angles)
return preset_rotation_matrix
def generate_preset_test_rotation_matrices_2d():
"""Generates pre-set test 2d rotation matrices."""
angles = generate_preset_test_euler_angles(dimensions=1)
preset_rotation_matrix = rotation_matrix_2d.from_euler(angles)
return preset_rotation_matrix
def generate_preset_test_axis_angle():
"""Generates pre-set test rotation matrices."""
angles = generate_preset_test_euler_angles()
axis, angle = axis_angle.from_euler(angles)
return axis, angle
def generate_preset_test_quaternions():
"""Generates pre-set test quaternions."""
angles = generate_preset_test_euler_angles()
preset_quaternion = quaternion.from_euler(angles)
return preset_quaternion
def generate_preset_test_dual_quaternions():
"""Generates pre-set test quaternions."""
angles = generate_preset_test_euler_angles()
preset_quaternion_real = quaternion.from_euler(angles)
translations = generate_preset_test_translations()
translations = np.concatenate(
(translations / 2.0, np.zeros((np.ma.size(translations, 0), 1))), axis=1)
preset_quaternion_translation = tf.convert_to_tensor(value=translations)
preset_quaternion_dual = quaternion.multiply(preset_quaternion_translation,
preset_quaternion_real)
preset_dual_quaternion = tf.concat(
(preset_quaternion_real, preset_quaternion_dual), axis=-1)
return preset_dual_quaternion
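# Illustrative check added for clarity (not part of the original helpers): a
# unit dual quaternion built as above, with dual part (t / 2) * q_real, has
# orthogonal real and dual parts. Assumes eager execution when called.
def check_preset_dual_quaternions_orthogonal():
  """Returns True if <q_real, q_dual> is ~0 for all preset dual quaternions."""
  dual_quaternions = generate_preset_test_dual_quaternions()
  real, dual = tf.split(dual_quaternions, (4, 4), axis=-1)
  return np.allclose(tf.reduce_sum(real * dual, axis=-1), 0.0, atol=1e-6)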
def generate_random_test_euler_angles_translations(
dimensions=3,
min_angle=-3.0 * np.pi,
max_angle=3.0 * np.pi,
min_translation=3.0,
max_translation=3.0):
"""Generates random test random Euler angles and translations."""
tensor_dimensions = np.random.randint(3)
tensor_tile = np.random.randint(1, 10, tensor_dimensions).tolist()
return (np.random.uniform(min_angle, max_angle, tensor_tile + [dimensions]),
np.random.uniform(min_translation, max_translation,
tensor_tile + [dimensions]))
def generate_random_test_dual_quaternions():
"""Generates random test dual quaternions."""
angles = generate_random_test_euler_angles()
random_quaternion_real = quaternion.from_euler(angles)
min_translation = -3.0
max_translation = 3.0
translations = np.random.uniform(min_translation, max_translation,
angles.shape)
translations_quaternion_shape = np.asarray(translations.shape)
translations_quaternion_shape[-1] = 1
translations = np.concatenate(
(translations / 2.0, np.zeros(translations_quaternion_shape)), axis=-1)
random_quaternion_translation = tf.convert_to_tensor(value=translations)
random_quaternion_dual = quaternion.multiply(random_quaternion_translation,
random_quaternion_real)
random_dual_quaternion = tf.concat(
(random_quaternion_real, random_quaternion_dual), axis=-1)
return random_dual_quaternion
def generate_random_test_euler_angles(dimensions=3,
min_angle=-3. * np.pi,
max_angle=3. * np.pi):
"""Generates random test random Euler angles."""
tensor_dimensions = np.random.randint(3)
tensor_tile = np.random.randint(1, 10, tensor_dimensions).tolist()
return np.random.uniform(min_angle, max_angle, tensor_tile + [dimensions])
def generate_random_test_quaternions(tensor_shape=None):
"""Generates random test quaternions."""
if tensor_shape is None:
tensor_dimensions = np.random.randint(low=1, high=3)
tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist()
u1 = np.random.uniform(0.0, 1.0, tensor_shape)
u2 = np.random.uniform(0.0, 2.0 * math.pi, tensor_shape)
u3 = np.random.uniform(0.0, 2.0 * math.pi, tensor_shape)
a = np.sqrt(1.0 - u1)
b = np.sqrt(u1)
return np.stack((a * np.sin(u2),
a * np.cos(u2),
b * np.sin(u3),
b * np.cos(u3)),
axis=-1) # pyformat: disable
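# Note added for clarity: the construction above is the classic
# uniform-random unit quaternion sampling; every output is unit norm because
# a**2 + b**2 = (1 - u1) + u1 = 1. A small sanity check (illustrative only):
def check_random_quaternions_are_unit_norm():
  """Returns True if generate_random_test_quaternions yields unit norms."""
  quaternions = generate_random_test_quaternions((5, 3))
  return np.allclose(np.linalg.norm(quaternions, axis=-1), 1.0)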
def generate_random_test_axis_angle():
"""Generates random test axis-angles."""
tensor_dimensions = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist()
random_axis = np.random.uniform(size=tensor_shape + [3])
random_axis /= np.linalg.norm(random_axis, axis=-1, keepdims=True)
random_angle = np.random.uniform(size=tensor_shape + [1])
return random_axis, random_angle
def generate_random_test_rotation_matrix_3d():
"""Generates random test 3d rotation matrices."""
random_matrix = np.array(
[stats.special_ortho_group.rvs(3) for _ in range(20)])
return np.reshape(random_matrix, [5, 4, 3, 3])
def generate_random_test_rotation_matrix_2d():
"""Generates random test 2d rotation matrices."""
random_matrix = np.array(
[stats.special_ortho_group.rvs(2) for _ in range(20)])
return np.reshape(random_matrix, [5, 4, 2, 2])
def generate_random_test_lbs_blend():
"""Generates random test for the linear blend skinning blend function."""
tensor_dimensions = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist()
random_points = np.random.uniform(size=tensor_shape + [3])
num_weights = np.random.randint(2, 10)
random_weights = np.random.uniform(size=tensor_shape + [num_weights])
random_weights /= np.sum(random_weights, axis=-1, keepdims=True)
random_rotations = np.array(
[stats.special_ortho_group.rvs(3) for _ in range(num_weights)])
random_rotations = np.reshape(random_rotations, [num_weights, 3, 3])
random_translations = np.random.uniform(size=[num_weights, 3])
return random_points, random_weights, random_rotations, random_translations
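# Plain-numpy reference of linear blend skinning (illustrative only; the real
# op lives in tensorflow_graphics). It documents the formula the random test
# data above feeds: p' = sum_i w_i * (R_i p + t_i).
def reference_lbs_blend(points, weights, rotations, translations):
  """Blends points with per-weight rotations and translations using numpy."""
  rotated = np.einsum('...i,ijk,...k->...j', weights, rotations, points)
  translated = np.einsum('...i,ij->...j', weights, translations)
  return rotated + translated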
def generate_preset_test_lbs_blend():
"""Generates preset test for the linear blend skinning blend function."""
points = np.array([[[1.0, 0.0, 0.0], [0.1, 0.2, 0.5]],
[[0.0, 1.0, 0.0], [0.3, -0.5, 0.2]],
[[-0.3, 0.1, 0.3], [0.1, -0.9, -0.4]]])
weights = np.array([[[0.0, 1.0, 0.0, 0.0], [0.4, 0.2, 0.3, 0.1]],
[[0.6, 0.0, 0.4, 0.0], [0.2, 0.2, 0.1, 0.5]],
[[0.0, 0.1, 0.0, 0.9], [0.1, 0.2, 0.3, 0.4]]])
rotations = np.array(
[[[[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0],
[0.0, 0.0, 1.0]],
[[0.36, 0.48, -0.8],
[-0.8, 0.60, 0.00],
[0.48, 0.64, 0.60]],
[[0.0, 0.0, 1.0],
[1.0, 0.0, 0.0],
[0.0, 1.0, 0.0]],
[[0.0, 1.0, 0.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, -1.0]]],
[[[-0.41554751, -0.42205085, -0.80572535],
[0.08028719, -0.89939186, 0.42970716],
[-0.9060211, 0.11387432, 0.40762533]],
[[-0.05240625, -0.24389111, 0.96838562],
[0.99123384, -0.13047444, 0.02078231],
[0.12128095, 0.96098572, 0.2485908]],
[[-0.32722936, -0.06793413, -0.94249981],
[-0.70574479, 0.68082693, 0.19595657],
[0.62836712, 0.72928708, -0.27073072]],
[[-0.22601332, -0.95393284, 0.19730719],
[-0.01189659, 0.20523618, 0.97864017],
[-0.97405157, 0.21883843, -0.05773466]]]]) # pyformat: disable
translations = np.array(
[[[0.1, -0.2, 0.5],
[-0.2, 0.7, 0.7],
[0.8, -0.2, 0.4],
[-0.1, 0.2, -0.3]],
[[0.5, 0.6, 0.9],
[-0.1, -0.3, -0.7],
[0.4, -0.2, 0.8],
[0.7, 0.8, -0.4]]]) # pyformat: disable
blended_points = np.array([[[[0.16, -0.1, 1.18], [0.3864, 0.148, 0.7352]],
[[0.38, 0.4, 0.86], [-0.2184, 0.152, 0.0088]],
[[-0.05, 0.01, -0.46], [-0.3152, -0.004,
-0.1136]]],
[[[-0.15240625, 0.69123384, -0.57871905],
[0.07776242, 0.33587402, 0.55386645]],
[[0.17959584, 0.01269566, 1.22003942],
[0.71406514, 0.6187734, -0.43794053]],
[[0.67662743, 0.94549789, -0.14946982],
[0.88587099, -0.09324637, -0.45012815]]]])
return points, weights, rotations, translations, blended_points
def generate_random_test_axis_angle_translation():
"""Generates random test angles, axes, translations."""
tensor_dimensions = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist()
random_axis = np.random.uniform(size=tensor_shape + [3])
random_axis /= np.linalg.norm(random_axis, axis=-1, keepdims=True)
random_angle = np.random.uniform(size=tensor_shape + [1])
random_translation = np.random.uniform(size=tensor_shape + [3])
return random_axis, random_angle, random_translation
def generate_random_test_points():
"""Generates random 3D points."""
tensor_dimensions = np.random.randint(3)
tensor_shape = np.random.randint(1, 10, size=(tensor_dimensions)).tolist()
random_point = np.random.uniform(size=tensor_shape + [3])
return random_point
| tensorflow/graphics | tensorflow_graphics/geometry/transformation/tests/test_helpers.py | Python | apache-2.0 | 10,964 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Copy *.mol2 files to each folder, generate *_dock.in files."""
from __future__ import print_function
import os
import re
from shutil import copyfile
mol2_file_name = 'gan1.mol2'
error_list = []
def list_dirs():
return [name for name in os.listdir(".") if os.path.isdir(name)]
def get_first_dock_in_file(dir_name):
all_dock_in_files = [name for name in os.listdir(dir_name)
if name.endswith("dock.in")]
if not all_dock_in_files:
error_list.append('--> NO dock.in file: %s' % dir_name)
return ''
return all_dock_in_files[0]
def modify_dock_in_file(dock_in_file_name, mol2_file_name):
with open(dock_in_file_name, 'r') as f:
lines = f.readlines()
    exists_name_re = re.compile(r"\S+\.mol2")
try:
exists_name = exists_name_re.findall(lines[0])[0]
    except IndexError:
error_list.append(dock_in_file_name)
return ''
else:
lines[0] = lines[0].replace(exists_name, mol2_file_name)
        # os.path.splitext drops the '.mol2' suffix without eating stem chars.
        lines[1] = lines[1].replace(os.path.splitext(exists_name)[0],
                                    os.path.splitext(mol2_file_name)[0])
return ''.join(lines)
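# Hypothetical example (the dock.in layout shown is assumed, not taken from
# real data): if the first two lines of a dock.in file were
#
#     ligand_atom_file              old_ligand.mol2
#     ligand_outfile_prefix         old_ligand
#
# modify_dock_in_file() would rewrite them to reference gan1.mol2 and gan1,
# leaving every other line untouched.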
def main():
protern_dirs = list_dirs()
for i, each_dir in enumerate(protern_dirs):
one_dock_in_file = get_first_dock_in_file(each_dir)
if not one_dock_in_file:
print("%4d : ERROR" % (i+1))
continue
print("%4d : %s | %s" % (i+1, each_dir, one_dock_in_file))
        out_file = os.path.join(
            each_dir, os.path.splitext(mol2_file_name)[0] + '_dock.in')
copyfile(mol2_file_name, os.path.join(each_dir, mol2_file_name))
with open(out_file, 'w') as f:
f.write(modify_dock_in_file(
os.path.join(each_dir,
one_dock_in_file),
mol2_file_name))
with open("error2016.txt", 'w') as f:
f.write('\n'.join(error_list))
if __name__ == '__main__':
main()
| zxjsdp/bioinfo-scripts | Molecular_Computation/multiprocessing_jdock/copy_files.py | Python | apache-2.0 | 2,035 |
#!/usr/bin/python
"""Utility functions for scripts in my bin diretory.
This module contains common utilities such as wrappers for
error/warning reporting, executing shell commands in a controlled way,
etc. These functions are shared by a number of helper scripts.
"""
import locale
import os
import re
import shlex
import signal
import subprocess
import sys
import tempfile
# Debugging verbosity level (0 -> no output)
flag_debug = 0
# Unit testing mode. If set to 1, throw exception instead of calling exit()
flag_unittest = 0
hrszre = re.compile(r"^([\d\.]+)(\S)$")
factors = {"K": 1024.0, "M": 1048576.0, "G": 1073741824.0}
def verbose(level, msg):
"""Print debug trace output of verbosity level is >= value in 'level'."""
if level <= flag_debug:
sys.stderr.write(msg + "\n")
def verbosity_level():
"""Return debug trace level."""
return flag_debug
def increment_verbosity():
"""Increment debug trace level by 1."""
global flag_debug
flag_debug += 1
def decrement_verbosity():
"""Lower debug trace level by 1."""
global flag_debug
flag_debug -= 1
def unit_test_enable():
"""Set unit testing mode."""
global flag_unittest
sys.stderr.write("+++ unit testing mode enabled +++\n")
flag_unittest = 1
def warning(msg):
"""Issue a warning to stderr."""
sys.stderr.write("warning: " + msg + "\n")
def error(msg):
"""Issue an error to stderr, then exit."""
errm = "error: " + msg + "\n"
sys.stderr.write(errm)
if flag_unittest:
raise Exception(errm)
else:
exit(1)
def docmd(cmd):
"""Run a command via subprocess, issuing fatal error if cmd fails."""
args = shlex.split(cmd)
verbose(2, "+ docmd executing: %s" % cmd)
rc = subprocess.call(args)
if rc != 0:
error("command failed: %s" % cmd)
# Similar to docmd, but return the exit status instead of treating failure as fatal
def docmdnf(cmd):
"""Run a command via subprocess, returning exit status."""
args = shlex.split(cmd)
verbose(2, "+ docmd executing: %s" % cmd)
rc = subprocess.call(args)
return rc
# Similar to docmd, but suppress output
def doscmd(cmd, nf=None):
"""Run a command via subprocess, suppressing output unless error."""
verbose(2, "+ doscmd executing: %s" % cmd)
args = shlex.split(cmd)
cmdtf = tempfile.NamedTemporaryFile(mode="w", delete=True)
rc = subprocess.call(args, stdout=cmdtf, stderr=cmdtf)
if rc != 0:
warning("error: command failed (rc=%d) cmd: %s" % (rc, cmd))
warning("output from failing command:")
subprocess.call(["cat", cmdtf.name])
if nf:
return None
error("")
cmdtf.close()
return True
# invoke command, writing output to file
def docmdout(cmd, outfile, nf=None):
"""Run a command via subprocess, writing output to a file."""
verbose(2, "+ docmdout executing: %s > %s" % (cmd, outfile))
args = shlex.split(cmd)
with open(outfile, "w") as outfile:
rc = subprocess.call(args, stdout=outfile)
if rc != 0:
warning("error: command failed (rc=%d) cmd: %s" % (rc, cmd))
if nf:
return None
error("")
return True
# invoke command, writing output to file
def docmderrout(cmd, outfile, nf=None):
"""Run a command via subprocess, writing output to a file."""
verbose(2, "+ docmdout executing: %s > %s" % (cmd, outfile))
args = shlex.split(cmd)
try:
with open(outfile, "w") as outfile:
rc = subprocess.call(args, stdout=outfile, stderr=outfile)
if rc != 0:
if nf:
sys.stderr.write("error: command failed (rc=%d) cmd: %s\n" % (rc, cmd))
return rc
else:
error("command failed (rc=%d) cmd: %s\n" % (rc, cmd))
return rc
except IOError:
error("unable to open %s for writing" % outfile)
# invoke command, reading from one file and writing to another
def docmdinout(cmd, infile, outfile):
"""Run a command via subprocess with input and output file."""
verbose(2, "+ docmdinout executing: %s < %s > %s" % (cmd, infile, outfile))
args = shlex.split(cmd)
cmdtf = tempfile.NamedTemporaryFile(mode="w", delete=True)
with open(infile, "r") as inf:
with open(outfile, "w") as outf:
rc = subprocess.call(args, stdout=outf, stdin=inf, stderr=cmdtf)
if rc != 0:
warning("error: command failed (rc=%d) cmd: %s" % (rc, cmd))
warning("output from failing command:")
subprocess.call(["cat", cmdtf.name])
return 1
verbose(2, "+ finished: %s < %s > %s" % (cmd, infile, outfile))
return 0
# invoke command, returning array of lines read from it
def docmdlines(cmd, nf=None):
"""Run a command via subprocess, returning output as an array of lines."""
verbose(2, "+ docmdlines executing: %s" % cmd)
args = shlex.split(cmd)
mypipe = subprocess.Popen(args, stdout=subprocess.PIPE)
encoding = locale.getdefaultlocale()[1]
pout, perr = mypipe.communicate()
if mypipe.returncode != 0:
if perr:
decoded_err = perr.decode(encoding)
warning(decoded_err)
if nf:
return None
error("command failed (rc=%d): cmd was %s" % (mypipe.returncode, args))
decoded = pout.decode(encoding)
lines = decoded.strip().split("\n")
return lines
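# Usage sketch (illustrative only, not part of the original module):
#   for line in docmdlines("ls -l /tmp"):
#       verbose(1, line)
# Passing nf=True returns None on failure instead of exiting via error().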
# invoke command, returning raw bytes from read
def docmdbytes(cmd, nf=None):
"""Run a command via subprocess, returning output as raw bytestring."""
args = shlex.split(cmd)
mypipe = subprocess.Popen(args, stdout=subprocess.PIPE)
pout, perr = mypipe.communicate()
if mypipe.returncode != 0:
encoding = locale.getdefaultlocale()[1]
if perr:
decoded_err = perr.decode(encoding)
warning(decoded_err)
if nf:
return None
error("command failed (rc=%d): cmd was %s" % (mypipe.returncode, args))
return pout
# invoke a command with input coming from an echo'd string, e.g.
# Ex: "echo 1+2 | perl"
def docmdinstring(cmd, instring):
"""Invoke a command with stdin coming from a specific string."""
verbose(2, "+ docmdinstring executing: echo %s | %s " % (cmd, instring))
args = shlex.split(cmd)
mypipe = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
encoding = locale.getdefaultlocale()[1]
    pout, perr = mypipe.communicate(instring.encode(encoding))
if mypipe.returncode != 0:
if perr:
decoded_err = perr.decode(encoding)
warning(decoded_err)
error("command failed (rc=%d): cmd was %s" % (mypipe.returncode, args))
decoded = pout.decode(encoding)
lines = decoded.strip().split("\n")
return lines
# Execute a command with an alarm timeout.
def docmdwithtimeout(cmd, timeout_duration):
"""Run a command via subprocess, returning exit status or -1 if timeout."""
class TimeoutError(Exception):
pass
def handler(signum, frame):
raise TimeoutError()
# set the timeout handler
prevhandler = signal.signal(signal.SIGALRM, handler)
signal.alarm(timeout_duration)
try:
result = docmdnf(cmd)
except TimeoutError as exc:
verbose(1, "timeout triggered after %d seconds" % timeout_duration)
result = -1
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, prevhandler)
return result
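# Usage sketch (illustrative only): run a command but give up after 30 seconds.
#   rc = docmdwithtimeout("sleep 120", 30)
#   if rc == -1:
#       warning("command timed out")
# Note that the SIGALRM-based timeout only works on Unix-like systems.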
# perform default locale setup if needed
def setdeflanglocale():
if "LANG" not in os.environ:
warning("no env setting for LANG -- using default values")
os.environ["LANG"] = "en_US.UTF-8"
os.environ["LANGUAGE"] = "en_US:"
def determine_btrfs_ssdroot(here):
"""Determine ssd root."""
path_components = here.split("/")
root = "/%s" % path_components[1]
verbose(2, "cwd=%s root=%s" % (here, root))
# Is this a BTRFS ssd to begin with?
outlines = docmdlines("stat -f --printf=%%T %s" % root)
if not outlines:
error("internal error-- could not determine FS type "
"for root dir %s" % root)
if outlines[0] != "btrfs":
error("current FS type is %s, not btrfs (can't proceed)" % outlines[0])
return root
def hr_size_to_bytes(sz):
"""Convert human readable size back to bytes."""
m = hrszre.match(sz)
if not m:
warning("unmatchable size expr %s" % sz)
return None
val = float(m.group(1))
facs = m.group(2)
if facs not in factors:
warning("unknown factor '%s' in size expr %s" % (facs, sz))
return None
fac = factors[facs]
nb = int(val * fac)
return nb
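# Worked example (illustrative only): "1.5G" matches hrszre with value 1.5 and
# factor "G" (1073741824.0), so hr_size_to_bytes("1.5G") returns
# int(1.5 * 1073741824) == 1610612736.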
def trim_perf_report_file(infile):
"""Trim trailing spaces from lines in perf report."""
verbose(1, "trim: reading " + infile)
try:
f = open(infile, "r")
except IOError:
warning("unable to open file %s for reading" % infile)
return 1
lines = f.readlines()
f.close()
verbose(1, "trim: rewriting " + infile)
try:
ft = open(infile, "w")
except IOError:
warning("unable to open file %s for rewriting" % infile)
return 1
for line in lines:
sline = line.rstrip()
ft.write(sline + "\n")
ft.close()
return 0
|
thanm/dragongo
|
tools/script_utils.py
|
Python
|
apache-2.0
| 8,740
|
# SPDX-FileCopyrightText: 2021-2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import json
import os
import sys
import click
from idf_monitor_base.output_helpers import yellow_print
from idf_py_actions.errors import FatalError, NoSerialPortFoundError
from idf_py_actions.global_options import global_options
from idf_py_actions.tools import ensure_build_directory, get_sdkconfig_value, run_target, run_tool
PYTHON = sys.executable
def action_extensions(base_actions, project_path):
def _get_project_desc(ctx, args):
desc_path = os.path.join(args.build_dir, 'project_description.json')
if not os.path.exists(desc_path):
ensure_build_directory(args, ctx.info_name)
with open(desc_path, 'r') as f:
project_desc = json.load(f)
return project_desc
def _get_default_serial_port(args):
        # Import is done here so that it runs after check_environment() has ensured that pyserial is installed
try:
import serial.tools.list_ports
esptool_path = os.path.join(os.environ['IDF_PATH'], 'components/esptool_py/esptool/')
sys.path.insert(0, esptool_path)
import esptool
ports = list(sorted(p.device for p in serial.tools.list_ports.comports()))
            # a high baud rate could cause the connection setup to fail
esp = esptool.get_default_connected_device(serial_list=ports, port=None, connect_attempts=4,
initial_baud=115200)
if esp is None:
raise NoSerialPortFoundError(
"No serial ports found. Connect a device, or use '-p PORT' option to set a specific port.")
serial_port = esp.serial_port
esp._port.close()
return serial_port
except NoSerialPortFoundError:
raise
except Exception as e:
raise FatalError('An exception occurred during detection of the serial port: {}'.format(e))
def _get_esptool_args(args):
esptool_path = os.path.join(os.environ['IDF_PATH'], 'components/esptool_py/esptool/esptool.py')
esptool_wrapper_path = os.environ.get('ESPTOOL_WRAPPER', '')
if args.port is None:
args.port = _get_default_serial_port(args)
result = [PYTHON]
if os.path.exists(esptool_wrapper_path):
result += [esptool_wrapper_path]
result += [esptool_path]
result += ['-p', args.port]
result += ['-b', str(args.baud)]
with open(os.path.join(args.build_dir, 'flasher_args.json')) as f:
flasher_args = json.load(f)
extra_esptool_args = flasher_args['extra_esptool_args']
result += ['--before', extra_esptool_args['before']]
result += ['--after', extra_esptool_args['after']]
result += ['--chip', extra_esptool_args['chip']]
if not extra_esptool_args['stub']:
result += ['--no-stub']
return result
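    # Illustrative sketch (values are assumptions for illustration, not read
    # from a real flasher_args.json): a typical return value could look like
    #   [PYTHON, '<IDF_PATH>/components/esptool_py/esptool/esptool.py',
    #    '-p', '/dev/ttyUSB0', '-b', '460800',
    #    '--before', 'default_reset', '--after', 'hard_reset',
    #    '--chip', 'esp32']
    # with '--no-stub' appended only when extra_esptool_args['stub'] is falsy,
    # and the ESPTOOL_WRAPPER script inserted after PYTHON when that path exists.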
def _get_commandline_options(ctx):
""" Return all the command line options up to first action """
        # This approach ignores argument parsing done by Click
result = []
for arg in sys.argv:
if arg in ctx.command.commands_with_aliases:
break
result.append(arg)
return result
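    # Illustrative sketch (the exact argv and action names are assumptions):
    # for an invocation such as
    #   idf.py -p /dev/ttyUSB0 -b 115200 flash monitor
    # the loop stops at the first recognised action ('flash' here), so the
    # returned list would be [sys.argv[0], '-p', '/dev/ttyUSB0', '-b', '115200'].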
def monitor(action, ctx, args, print_filter, monitor_baud, encrypted, timestamps, timestamp_format):
"""
        Run idf_monitor.py to watch serial output
"""
project_desc = _get_project_desc(ctx, args)
elf_file = os.path.join(args.build_dir, project_desc['app_elf'])
idf_monitor = os.path.join(os.environ['IDF_PATH'], 'tools/idf_monitor.py')
monitor_args = [PYTHON, idf_monitor]
if project_desc['target'] != 'linux':
esp_port = args.port or _get_default_serial_port(args)
monitor_args += ['-p', esp_port]
baud = monitor_baud or os.getenv('IDF_MONITOR_BAUD') or os.getenv('MONITORBAUD')
if baud is None:
# Baud hasn't been changed locally (by local baud argument nor by environment variables)
#
# Use the global baud rate if it has been changed by the command line.
# Use project_desc['monitor_baud'] as the last option.
global_baud_defined = ctx._parameter_source['baud'] == click.core.ParameterSource.COMMANDLINE
baud = args.baud if global_baud_defined else project_desc['monitor_baud']
monitor_args += ['-b', baud]
monitor_args += ['--toolchain-prefix', project_desc['monitor_toolprefix']]
coredump_decode = get_sdkconfig_value(project_desc['config_file'], 'CONFIG_ESP_COREDUMP_DECODE')
if coredump_decode is not None:
monitor_args += ['--decode-coredumps', coredump_decode]
target_arch_riscv = get_sdkconfig_value(project_desc['config_file'], 'CONFIG_IDF_TARGET_ARCH_RISCV')
monitor_args += ['--target', project_desc['target']]
revision = project_desc.get('rev')
if revision:
monitor_args += ['--revision', revision]
if target_arch_riscv:
monitor_args += ['--decode-panic', 'backtrace']
if print_filter is not None:
monitor_args += ['--print_filter', print_filter]
if elf_file:
monitor_args += [elf_file]
if encrypted:
monitor_args += ['--encrypted']
if timestamps:
monitor_args += ['--timestamps']
if timestamp_format:
monitor_args += ['--timestamp-format', timestamp_format]
idf_py = [PYTHON] + _get_commandline_options(ctx) # commands to re-run idf.py
monitor_args += ['-m', ' '.join("'%s'" % a for a in idf_py)]
run_tool('idf_monitor', monitor_args, args.project_dir)
def flash(action, ctx, args):
"""
Run esptool to flash the entire project, from an argfile generated by the build system
"""
ensure_build_directory(args, ctx.info_name)
project_desc = _get_project_desc(ctx, args)
if project_desc['target'] == 'linux':
yellow_print('skipping flash since running on linux...')
return
esp_port = args.port or _get_default_serial_port(args)
run_target(action, args, {'ESPBAUD': str(args.baud), 'ESPPORT': esp_port})
def erase_flash(action, ctx, args):
ensure_build_directory(args, ctx.info_name)
esptool_args = _get_esptool_args(args)
esptool_args += ['erase_flash']
run_tool('esptool.py', esptool_args, args.build_dir)
def global_callback(ctx, global_args, tasks):
encryption = any([task.name in ('encrypted-flash', 'encrypted-app-flash') for task in tasks])
if encryption:
for task in tasks:
if task.name == 'monitor':
task.action_args['encrypted'] = True
break
baud_rate = {
'names': ['-b', '--baud'],
'help': 'Baud rate for flashing. It can imply monitor baud rate as well if it hasn\'t been defined locally.',
'scope': 'global',
'envvar': 'ESPBAUD',
'default': 460800,
}
port = {
'names': ['-p', '--port'],
'help': 'Serial port.',
'scope': 'global',
'envvar': 'ESPPORT',
'default': None,
}
serial_actions = {
'global_action_callbacks': [global_callback],
'actions': {
'flash': {
'callback': flash,
'help': 'Flash the project.',
'options': global_options + [baud_rate, port],
'order_dependencies': ['all', 'erase-flash'],
},
'erase-flash': {
'callback': erase_flash,
'help': 'Erase entire flash chip. Deprecated alias: "erase_flash"',
'options': [baud_rate, port],
},
'erase_flash': {
'callback': erase_flash,
'deprecated': {
'removed': 'v5.0',
'message': 'Please use "erase-flash" instead.',
},
'hidden': True,
'help': 'Erase entire flash chip.',
'options': [baud_rate, port],
},
'monitor': {
'callback':
monitor,
'help':
'Display serial output.',
'options': [
port, {
'names': ['--print-filter', '--print_filter'],
'help':
('Filter monitor output. '
'Restrictions on what to print can be specified as a series of <tag>:<log_level> items '
'where <tag> is the tag string and <log_level> is a character from the set '
'{N, E, W, I, D, V, *} referring to a level. '
'For example, "tag1:W" matches and prints only the outputs written with '
'ESP_LOGW("tag1", ...) or at lower verbosity level, i.e. ESP_LOGE("tag1", ...). '
'Not specifying a <log_level> or using "*" defaults to Verbose level. '
'Please see the IDF Monitor section of the ESP-IDF documentation '
'for a more detailed description and further examples.'),
'default':
None,
}, {
'names': ['--monitor-baud', '-b'],
'type':
click.INT,
'help': ('Baud rate for monitor. '
'If this option is not provided IDF_MONITOR_BAUD and MONITORBAUD '
'environment variables, global baud rate and project_description.json in build directory '
"(generated by CMake from project's sdkconfig) "
'will be checked for default value.'),
}, {
'names': ['--encrypted', '-E'],
'is_flag': True,
'help': ('Enable encrypted flash targets. '
'IDF Monitor will invoke encrypted-flash and encrypted-app-flash targets '
'if this option is set. This option is set by default if IDF Monitor was invoked '
'together with encrypted-flash or encrypted-app-flash target.'),
}, {
'names': ['--timestamps'],
'is_flag': True,
'help': 'Print a time stamp in the beginning of each line.',
}, {
'names': ['--timestamp-format'],
'help': ('Set the formatting of timestamps compatible with strftime(). '
'For example, "%Y-%m-%d %H:%M:%S".'),
'default': None
}
],
'order_dependencies': [
'flash',
'encrypted-flash',
'partition-table-flash',
'bootloader-flash',
'app-flash',
'encrypted-app-flash',
],
},
'partition-table-flash': {
'callback': flash,
'help': 'Flash partition table only. Deprecated alias: "partition_table-flash".',
'options': [baud_rate, port],
'order_dependencies': ['partition-table', 'erase-flash'],
},
'partition_table-flash': {
'callback': flash,
'hidden': True,
'help': 'Flash partition table only.',
'options': [baud_rate, port],
'order_dependencies': ['partition-table', 'erase-flash'],
},
'bootloader-flash': {
'callback': flash,
'help': 'Flash bootloader only.',
'options': [baud_rate, port],
'order_dependencies': ['bootloader', 'erase-flash'],
},
'app-flash': {
'callback': flash,
'help': 'Flash the app only.',
'options': [baud_rate, port],
'order_dependencies': ['app', 'erase-flash'],
},
'encrypted-app-flash': {
'callback': flash,
'help': 'Flash the encrypted app only.',
'order_dependencies': ['app', 'erase-flash'],
},
'encrypted-flash': {
'callback': flash,
'help': 'Flash the encrypted project.',
'order_dependencies': ['all', 'erase-flash'],
},
},
}
return serial_actions
|
espressif/esp-idf
|
tools/idf_py_actions/serial_ext.py
|
Python
|
apache-2.0
| 13,062
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
import numpy as np
import pandas as pd
def to_H2OFrame():
# Connect to a pre-existing cluster
# TODO: negative testing
## 1. list
# a. single col
python_obj = [1, "a", 2.5, "bcd", 0]
the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)
# b. 5 cols, 1 row
python_obj = [[1], [2], [3.7], [8], [9]]
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)
# c. 3 cols, 5 rows
python_obj = [[6,7,8,9,10], [1,2,3,4,5], [3,2,2,2,2]]
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=3)
python_obj = [["a", "b"], ["c", "d"]]
the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=2, cols=2)
# d. jagged
python_obj = [[6,7,8,9,10], [1,2,3,4], [3,2,2]]
the_frame = h2o.H2OFrame(python_obj)
#pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5) TODO
## 2. tuple
# a. single row
python_obj = (1, "a", 2.5, "bcd", 0)
the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)
# b. single column
python_obj = ((1,), (2,), (3.7,), (8,), (9,))
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)
# c. multiple rows, columns
python_obj = ((6,7,8,9,10), (1,2,3,4,5), (3,2,2,2,2))
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=3)
# d. jagged
python_obj = ((6,7,8,9,10), (1,2,3,4), (3,2,2))
the_frame = h2o.H2OFrame(python_obj)
#pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5) TODO
## 3. list-tuple mixed
# a. single column
python_obj = ((1,), [2], (3.7,), [8], (9,))
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)
# b. single column
python_obj = [(1,), [2], (3.7,), [8], (9,)]
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)
# c. multiple rows, columns
python_obj = ([6,7,8,9,10], (1,2,3,4,5), [3,2,2,2,2])
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=3)
# d. multiple rows, columns
python_obj = [(6,7,8,9,10), [1,2,3,4,5], (3,2,2,2,2)]
the_frame = h2o.H2OFrame(python_obj)
pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=3)
# e. jagged
python_obj = [(6,7,8,9,10), [1,2,3,4], (3,2,2)]
the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5) TODO
# f. jagged
python_obj = ((6,7,8,9,10), [1,2,3,4], (3,2,2))
the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5) TODO
## 4. dictionary
# a. single row
# python_obj = {"a":1, "b":"a", "c":2.5, "d":"bcd", "e":0}
# the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)
# assert set(the_frame.names) == set(python_obj.keys()), "H2OFrame header is hosed. Got {0}, but should have got " \
# "{1}".format(the_frame.names, python_obj.keys())
#
# python_obj = {"a":[1], "b":["a"], "c":[2.5], "d":["bcd"], "e":[0]}
# the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)
# assert set(the_frame.names) == set(python_obj.keys()), "H2OFrame header is hosed. Got {0}, but should have got " \
# "{1}".format(the_frame.names, python_obj.keys())
#
# # b. single column
# python_obj = {"foo":(1,2,3.7,8,9)}
# the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)
# assert set(the_frame.names) == set(python_obj.keys()), "H2OFrame header is hosed. Got {0}, but should have got " \
# "{1}".format(the_frame.names, python_obj.keys())
#
# # c. multiple rows, columns
# python_obj = {"foo":[6,7,8,9,10], "bar":(1,2,3,4,5), "baz":(3,2,2,2,2)}
# the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=3)
# assert set(the_frame.names) == set(python_obj.keys()), "H2OFrame header is hosed. Got {0}, but should have got " \
# "{1}".format(the_frame.names, python_obj.keys())
#
# # d. jagged
# python_obj = {"foo":(6,7), "bar":(1,2,3,4), "baz":(3,2,2)}
# the_frame = h2o.H2OFrame(python_obj)
# # check_dims_values_jagged() TODO
# assert set(the_frame.names) == set(python_obj.keys()), "H2OFrame header is hosed. Got {0}, but should have got " \
# "{1}".format(the_frame.names, python_obj.keys())
## 5. numpy.ndarray
# a. single row
# python_obj = np.array([1, "a", 2.5, "bcd", 0])
# the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=5)
#
# # b. single column
# python_obj = np.array([[1], [2], [3.7], [8], [9]])
# the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)
#
# # c. multiple rows, columns
# python_obj = np.array([[6,7,8,9,10], [1,2,3,4,5], [3,2,2,2,2]])
# the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=3, cols=5)
#
# # d. jagged
# python_obj = np.array([[6,7,8,9,10], [1,2,3,4], [3,2,2]])
# the_frame = h2o.H2OFrame(python_obj)
# # check_dims_values_jagged() TODO
#
# ## 6. pandas.DataFrame
# # a. single row
# python_obj = pd.DataFrame({'foo' : pd.Series([1]), 'bar' : pd.Series([6]), 'baz' : pd.Series(["a"]) })
# the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=1, cols=3)
#
# # b. single column
# python_obj = pd.DataFrame({'foo' : pd.Series([1, 2, 3, 7.8, 9])})
# the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=1)
#
# # c. multiple rows, columns
# python_obj = pd.DataFrame({'foo' : pd.Series([6,7,8,9,10]), 'bar' : pd.Series([1,2,3,4,5]),
# 'baz' : pd.Series([3,2,2,2,2])})
# the_frame = h2o.H2OFrame(python_obj)
# pyunit_utils.check_dims_values(python_obj, the_frame, rows=5, cols=3)
#
# # d. jagged
# python_obj = pd.DataFrame({'foo' : pd.Series([6,7,8]), 'bar' : pd.Series([1,2,3,4,5]), 'baz' : pd.Series([3,2,2,2])})
# the_frame = h2o.H2OFrame(python_obj)
# # check_dims_values_jagged() TODO
if __name__ == "__main__":
pyunit_utils.standalone_test(to_H2OFrame)
else:
to_H2OFrame()
|
madmax983/h2o-3
|
h2o-py/tests/testdir_munging/pyunit_to_H2OFrame.py
|
Python
|
apache-2.0
| 7,294
|
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
SourceCollector module
"""
import os
import ConfigParser
import time
import re
from datetime import datetime
from subprocess import check_output
class SourceCollector(object):
"""
SourceCollector class
Responsible for creating a source archive which will contain:
* All sources for that version
* Versioning metadata
* Full changelog
It will also update the repo with all required versioning tags, if appropriate
"""
repo = 'openvstorage/openvstorage'
repo_path_metadata = '/tmp/repo_openvstorage_metadata'
repo_path_code = '/tmp/repo_openvstorage_code'
package_path = '/tmp/packages/openvstorage'
def __init__(self):
"""
Dummy init method, SourceCollector is static
"""
raise NotImplementedError('SourceCollector is a static class')
@staticmethod
def collect(target, revision=None, suffix=None):
"""
Executes the source collecting logic
General steps:
1. Figure out correct code revision, update code repo to that revision
2. Tag that revision, if required
3. Generate changelog, if required
4. Generate version schema
5. Build 'upstream source package'
6. Use this 'upstream source package' for building distribution specific packages
        @param target: Specifies the packaging target. Can be:
* 'experimental' to package against a checked out repo/code
* 'unstable', 'test', 'stable' for automatic packaging for a certain branch
        * ('release', '<release branch>') for automatic packaging for a release branch
@param revision: Specifies an exact target revision:
        * Any existing tag for repackaging 'test', 'stable' or 'release' packages
@param suffix: A suffix for release packages (such as 'alpha', 'beta', 'rc1', 'rtm', ...)
"""
print 'Collecting sources'
if not os.path.exists(SourceCollector.repo_path_code):
os.makedirs(SourceCollector.repo_path_code)
if not os.path.exists(SourceCollector.repo_path_metadata):
os.makedirs(SourceCollector.repo_path_metadata)
if not os.path.exists(SourceCollector.package_path):
os.makedirs(SourceCollector.package_path)
# Update the metadata repo
print ' Updating metadata'
SourceCollector._hg_update_to(SourceCollector.repo_path_metadata, 'default')
print ' Updating code'
distribution = target
if target == 'experimental':
suffix = 'exp'
elif isinstance(target, tuple) and target[0] == 'experimental':
print ' Using feature branch {0}'.format(target[1])
suffix = 'exp'
distribution = target[0]
SourceCollector._hg_update_to(SourceCollector.repo_path_code, target[1])
elif target == 'unstable':
suffix = 'rev'
if revision is None:
SourceCollector._hg_update_to(SourceCollector.repo_path_code, 'default')
else:
SourceCollector._hg_update_to(SourceCollector.repo_path_code, revision)
elif target in ['test', 'stable']:
suffix = 'alpha' if target == 'test' else 'beta'
if revision is None:
SourceCollector._hg_update_to(SourceCollector.repo_path_code, target)
else:
SourceCollector._hg_update_to(SourceCollector.repo_path_code, revision)
elif isinstance(target, tuple) and target[0] == 'release':
if len(target) != 2 and revision is None:
raise ValueError('In case a release build is requested, the exact release branch should be passed.')
distribution = target[0]
SourceCollector._hg_update_to(SourceCollector.repo_path_code, target[1] if revision is None else revision)
else:
raise ValueError('Invalid target specified')
if distribution == 'release':
raise NotImplementedError('Release packaging is not yet fully tested. Please fork the repo and test first')
        # Get parent branches
branches = ['default']
if distribution == 'test':
branches.append('test')
elif distribution == 'stable':
branches += ['test', 'stable']
elif distribution == 'release':
branches += ['test', 'stable', target[1] if revision is None else revision]
# Get current revision
print ' Fetch current revision'
current_revision = int(SourceCollector._run(
'hg summary', SourceCollector.repo_path_code
).split('\n')[0].split(':')[1].strip())
print ' Revision: {0}'.format(current_revision)
# Get revision timestamp
timestamp = eval(SourceCollector._run("hg log -r {0} --template '{{date}}'".format(current_revision),
SourceCollector.repo_path_code))
revision_date = datetime.fromtimestamp(timestamp)
# Build version
filename = '{0}/packaging/version.cfg'.format(SourceCollector.repo_path_code)
parser = ConfigParser.RawConfigParser()
parser.read(filename)
version = '{0}.{1}.{2}'.format(parser.get('main', 'major'),
parser.get('main', 'minor'),
parser.get('main', 'patch'))
print ' Version: {0}'.format(version)
# Load tag information
tag_data = []
print ' Loading tags'
for raw_tag in SourceCollector._run('hg tags', SourceCollector.repo_path_metadata).split('\n'):
parts = raw_tag.split(' ')
tag = parts[0]
match = re.search('^(?P<version>[0-9]+?\.[0-9]+?\.[0-9]+?)(-(?P<suffix>.+)\.(?P<build>[0-9]+))?$', tag)
if match:
match_dict = match.groupdict()
tag_version = match_dict['version']
tag_build = match_dict['build']
tag_suffix = match_dict['suffix']
rev_number, rev_hash = parts[-1].split(':')
tag_data.append({'version': tag_version,
'build': tag_build,
'suffix': tag_suffix,
'rev_number': rev_number,
'rev_hash': rev_hash})
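        # Worked example (illustrative only): a tag name such as
        # '1.2.0-alpha.4' matches the expression above with version='1.2.0',
        # suffix='alpha' and build='4', while a bare release tag '1.2.0'
        # matches with suffix=None and build=None; parts[-1] (for example
        # '123:abcdef012345') supplies rev_number and rev_hash.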
# Build changelog
increment_build = True
changes_found = False
other_changes = False
changelog = []
if target in ['test', 'stable', 'release']:
print ' Generating changelog'
changelog.append('Open vStorage')
changelog.append('=============')
changelog.append('')
changelog.append('This changelog is generated based on DVCS. Due to the nature of DVCS the')
changelog.append('order of changes in this document can be slightly different from reality.')
if target in ['test', 'stable', 'release']:
log = SourceCollector._run(
"hg log -f -b {0} --template '{{date|shortdate}} {{rev}} {{desc|firstline}}\n'".format(
' -b '.join(branches)
), SourceCollector.repo_path_code
)
for log_line in log.strip().split('\n'):
if SourceCollector._ignore_log(log_line):
continue
date, log_revision, description = log_line.split(' ', 2)
active_tag = None
for tag in tag_data:
if tag['rev_number'] == log_revision and tag['suffix'] >= suffix:
active_tag = tag
if active_tag is not None:
if changes_found is False:
increment_build = False
if other_changes is True:
changelog.append('* Internal updates')
changelog.append('\n{0}{1}\n'.format(
active_tag['version'],
'-{0}.{1}'.format(
active_tag['suffix'], active_tag['build']
) if active_tag['suffix'] is not None else ''
))
other_changes = False
if re.match('^OVS\-[0-9]{1,5}', description):
changelog.append('* {0} - {1}'.format(date, description))
else:
other_changes = True
changes_found = True
if other_changes is True:
changelog.append('* Other internal updates')
# Build buildnumber
print ' Generating build'
if distribution == 'experimental':
build = int(time.time())
elif distribution == 'unstable':
build = current_revision
else:
builds = sorted(tag['build'] for tag in tag_data if tag['version'] == version and tag['suffix'] == suffix)
if len(builds) > 0:
build = int(builds[-1])
if revision is None and increment_build is True:
build += 1
else:
print ' No need to increment build'
else:
build = 1
print ' Build: {0}'.format(build)
# Save changelog
if len(changelog) > 0:
if increment_build is True:
changelog.insert(5, '\n{0}{1}\n'.format(
version,
'-{0}.{1}'.format(suffix, build) if suffix is not None else ''
))
with open('{0}/CHANGELOG.txt'.format(SourceCollector.repo_path_code), 'w') as changelog_file:
changelog_file.write('\n'.join(changelog))
# Version string. Examples:
# * Build from local working directory
# 1.2.0-exp.<timestamp>
# * Unstable branch
# 1.2.0-rev.<revision>
# * Test branch
# 1.2.0-alpha.<build>
# * Stable branch
# 1.2.0-beta.<build>
# * Release branches (e.g. release_1_2)
# 1.2.0-rc1.<build> - release candidate 1
# 1.2.0-rc2.<build> - release candidate 2
# 1.2.0 - final released version
# 1.2.1 - hotfix for 1.2.0
# 1.2.2 - hotfix for 1.2.1
version_string = '{0}{1}'.format(
version,
'-{0}.{1}'.format(suffix, build) if suffix is not None else ''
)
print ' Full version: {0}'.format(version_string)
# Tag revision
if distribution in ['test', 'stable', 'release'] and revision is None and increment_build is True:
print ' Tagging revision'
SourceCollector._run(
'hg tag -r {0} {1}'.format(current_revision, version_string),
SourceCollector.repo_path_metadata
)
SourceCollector._run('hg push', SourceCollector.repo_path_metadata)
# Building archive
print ' Building archive'
SourceCollector._run(
"tar -czf {0}/openvstorage_{1}.tar.gz --transform 's,^,openvstorage-{1}/,' scripts/install scripts/system config ovs webapps *.txt".format(
SourceCollector.package_path, version_string
), SourceCollector.repo_path_code
)
SourceCollector._run('rm -f CHANGELOG.txt', SourceCollector.repo_path_code)
print ' Archive: {0}/openvstorage_{1}.tar.gz'.format(SourceCollector.package_path, version_string)
print 'Done'
return distribution, version, suffix, build, version_string, revision_date
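    # Usage sketch (illustrative only, not part of the original module):
    # packaging the tip of the default branch would look roughly like
    #   result = SourceCollector.collect(target='unstable')
    #   distribution, version, suffix, build, version_string, revision_date = result
    # where version_string takes the form '<major>.<minor>.<patch>-rev.<revision>'.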
@staticmethod
def _ignore_log(log_line):
"""
Returns whether a mercurial log line should be ignored
"""
if 'Added tag ' in log_line and ' for changeset ' in log_line:
return True
return False
@staticmethod
def _hg_update_to(path, revision):
"""
Updates a given repo to a certain revision, cloning if it does not exist yet
"""
if not os.path.exists('{0}/.hg'.format(path)):
SourceCollector._run('hg clone https://bitbucket.org/{0} {1}'.format(SourceCollector.repo, path), path)
SourceCollector._run('hg pull -u', path)
SourceCollector._run('hg update -r {0}'.format(revision), path)
@staticmethod
def _run(command, working_directory):
"""
        Runs a command, returning its output
"""
os.chdir(working_directory)
return check_output(command, shell=True)
|
mflu/openvstorage_centos
|
packaging/sourcecollector.py
|
Python
|
apache-2.0
| 13,272
|